comparison gcc/ggc-page.c @ 16:04ced10e8804
gcc 7
author | kono |
---|---|
date | Fri, 27 Oct 2017 22:46:09 +0900 |
parents | f6334be47118 |
children | 84e7813d76e9 |
15:561a7518be6b | 16:04ced10e8804 |
---|---|
1 /* "Bag-of-pages" garbage collector for the GNU compiler. | 1 /* "Bag-of-pages" garbage collector for the GNU compiler. |
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009, | 2 Copyright (C) 1999-2017 Free Software Foundation, Inc. |
3 2010 Free Software Foundation, Inc. | |
4 | 3 |
5 This file is part of GCC. | 4 This file is part of GCC. |
6 | 5 |
7 GCC is free software; you can redistribute it and/or modify it under | 6 GCC is free software; you can redistribute it and/or modify it under |
8 the terms of the GNU General Public License as published by the Free | 7 the terms of the GNU General Public License as published by the Free |
19 <http://www.gnu.org/licenses/>. */ | 18 <http://www.gnu.org/licenses/>. */ |
20 | 19 |
21 #include "config.h" | 20 #include "config.h" |
22 #include "system.h" | 21 #include "system.h" |
23 #include "coretypes.h" | 22 #include "coretypes.h" |
24 #include "tm.h" | 23 #include "backend.h" |
24 #include "alias.h" | |
25 #include "tree.h" | 25 #include "tree.h" |
26 #include "rtl.h" | 26 #include "rtl.h" |
27 #include "memmodel.h" | |
27 #include "tm_p.h" | 28 #include "tm_p.h" |
28 #include "diagnostic-core.h" | 29 #include "diagnostic-core.h" |
29 #include "flags.h" | 30 #include "flags.h" |
30 #include "ggc.h" | |
31 #include "ggc-internal.h" | 31 #include "ggc-internal.h" |
32 #include "timevar.h" | 32 #include "timevar.h" |
33 #include "params.h" | 33 #include "params.h" |
34 #include "tree-flow.h" | 34 #include "cgraph.h" |
35 #include "cfgloop.h" | 35 #include "cfgloop.h" |
36 #include "plugin.h" | 36 #include "plugin.h" |
37 | 37 |
38 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a | 38 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a |
39 file open. Prefer either to valloc. */ | 39 file open. Prefer either to valloc. */ |
46 # define USING_MMAP | 46 # define USING_MMAP |
47 #endif | 47 #endif |
48 | 48 |
49 #ifndef USING_MMAP | 49 #ifndef USING_MMAP |
50 #define USING_MALLOC_PAGE_GROUPS | 50 #define USING_MALLOC_PAGE_GROUPS |
51 #endif | |
52 | |
53 #if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \ | |
54 && defined(USING_MMAP) | |
55 # define USING_MADVISE | |
51 #endif | 56 #endif |
52 | 57 |
53 /* Strategy: | 58 /* Strategy: |
54 | 59 |
55 This garbage-collecting allocator allocates objects on one of a set | 60 This garbage-collecting allocator allocates objects on one of a set |
114 tree points to a list of pages, which must be scanned to find the | 119 tree points to a list of pages, which must be scanned to find the |
115 correct one. */ | 120 correct one. */ |
116 | 121 |
117 #define PAGE_L1_BITS (8) | 122 #define PAGE_L1_BITS (8) |
118 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize) | 123 #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize) |
119 #define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS) | 124 #define PAGE_L1_SIZE ((uintptr_t) 1 << PAGE_L1_BITS) |
120 #define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS) | 125 #define PAGE_L2_SIZE ((uintptr_t) 1 << PAGE_L2_BITS) |
121 | 126 |
122 #define LOOKUP_L1(p) \ | 127 #define LOOKUP_L1(p) \ |
123 (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1)) | 128 (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1)) |
124 | 129 |
125 #define LOOKUP_L2(p) \ | 130 #define LOOKUP_L2(p) \ |
126 (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1)) | 131 (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1)) |
127 | 132 |
128 /* The number of objects per allocation page, for objects on a page of | 133 /* The number of objects per allocation page, for objects on a page of |
129 the indicated ORDER. */ | 134 the indicated ORDER. */ |
130 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER] | 135 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER] |
131 | 136 |
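As an aside for readers following the lookup macros above, here is a minimal standalone sketch (not part of the patch) of how a 32-bit address decomposes into an L1 index, an L2 index and a page offset, assuming 4K pages so that G.lg_pagesize is 12 and PAGE_L2_BITS works out to 12. The switch from size_t to uintptr_t in the new column only matters on hosts where the two types differ in width; the arithmetic itself is unchanged.

```c++
/* Sketch only, not part of the patch: how a 32-bit address splits under the
   two-level page table, assuming 4K pages (lg_pagesize == 12), so that
   PAGE_L2_BITS == 32 - 8 - 12 == 12.  */
#include <cstdint>
#include <cstdio>

int main ()
{
  const unsigned lg_pagesize = 12;   /* assumed 4K page */
  const unsigned l1_bits = 8;        /* PAGE_L1_BITS */
  const unsigned l2_bits = 32 - l1_bits - lg_pagesize;

  uintptr_t p = 0xdeadbeef;
  uintptr_t l1 = (p >> (32 - l1_bits)) & ((1 << l1_bits) - 1);
  uintptr_t l2 = (p >> lg_pagesize) & ((1 << l2_bits) - 1);
  uintptr_t off = p & (((uintptr_t) 1 << lg_pagesize) - 1);

  /* Prints: L1=0xde L2=0xadb offset=0xeef  */
  printf ("L1=%#lx L2=%#lx offset=%#lx\n",
	  (unsigned long) l1, (unsigned long) l2, (unsigned long) off);
  return 0;
}
```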
150 We do not care about alignment for floating-point types. */ | 155 We do not care about alignment for floating-point types. */ |
151 | 156 |
152 struct max_alignment { | 157 struct max_alignment { |
153 char c; | 158 char c; |
154 union { | 159 union { |
155 HOST_WIDEST_INT i; | 160 int64_t i; |
156 void *p; | 161 void *p; |
157 } u; | 162 } u; |
158 }; | 163 }; |
159 | 164 |
160 /* The biggest alignment required. */ | 165 /* The biggest alignment required. */ |
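The struct above is an alignment probe; the MAX_ALIGNMENT definition that uses it falls outside this hunk. A hedged sketch of the usual offsetof-based technique it relies on (the exact macro in ggc-page.c may differ):

```c++
/* Sketch only: the alignment-probe idiom behind struct max_alignment.  The
   offset of `u' after the leading char is the strictest alignment among the
   union members; ggc-page.c derives MAX_ALIGNMENT from it (definition not
   shown in this hunk).  */
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct probe {
  char c;
  union {
    int64_t i;
    void *p;
  } u;
};

int main ()
{
  /* Typically prints 8 on common 64-bit hosts.  */
  printf ("max alignment = %zu\n", offsetof (struct probe, u));
  return 0;
}
```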
194 MAX_ALIGNMENT * 15, | 199 MAX_ALIGNMENT * 15, |
195 sizeof (struct tree_decl_non_common), | 200 sizeof (struct tree_decl_non_common), |
196 sizeof (struct tree_field_decl), | 201 sizeof (struct tree_field_decl), |
197 sizeof (struct tree_parm_decl), | 202 sizeof (struct tree_parm_decl), |
198 sizeof (struct tree_var_decl), | 203 sizeof (struct tree_var_decl), |
199 sizeof (struct tree_type), | 204 sizeof (struct tree_type_non_common), |
200 sizeof (struct function), | 205 sizeof (struct function), |
201 sizeof (struct basic_block_def), | 206 sizeof (struct basic_block_def), |
202 sizeof (struct cgraph_node), | 207 sizeof (struct cgraph_node), |
203 sizeof (struct loop), | 208 sizeof (struct loop), |
204 }; | 209 }; |
210 /* Compute the smallest nonnegative number which when added to X gives | 215 /* Compute the smallest nonnegative number which when added to X gives |
211 a multiple of F. */ | 216 a multiple of F. */ |
212 | 217 |
213 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f)) | 218 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f)) |
214 | 219 |
215 /* Compute the smallest multiple of F that is >= X. */ | 220 /* Round X to next multiple of the page size */ |
216 | 221 |
217 #define ROUND_UP(x, f) (CEIL (x, f) * (f)) | 222 #define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize) |
218 | 223 |
219 /* The Ith entry is the number of objects on a page or order I. */ | 224 /* The Ith entry is the number of objects on a page or order I. */ |
220 | 225 |
221 static unsigned objects_per_page_table[NUM_ORDERS]; | 226 static unsigned objects_per_page_table[NUM_ORDERS]; |
222 | 227 |
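A small standalone sketch (not from the patch) of the rounding helpers in action; CEIL and ROUND_UP come from GCC headers and are restated here so the snippet stands alone, and a 4K page is assumed for the PAGE_ALIGN case:

```c++
/* Sketch only: the rounding helpers in action, with an assumed 4K page.  */
#include <cstddef>
#include <cstdio>

#define CEIL(x, y) (((x) + (y) - 1) / (y))
#define ROUND_UP(x, f) (CEIL (x, f) * (f))
#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

int main ()
{
  const size_t pagesize = 4096;   /* assumed */

  /* 28 bytes of padding take 100 up to the next multiple of 64.  */
  printf ("ROUND_UP_VALUE (100, 64) = %zu\n",
	  (size_t) ROUND_UP_VALUE (100, 64));

  /* PAGE_ALIGN (5000) with 4K pages is 8192.  */
  printf ("PAGE_ALIGN (5000)        = %zu\n",
	  (size_t) ROUND_UP (5000, pagesize));
  return 0;
}
```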
235 } | 240 } |
236 inverse_table[NUM_ORDERS]; | 241 inverse_table[NUM_ORDERS]; |
237 | 242 |
238 /* A page_entry records the status of an allocation page. This | 243 /* A page_entry records the status of an allocation page. This |
239 structure is dynamically sized to fit the bitmap in_use_p. */ | 244 structure is dynamically sized to fit the bitmap in_use_p. */ |
240 typedef struct page_entry | 245 struct page_entry |
241 { | 246 { |
242 /* The next page-entry with objects of the same size, or NULL if | 247 /* The next page-entry with objects of the same size, or NULL if |
243 this is the last page-entry. */ | 248 this is the last page-entry. */ |
244 struct page_entry *next; | 249 struct page_entry *next; |
245 | 250 |
275 unsigned short next_bit_hint; | 280 unsigned short next_bit_hint; |
276 | 281 |
277 /* The lg of size of objects allocated from this page. */ | 282 /* The lg of size of objects allocated from this page. */ |
278 unsigned char order; | 283 unsigned char order; |
279 | 284 |
285 /* Discarded page? */ | |
286 bool discarded; | |
287 | |
280 /* A bit vector indicating whether or not objects are in use. The | 288 /* A bit vector indicating whether or not objects are in use. The |
281 Nth bit is one if the Nth object on this page is allocated. This | 289 Nth bit is one if the Nth object on this page is allocated. This |
282 array is dynamically sized. */ | 290 array is dynamically sized. */ |
283 unsigned long in_use_p[1]; | 291 unsigned long in_use_p[1]; |
284 } page_entry; | 292 }; |
285 | 293 |
286 #ifdef USING_MALLOC_PAGE_GROUPS | 294 #ifdef USING_MALLOC_PAGE_GROUPS |
287 /* A page_group describes a large allocation from malloc, from which | 295 /* A page_group describes a large allocation from malloc, from which |
288 we parcel out aligned pages. */ | 296 we parcel out aligned pages. */ |
289 typedef struct page_group | 297 struct page_group |
290 { | 298 { |
291 /* A linked list of all extant page groups. */ | 299 /* A linked list of all extant page groups. */ |
292 struct page_group *next; | 300 struct page_group *next; |
293 | 301 |
294 /* The address we received from malloc. */ | 302 /* The address we received from malloc. */ |
297 /* The size of the block. */ | 305 /* The size of the block. */ |
298 size_t alloc_size; | 306 size_t alloc_size; |
299 | 307 |
300 /* A bitmask of pages in use. */ | 308 /* A bitmask of pages in use. */ |
301 unsigned int in_use; | 309 unsigned int in_use; |
302 } page_group; | 310 }; |
303 #endif | 311 #endif |
304 | 312 |
305 #if HOST_BITS_PER_PTR <= 32 | 313 #if HOST_BITS_PER_PTR <= 32 |
306 | 314 |
307 /* On 32-bit hosts, we use a two level page table, as pictured above. */ | 315 /* On 32-bit hosts, we use a two level page table, as pictured above. */ |
319 page_entry **table[PAGE_L1_SIZE]; | 327 page_entry **table[PAGE_L1_SIZE]; |
320 } *page_table; | 328 } *page_table; |
321 | 329 |
322 #endif | 330 #endif |
323 | 331 |
332 class finalizer | |
333 { | |
334 public: | |
335 finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {} | |
336 | |
337 void *addr () const { return m_addr; } | |
338 | |
339 void call () const { m_function (m_addr); } | |
340 | |
341 private: | |
342 void *m_addr; | |
343 void (*m_function)(void *); | |
344 }; | |
345 | |
346 class vec_finalizer | |
347 { | |
348 public: | |
349 vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) : | |
350 m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {} | |
351 | |
352 void call () const | |
353 { | |
354 for (size_t i = 0; i < m_n_objects; i++) | |
355 m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size))); | |
356 } | |
357 | |
358 void *addr () const { return reinterpret_cast<void *> (m_addr); } | |
359 | |
360 private: | |
361 uintptr_t m_addr; | |
362 void (*m_function)(void *); | |
363 size_t m_object_size; | |
364 size_t m_n_objects; | |
365 }; | |
366 | |
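A brief usage sketch of the two classes just added (it assumes the finalizer and vec_finalizer definitions above; the payload type and callback are hypothetical): a finalizer runs its callback once on one address, while a vec_finalizer walks N contiguous objects of size S.

```c++
/* Usage sketch only; assumes the finalizer and vec_finalizer classes defined
   above, and uses a hypothetical payload type and callback.  */
#include <cstdint>
#include <cstdio>

struct demo { int x; };

static void note_destroy (void *p) { printf ("finalizing %p\n", p); }

void
demo_finalizers ()
{
  static demo single;
  static demo block[3];

  finalizer fin (&single, note_destroy);
  fin.call ();                       /* runs note_destroy (&single) once */

  vec_finalizer vfin (reinterpret_cast<uintptr_t> (block),
		      note_destroy, sizeof (demo), 3);
  vfin.call ();                      /* runs note_destroy on block[0..2] */
}
```

In the allocator itself these records are only ever constructed by add_finalizer and invoked by ggc_handle_finalizers, both of which appear later in this comparison.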
324 #ifdef ENABLE_GC_ALWAYS_COLLECT | 367 #ifdef ENABLE_GC_ALWAYS_COLLECT |
325 /* List of free objects to be verified as actually free on the | 368 /* List of free objects to be verified as actually free on the |
326 next collection. */ | 369 next collection. */ |
327 struct free_object | 370 struct free_object |
328 { | 371 { |
330 struct free_object *next; | 373 struct free_object *next; |
331 }; | 374 }; |
332 #endif | 375 #endif |
333 | 376 |
334 /* The rest of the global variables. */ | 377 /* The rest of the global variables. */ |
335 static struct globals | 378 static struct ggc_globals |
336 { | 379 { |
337 /* The Nth element in this array is a page with objects of size 2^N. | 380 /* The Nth element in this array is a page with objects of size 2^N. |
338 If there are any pages with free objects, they will be at the | 381 If there are any pages with free objects, they will be at the |
339 head of the list. NULL if there are no page-entries for this | 382 head of the list. NULL if there are no page-entries for this |
340 object size. */ | 383 object size. */ |
412 /* Each element is a pointer to the saved in_use_p bits, if any, | 455 /* Each element is a pointer to the saved in_use_p bits, if any, |
413 zero otherwise. We allocate them all together, to enable a | 456 zero otherwise. We allocate them all together, to enable a |
414 better runtime data access pattern. */ | 457 better runtime data access pattern. */ |
415 unsigned long **save_in_use; | 458 unsigned long **save_in_use; |
416 | 459 |
460 /* Finalizers for single objects. The first index is collection_depth. */ | |
461 vec<vec<finalizer> > finalizers; | |
462 | |
463 /* Finalizers for vectors of objects. */ | |
464 vec<vec<vec_finalizer> > vec_finalizers; | |
465 | |
417 #ifdef ENABLE_GC_ALWAYS_COLLECT | 466 #ifdef ENABLE_GC_ALWAYS_COLLECT |
418 /* List of free objects to be verified as actually free on the | 467 /* List of free objects to be verified as actually free on the |
419 next collection. */ | 468 next collection. */ |
420 struct free_object *free_object_list; | 469 struct free_object *free_object_list; |
421 #endif | 470 #endif |
422 | 471 |
423 #ifdef GATHER_STATISTICS | |
424 struct | 472 struct |
425 { | 473 { |
426 /* Total GC-allocated memory. */ | 474 /* Total GC-allocated memory. */ |
427 unsigned long long total_allocated; | 475 unsigned long long total_allocated; |
428 /* Total overhead for GC-allocated memory. */ | 476 /* Total overhead for GC-allocated memory. */ |
445 unsigned long long total_allocated_per_order[NUM_ORDERS]; | 493 unsigned long long total_allocated_per_order[NUM_ORDERS]; |
446 | 494 |
447 /* The overhead for each of the allocation orders. */ | 495 /* The overhead for each of the allocation orders. */ |
448 unsigned long long total_overhead_per_order[NUM_ORDERS]; | 496 unsigned long long total_overhead_per_order[NUM_ORDERS]; |
449 } stats; | 497 } stats; |
450 #endif | |
451 } G; | 498 } G; |
499 | |
500 /* True if a gc is currently taking place. */ | |
501 | |
502 static bool in_gc = false; | |
452 | 503 |
453 /* The size in bytes required to maintain a bitmap for the objects | 504 /* The size in bytes required to maintain a bitmap for the objects |
454 on a page-entry. */ | 505 on a page-entry. */ |
455 #define BITMAP_SIZE(Num_objects) \ | 506 #define BITMAP_SIZE(Num_objects) \ |
456 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long)) | 507 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long)) |
457 | 508 |
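A worked example (not from the patch) of BITMAP_SIZE, assuming a host where long is 64 bits; call sites pass num_objects + 1 so the one-past-the-end bit also has room:

```c++
/* Sketch only: BITMAP_SIZE for a page holding 1023 objects, assuming a host
   where long is 64 bits.  */
#include <cstddef>
#include <cstdio>

#define CEIL(x, y) (((x) + (y) - 1) / (y))
#define HOST_BITS_PER_LONG (8 * sizeof (long))   /* 64 assumed here */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))

int main ()
{
  /* 1024 bits -> 16 longs -> 128 bytes.  */
  printf ("%zu bytes\n", (size_t) BITMAP_SIZE (1023 + 1));
  return 0;
}
```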
458 /* Allocate pages in chunks of this size, to throttle calls to memory | 509 /* Allocate pages in chunks of this size, to throttle calls to memory |
459 allocation routines. The first page is used, the rest go onto the | 510 allocation routines. The first page is used, the rest go onto the |
460 free list. This cannot be larger than HOST_BITS_PER_INT for the | 511 free list. This cannot be larger than HOST_BITS_PER_INT for the |
461 in_use bitmask for page_group. Hosts that need a different value | 512 in_use bitmask for page_group. Hosts that need a different value |
462 can override this by defining GGC_QUIRE_SIZE explicitly. */ | 513 can override this by defining GGC_QUIRE_SIZE explicitly. */ |
463 #ifndef GGC_QUIRE_SIZE | 514 #ifndef GGC_QUIRE_SIZE |
464 # ifdef USING_MMAP | 515 # ifdef USING_MMAP |
465 # define GGC_QUIRE_SIZE 256 | 516 # define GGC_QUIRE_SIZE 512 /* 2MB for 4K pages */ |
466 # else | 517 # else |
467 # define GGC_QUIRE_SIZE 16 | 518 # define GGC_QUIRE_SIZE 16 |
468 # endif | 519 # endif |
469 #endif | 520 #endif |
470 | 521 |
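The bumped mmap default matches the "2MB for 4K pages" note; a quick check of the arithmetic under an assumed 4K page size:

```c++
/* Sketch only: the quire size in bytes under an assumed 4K page.  */
#include <cstddef>
#include <cstdio>

int main ()
{
  const size_t pagesize = 4096;   /* assumed */
  const size_t quire = 512;       /* GGC_QUIRE_SIZE with mmap */
  printf ("%zu bytes per chunk\n", quire * pagesize);   /* 2097152 (2 MB) */
  return 0;
}
```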
471 /* Initial guess as to how many page table entries we might need. */ | 522 /* Initial guess as to how many page table entries we might need. */ |
472 #define INITIAL_PTE_COUNT 128 | 523 #define INITIAL_PTE_COUNT 128 |
473 | 524 |
474 static int ggc_allocated_p (const void *); | |
475 static page_entry *lookup_page_table_entry (const void *); | 525 static page_entry *lookup_page_table_entry (const void *); |
476 static void set_page_table_entry (void *, page_entry *); | 526 static void set_page_table_entry (void *, page_entry *); |
477 #ifdef USING_MMAP | 527 #ifdef USING_MMAP |
478 static char *alloc_anon (char *, size_t); | 528 static char *alloc_anon (char *, size_t, bool check); |
479 #endif | 529 #endif |
480 #ifdef USING_MALLOC_PAGE_GROUPS | 530 #ifdef USING_MALLOC_PAGE_GROUPS |
481 static size_t page_group_index (char *, char *); | 531 static size_t page_group_index (char *, char *); |
482 static void set_page_group_in_use (page_group *, char *); | 532 static void set_page_group_in_use (page_group *, char *); |
483 static void clear_page_group_in_use (page_group *, char *); | 533 static void clear_page_group_in_use (page_group *, char *); |
534 #define save_in_use_p_i(__i) \ | 584 #define save_in_use_p_i(__i) \ |
535 (G.save_in_use[__i]) | 585 (G.save_in_use[__i]) |
536 #define save_in_use_p(__p) \ | 586 #define save_in_use_p(__p) \ |
537 (save_in_use_p_i (__p->index_by_depth)) | 587 (save_in_use_p_i (__p->index_by_depth)) |
538 | 588 |
539 /* Returns nonzero if P was allocated in GC'able memory. */ | 589 /* Traverse the page table and find the entry for a page. |
540 | 590 If the object wasn't allocated in GC return NULL. */ |
541 static inline int | 591 |
542 ggc_allocated_p (const void *p) | 592 static inline page_entry * |
593 safe_lookup_page_table_entry (const void *p) | |
543 { | 594 { |
544 page_entry ***base; | 595 page_entry ***base; |
545 size_t L1, L2; | 596 size_t L1, L2; |
546 | 597 |
547 #if HOST_BITS_PER_PTR <= 32 | 598 #if HOST_BITS_PER_PTR <= 32 |
548 base = &G.lookup[0]; | 599 base = &G.lookup[0]; |
549 #else | 600 #else |
550 page_table table = G.lookup; | 601 page_table table = G.lookup; |
551 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff; | 602 uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff; |
552 while (1) | 603 while (1) |
553 { | 604 { |
554 if (table == NULL) | 605 if (table == NULL) |
555 return 0; | 606 return NULL; |
556 if (table->high_bits == high_bits) | 607 if (table->high_bits == high_bits) |
557 break; | 608 break; |
558 table = table->next; | 609 table = table->next; |
559 } | 610 } |
560 base = &table->table[0]; | 611 base = &table->table[0]; |
561 #endif | 612 #endif |
562 | 613 |
563 /* Extract the level 1 and 2 indices. */ | 614 /* Extract the level 1 and 2 indices. */ |
564 L1 = LOOKUP_L1 (p); | 615 L1 = LOOKUP_L1 (p); |
565 L2 = LOOKUP_L2 (p); | 616 L2 = LOOKUP_L2 (p); |
566 | 617 if (! base[L1]) |
567 return base[L1] && base[L1][L2]; | 618 return NULL; |
619 | |
620 return base[L1][L2]; | |
568 } | 621 } |
569 | 622 |
570 /* Traverse the page table and find the entry for a page. | 623 /* Traverse the page table and find the entry for a page. |
571 Die (probably) if the object wasn't allocated via GC. */ | 624 Die (probably) if the object wasn't allocated via GC. */ |
572 | 625 |
578 | 631 |
579 #if HOST_BITS_PER_PTR <= 32 | 632 #if HOST_BITS_PER_PTR <= 32 |
580 base = &G.lookup[0]; | 633 base = &G.lookup[0]; |
581 #else | 634 #else |
582 page_table table = G.lookup; | 635 page_table table = G.lookup; |
583 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff; | 636 uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff; |
584 while (table->high_bits != high_bits) | 637 while (table->high_bits != high_bits) |
585 table = table->next; | 638 table = table->next; |
586 base = &table->table[0]; | 639 base = &table->table[0]; |
587 #endif | 640 #endif |
588 | 641 |
603 | 656 |
604 #if HOST_BITS_PER_PTR <= 32 | 657 #if HOST_BITS_PER_PTR <= 32 |
605 base = &G.lookup[0]; | 658 base = &G.lookup[0]; |
606 #else | 659 #else |
607 page_table table; | 660 page_table table; |
608 size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff; | 661 uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff; |
609 for (table = G.lookup; table; table = table->next) | 662 for (table = G.lookup; table; table = table->next) |
610 if (table->high_bits == high_bits) | 663 if (table->high_bits == high_bits) |
611 goto found; | 664 goto found; |
612 | 665 |
613 /* Not found -- allocate a new table. */ | 666 /* Not found -- allocate a new table. */ |
652 /* Allocate SIZE bytes of anonymous memory, preferably near PREF, | 705 /* Allocate SIZE bytes of anonymous memory, preferably near PREF, |
653 (if non-null). The ifdef structure here is intended to cause a | 706 (if non-null). The ifdef structure here is intended to cause a |
654 compile error unless exactly one of the HAVE_* is defined. */ | 707 compile error unless exactly one of the HAVE_* is defined. */ |
655 | 708 |
656 static inline char * | 709 static inline char * |
657 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size) | 710 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check) |
658 { | 711 { |
659 #ifdef HAVE_MMAP_ANON | 712 #ifdef HAVE_MMAP_ANON |
660 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE, | 713 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE, |
661 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); | 714 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); |
662 #endif | 715 #endif |
665 MAP_PRIVATE, G.dev_zero_fd, 0); | 718 MAP_PRIVATE, G.dev_zero_fd, 0); |
666 #endif | 719 #endif |
667 | 720 |
668 if (page == (char *) MAP_FAILED) | 721 if (page == (char *) MAP_FAILED) |
669 { | 722 { |
723 if (!check) | |
724 return NULL; | |
670 perror ("virtual memory exhausted"); | 725 perror ("virtual memory exhausted"); |
671 exit (FATAL_EXIT_CODE); | 726 exit (FATAL_EXIT_CODE); |
672 } | 727 } |
673 | 728 |
674 /* Remember that we allocated this memory. */ | 729 /* Remember that we allocated this memory. */ |
727 bitmap_size = BITMAP_SIZE (num_objects + 1); | 782 bitmap_size = BITMAP_SIZE (num_objects + 1); |
728 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size; | 783 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size; |
729 entry_size = num_objects * OBJECT_SIZE (order); | 784 entry_size = num_objects * OBJECT_SIZE (order); |
730 if (entry_size < G.pagesize) | 785 if (entry_size < G.pagesize) |
731 entry_size = G.pagesize; | 786 entry_size = G.pagesize; |
787 entry_size = PAGE_ALIGN (entry_size); | |
732 | 788 |
733 entry = NULL; | 789 entry = NULL; |
734 page = NULL; | 790 page = NULL; |
735 | 791 |
736 /* Check the list of free pages for one we can use. */ | 792 /* Check the list of free pages for one we can use. */ |
738 if (p->bytes == entry_size) | 794 if (p->bytes == entry_size) |
739 break; | 795 break; |
740 | 796 |
741 if (p != NULL) | 797 if (p != NULL) |
742 { | 798 { |
799 if (p->discarded) | |
800 G.bytes_mapped += p->bytes; | |
801 p->discarded = false; | |
802 | |
743 /* Recycle the allocated memory from this page ... */ | 803 /* Recycle the allocated memory from this page ... */ |
744 *pp = p->next; | 804 *pp = p->next; |
745 page = p->page; | 805 page = p->page; |
746 | 806 |
747 #ifdef USING_MALLOC_PAGE_GROUPS | 807 #ifdef USING_MALLOC_PAGE_GROUPS |
762 { | 822 { |
763 /* We want just one page. Allocate a bunch of them and put the | 823 /* We want just one page. Allocate a bunch of them and put the |
764 extras on the freelist. (Can only do this optimization with | 824 extras on the freelist. (Can only do this optimization with |
765 mmap for backing store.) */ | 825 mmap for backing store.) */ |
766 struct page_entry *e, *f = G.free_pages; | 826 struct page_entry *e, *f = G.free_pages; |
767 int i; | 827 int i, entries = GGC_QUIRE_SIZE; |
768 | 828 |
769 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE); | 829 page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false); |
830 if (page == NULL) | |
831 { | |
832 page = alloc_anon (NULL, G.pagesize, true); | |
833 entries = 1; | |
834 } | |
770 | 835 |
771 /* This loop counts down so that the chain will be in ascending | 836 /* This loop counts down so that the chain will be in ascending |
772 memory order. */ | 837 memory order. */ |
773 for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--) | 838 for (i = entries - 1; i >= 1; i--) |
774 { | 839 { |
775 e = XCNEWVAR (struct page_entry, page_entry_size); | 840 e = XCNEWVAR (struct page_entry, page_entry_size); |
776 e->order = order; | 841 e->order = order; |
777 e->bytes = G.pagesize; | 842 e->bytes = G.pagesize; |
778 e->page = page + (i << G.lg_pagesize); | 843 e->page = page + (i << G.lg_pagesize); |
781 } | 846 } |
782 | 847 |
783 G.free_pages = f; | 848 G.free_pages = f; |
784 } | 849 } |
785 else | 850 else |
786 page = alloc_anon (NULL, entry_size); | 851 page = alloc_anon (NULL, entry_size, true); |
787 #endif | 852 #endif |
788 #ifdef USING_MALLOC_PAGE_GROUPS | 853 #ifdef USING_MALLOC_PAGE_GROUPS |
789 else | 854 else |
790 { | 855 { |
791 /* Allocate a large block of memory and serve out the aligned | 856 /* Allocate a large block of memory and serve out the aligned |
800 alloc_size = GGC_QUIRE_SIZE * G.pagesize; | 865 alloc_size = GGC_QUIRE_SIZE * G.pagesize; |
801 else | 866 else |
802 alloc_size = entry_size + G.pagesize - 1; | 867 alloc_size = entry_size + G.pagesize - 1; |
803 allocation = XNEWVEC (char, alloc_size); | 868 allocation = XNEWVEC (char, alloc_size); |
804 | 869 |
805 page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize); | 870 page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize); |
806 head_slop = page - allocation; | 871 head_slop = page - allocation; |
807 if (multiple_pages) | 872 if (multiple_pages) |
808 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1); | 873 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1); |
809 else | 874 else |
810 tail_slop = alloc_size - entry_size - head_slop; | 875 tail_slop = alloc_size - entry_size - head_slop; |
954 /* Release the free page cache to the system. */ | 1019 /* Release the free page cache to the system. */ |
955 | 1020 |
956 static void | 1021 static void |
957 release_pages (void) | 1022 release_pages (void) |
958 { | 1023 { |
959 #ifdef USING_MMAP | 1024 #ifdef USING_MADVISE |
1025 page_entry *p, *start_p; | |
1026 char *start; | |
1027 size_t len; | |
1028 size_t mapped_len; | |
1029 page_entry *next, *prev, *newprev; | |
1030 size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize; | |
1031 | |
1032 /* First free larger continuous areas to the OS. | |
1033 This allows other allocators to grab these areas if needed. | |
1034 This is only done on larger chunks to avoid fragmentation. | |
1035 This does not always work because the free_pages list is only | |
1036 approximately sorted. */ | |
1037 | |
1038 p = G.free_pages; | |
1039 prev = NULL; | |
1040 while (p) | |
1041 { | |
1042 start = p->page; | |
1043 start_p = p; | |
1044 len = 0; | |
1045 mapped_len = 0; | |
1046 newprev = prev; | |
1047 while (p && p->page == start + len) | |
1048 { | |
1049 len += p->bytes; | |
1050 if (!p->discarded) | |
1051 mapped_len += p->bytes; | |
1052 newprev = p; | |
1053 p = p->next; | |
1054 } | |
1055 if (len >= free_unit) | |
1056 { | |
1057 while (start_p != p) | |
1058 { | |
1059 next = start_p->next; | |
1060 free (start_p); | |
1061 start_p = next; | |
1062 } | |
1063 munmap (start, len); | |
1064 if (prev) | |
1065 prev->next = p; | |
1066 else | |
1067 G.free_pages = p; | |
1068 G.bytes_mapped -= mapped_len; | |
1069 continue; | |
1070 } | |
1071 prev = newprev; | |
1072 } | |
1073 | |
1074 /* Now give back the fragmented pages to the OS, but keep the address | |
1075 space to reuse it next time. */ | |
1076 | |
1077 for (p = G.free_pages; p; ) | |
1078 { | |
1079 if (p->discarded) | |
1080 { | |
1081 p = p->next; | |
1082 continue; | |
1083 } | |
1084 start = p->page; | |
1085 len = p->bytes; | |
1086 start_p = p; | |
1087 p = p->next; | |
1088 while (p && p->page == start + len) | |
1089 { | |
1090 len += p->bytes; | |
1091 p = p->next; | |
1092 } | |
1093 /* Give the page back to the kernel, but don't free the mapping. | |
1094 This avoids fragmentation in the virtual memory map of the | |
1095 process. Next time we can reuse it by just touching it. */ | |
1096 madvise (start, len, MADV_DONTNEED); | |
1097 /* Don't count those pages as mapped to not touch the garbage collector | |
1098 unnecessarily. */ | |
1099 G.bytes_mapped -= len; | |
1100 while (start_p != p) | |
1101 { | |
1102 start_p->discarded = true; | |
1103 start_p = start_p->next; | |
1104 } | |
1105 } | |
1106 #endif | |
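A standalone sketch (not from the patch, Linux-like host assumed) of the pattern the new USING_MADVISE branch builds on: MADV_DONTNEED lets the kernel reclaim the physical pages while the mapping, and therefore the address range the collector wants to reuse, stays valid; the next touch refaults fresh zero-filled pages.

```c++
/* Sketch only (Linux-like host assumed): give memory back to the kernel but
   keep the mapping, as the USING_MADVISE branch above does.  */
#include <sys/mman.h>
#include <cstddef>

int main ()
{
  const size_t len = 1 << 20;   /* 1 MB, arbitrary */
  char *p = (char *) mmap (NULL, len, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED)
    return 1;

  p[0] = 42;                            /* touch: page now has backing store */
  madvise (p, len, MADV_DONTNEED);      /* discard contents, keep the mapping */
  p[0] = 42;                            /* reuse: refaults a zero page */

  munmap (p, len);
  return 0;
}
```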
1107 #if defined(USING_MMAP) && !defined(USING_MADVISE) | |
960 page_entry *p, *next; | 1108 page_entry *p, *next; |
961 char *start; | 1109 char *start; |
962 size_t len; | 1110 size_t len; |
963 | 1111 |
964 /* Gather up adjacent pages so they are unmapped together. */ | 1112 /* Gather up adjacent pages so they are unmapped together. */ |
1052 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, | 1200 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, |
1053 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, | 1201 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, |
1054 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 | 1202 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 |
1055 }; | 1203 }; |
1056 | 1204 |
1057 /* Typed allocation function. Does nothing special in this collector. */ | 1205 /* For a given size of memory requested for allocation, return the |
1206 actual size that is going to be allocated, as well as the size | |
1207 order. */ | |
1208 | |
1209 static void | |
1210 ggc_round_alloc_size_1 (size_t requested_size, | |
1211 size_t *size_order, | |
1212 size_t *alloced_size) | |
1213 { | |
1214 size_t order, object_size; | |
1215 | |
1216 if (requested_size < NUM_SIZE_LOOKUP) | |
1217 { | |
1218 order = size_lookup[requested_size]; | |
1219 object_size = OBJECT_SIZE (order); | |
1220 } | |
1221 else | |
1222 { | |
1223 order = 10; | |
1224 while (requested_size > (object_size = OBJECT_SIZE (order))) | |
1225 order++; | |
1226 } | |
1227 | |
1228 if (size_order) | |
1229 *size_order = order; | |
1230 if (alloced_size) | |
1231 *alloced_size = object_size; | |
1232 } | |
1233 | |
1234 /* For a given size of memory requested for allocation, return the | |
1235 actual size that is going to be allocated. */ | |
1236 | |
1237 size_t | |
1238 ggc_round_alloc_size (size_t requested_size) | |
1239 { | |
1240 size_t size = 0; | |
1241 | |
1242 ggc_round_alloc_size_1 (requested_size, NULL, &size); | |
1243 return size; | |
1244 } | |
1245 | |
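A toy model (not the real implementation) of the two-regime rounding that ggc_round_alloc_size_1 performs: small requests are resolved by the size_lookup table, larger ones by walking orders upward until OBJECT_SIZE fits. The standalone version below models only power-of-two object sizes; the real tables also contain non-power-of-two orders tuned to common tree/rtl node sizes.

```c++
/* Toy model only, not the real implementation.  */
#include <cstddef>
#include <cstdio>

static size_t
toy_round_alloc_size (size_t requested)
{
  size_t object_size = 8;        /* smallest toy order */
  while (object_size < requested)
    object_size *= 2;
  return object_size;
}

int main ()
{
  printf ("100 -> %zu\n", toy_round_alloc_size (100));   /* 128 */
  printf ("513 -> %zu\n", toy_round_alloc_size (513));   /* 1024 */
  return 0;
}
```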
1246 /* Push a finalizer onto the appropriate vec. */ | |
1247 | |
1248 static void | |
1249 add_finalizer (void *result, void (*f)(void *), size_t s, size_t n) | |
1250 { | |
1251 if (f == NULL) | |
1252 /* No finalizer. */; | |
1253 else if (n == 1) | |
1254 { | |
1255 finalizer fin (result, f); | |
1256 G.finalizers[G.context_depth].safe_push (fin); | |
1257 } | |
1258 else | |
1259 { | |
1260 vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n); | |
1261 G.vec_finalizers[G.context_depth].safe_push (fin); | |
1262 } | |
1263 } | |
1264 | |
1265 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */ | |
1058 | 1266 |
1059 void * | 1267 void * |
1060 ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size | 1268 ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n |
1061 MEM_STAT_DECL) | 1269 MEM_STAT_DECL) |
1062 { | |
1063 return ggc_internal_alloc_stat (size PASS_MEM_STAT); | |
1064 } | |
1065 | |
1066 /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */ | |
1067 | |
1068 void * | |
1069 ggc_internal_alloc_stat (size_t size MEM_STAT_DECL) | |
1070 { | 1270 { |
1071 size_t order, word, bit, object_offset, object_size; | 1271 size_t order, word, bit, object_offset, object_size; |
1072 struct page_entry *entry; | 1272 struct page_entry *entry; |
1073 void *result; | 1273 void *result; |
1074 | 1274 |
1075 if (size < NUM_SIZE_LOOKUP) | 1275 ggc_round_alloc_size_1 (size, &order, &object_size); |
1076 { | |
1077 order = size_lookup[size]; | |
1078 object_size = OBJECT_SIZE (order); | |
1079 } | |
1080 else | |
1081 { | |
1082 order = 10; | |
1083 while (size > (object_size = OBJECT_SIZE (order))) | |
1084 order++; | |
1085 } | |
1086 | 1276 |
1087 /* If there are non-full pages for this size allocation, they are at | 1277 /* If there are non-full pages for this size allocation, they are at |
1088 the head of the list. */ | 1278 the head of the list. */ |
1089 entry = G.pages[order]; | 1279 entry = G.pages[order]; |
1090 | 1280 |
1184 G.page_tails[order] = entry; | 1374 G.page_tails[order] = entry; |
1185 } | 1375 } |
1186 | 1376 |
1187 /* Calculate the object's address. */ | 1377 /* Calculate the object's address. */ |
1188 result = entry->page + object_offset; | 1378 result = entry->page + object_offset; |
1189 #ifdef GATHER_STATISTICS | 1379 if (GATHER_STATISTICS) |
1190 ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size, | 1380 ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size, |
1191 result PASS_MEM_STAT); | 1381 result FINAL_PASS_MEM_STAT); |
1192 #endif | |
1193 | 1382 |
1194 #ifdef ENABLE_GC_CHECKING | 1383 #ifdef ENABLE_GC_CHECKING |
1195 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the | 1384 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the |
1196 exact same semantics in presence of memory bugs, regardless of | 1385 exact same semantics in presence of memory bugs, regardless of |
1197 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the | 1386 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the |
1218 G.allocated += object_size; | 1407 G.allocated += object_size; |
1219 | 1408 |
1220 /* For timevar statistics. */ | 1409 /* For timevar statistics. */ |
1221 timevar_ggc_mem_total += object_size; | 1410 timevar_ggc_mem_total += object_size; |
1222 | 1411 |
1223 #ifdef GATHER_STATISTICS | 1412 if (f) |
1224 { | 1413 add_finalizer (result, f, s, n); |
1225 size_t overhead = object_size - size; | 1414 |
1226 | 1415 if (GATHER_STATISTICS) |
1227 G.stats.total_overhead += overhead; | 1416 { |
1228 G.stats.total_allocated += object_size; | 1417 size_t overhead = object_size - size; |
1229 G.stats.total_overhead_per_order[order] += overhead; | 1418 |
1230 G.stats.total_allocated_per_order[order] += object_size; | 1419 G.stats.total_overhead += overhead; |
1231 | 1420 G.stats.total_allocated += object_size; |
1232 if (size <= 32) | 1421 G.stats.total_overhead_per_order[order] += overhead; |
1233 { | 1422 G.stats.total_allocated_per_order[order] += object_size; |
1234 G.stats.total_overhead_under32 += overhead; | 1423 |
1235 G.stats.total_allocated_under32 += object_size; | 1424 if (size <= 32) |
1236 } | 1425 { |
1237 if (size <= 64) | 1426 G.stats.total_overhead_under32 += overhead; |
1238 { | 1427 G.stats.total_allocated_under32 += object_size; |
1239 G.stats.total_overhead_under64 += overhead; | 1428 } |
1240 G.stats.total_allocated_under64 += object_size; | 1429 if (size <= 64) |
1241 } | 1430 { |
1242 if (size <= 128) | 1431 G.stats.total_overhead_under64 += overhead; |
1243 { | 1432 G.stats.total_allocated_under64 += object_size; |
1244 G.stats.total_overhead_under128 += overhead; | 1433 } |
1245 G.stats.total_allocated_under128 += object_size; | 1434 if (size <= 128) |
1246 } | 1435 { |
1247 } | 1436 G.stats.total_overhead_under128 += overhead; |
1248 #endif | 1437 G.stats.total_allocated_under128 += object_size; |
1438 } | |
1439 } | |
1249 | 1440 |
1250 if (GGC_DEBUG_LEVEL >= 3) | 1441 if (GGC_DEBUG_LEVEL >= 3) |
1251 fprintf (G.debug_file, | 1442 fprintf (G.debug_file, |
1252 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n", | 1443 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n", |
1253 (unsigned long) size, (unsigned long) object_size, result, | 1444 (unsigned long) size, (unsigned long) object_size, result, |
1264 page_entry *entry; | 1455 page_entry *entry; |
1265 unsigned bit, word; | 1456 unsigned bit, word; |
1266 unsigned long mask; | 1457 unsigned long mask; |
1267 unsigned long offset; | 1458 unsigned long offset; |
1268 | 1459 |
1269 if (!p || !ggc_allocated_p (p)) | 1460 if (!p) |
1270 return; | 1461 return; |
1271 | 1462 |
1272 /* Look up the page on which the object is alloced. . */ | 1463 /* Look up the page on which the object is alloced. If it was not |
1273 entry = lookup_page_table_entry (p); | 1464 GC allocated, gracefully bail out. */ |
1274 gcc_assert (entry); | 1465 entry = safe_lookup_page_table_entry (p); |
1466 if (!entry) | |
1467 return; | |
1275 | 1468 |
1276 /* Calculate the index of the object on the page; this is its bit | 1469 /* Calculate the index of the object on the page; this is its bit |
1277 position in the in_use_p bitmap. Note that because a char* might | 1470 position in the in_use_p bitmap. Note that because a char* might |
1278 point to the middle of an object, we need special code here to | 1471 point to the middle of an object, we need special code here to |
1279 make sure P points to the start of an object. */ | 1472 make sure P points to the start of an object. */ |
1305 fprintf (G.debug_file, "Marking %p\n", p); | 1498 fprintf (G.debug_file, "Marking %p\n", p); |
1306 | 1499 |
1307 return; | 1500 return; |
1308 } | 1501 } |
1309 | 1502 |
1503 | |
1504 /* User-callable entry points for marking string X. */ | |
1505 | |
1506 void | |
1507 gt_ggc_mx (const char *& x) | |
1508 { | |
1509 gt_ggc_m_S (x); | |
1510 } | |
1511 | |
1512 void | |
1513 gt_ggc_mx (unsigned char *& x) | |
1514 { | |
1515 gt_ggc_m_S (x); | |
1516 } | |
1517 | |
1518 void | |
1519 gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED) | |
1520 { | |
1521 } | |
1522 | |
1310 /* If P is not marked, marks it and return false. Otherwise return true. | 1523 /* If P is not marked, marks it and return false. Otherwise return true. |
1311 P must have been allocated by the GC allocator; it mustn't point to | 1524 P must have been allocated by the GC allocator; it mustn't point to |
1312 static objects, stack variables, or memory allocated with malloc. */ | 1525 static objects, stack variables, or memory allocated with malloc. */ |
1313 | 1526 |
1314 int | 1527 int |
1380 /* Release the memory for object P. */ | 1593 /* Release the memory for object P. */ |
1381 | 1594 |
1382 void | 1595 void |
1383 ggc_free (void *p) | 1596 ggc_free (void *p) |
1384 { | 1597 { |
1598 if (in_gc) | |
1599 return; | |
1600 | |
1385 page_entry *pe = lookup_page_table_entry (p); | 1601 page_entry *pe = lookup_page_table_entry (p); |
1386 size_t order = pe->order; | 1602 size_t order = pe->order; |
1387 size_t size = OBJECT_SIZE (order); | 1603 size_t size = OBJECT_SIZE (order); |
1388 | 1604 |
1389 #ifdef GATHER_STATISTICS | 1605 if (GATHER_STATISTICS) |
1390 ggc_free_overhead (p); | 1606 ggc_free_overhead (p); |
1391 #endif | |
1392 | 1607 |
1393 if (GGC_DEBUG_LEVEL >= 3) | 1608 if (GGC_DEBUG_LEVEL >= 3) |
1394 fprintf (G.debug_file, | 1609 fprintf (G.debug_file, |
1395 "Freeing object, actual size=%lu, at %p on %p\n", | 1610 "Freeing object, actual size=%lu, at %p on %p\n", |
1396 (unsigned long) size, p, (void *) pe); | 1611 (unsigned long) size, p, (void *) pe); |
1497 | 1712 |
1498 /* Initialize the ggc-mmap allocator. */ | 1713 /* Initialize the ggc-mmap allocator. */ |
1499 void | 1714 void |
1500 init_ggc (void) | 1715 init_ggc (void) |
1501 { | 1716 { |
1717 static bool init_p = false; | |
1502 unsigned order; | 1718 unsigned order; |
1503 | 1719 |
1504 G.pagesize = getpagesize(); | 1720 if (init_p) |
1721 return; | |
1722 init_p = true; | |
1723 | |
1724 G.pagesize = getpagesize (); | |
1505 G.lg_pagesize = exact_log2 (G.pagesize); | 1725 G.lg_pagesize = exact_log2 (G.pagesize); |
1506 | 1726 |
1507 #ifdef HAVE_MMAP_DEV_ZERO | 1727 #ifdef HAVE_MMAP_DEV_ZERO |
1508 G.dev_zero_fd = open ("/dev/zero", O_RDONLY); | 1728 G.dev_zero_fd = open ("/dev/zero", O_RDONLY); |
1509 if (G.dev_zero_fd == -1) | 1729 if (G.dev_zero_fd == -1) |
1520 /* StunOS has an amazing off-by-one error for the first mmap allocation | 1740 /* StunOS has an amazing off-by-one error for the first mmap allocation |
1521 after fiddling with RLIMIT_STACK. The result, as hard as it is to | 1741 after fiddling with RLIMIT_STACK. The result, as hard as it is to |
1522 believe, is an unaligned page allocation, which would cause us to | 1742 believe, is an unaligned page allocation, which would cause us to |
1523 hork badly if we tried to use it. */ | 1743 hork badly if we tried to use it. */ |
1524 { | 1744 { |
1525 char *p = alloc_anon (NULL, G.pagesize); | 1745 char *p = alloc_anon (NULL, G.pagesize, true); |
1526 struct page_entry *e; | 1746 struct page_entry *e; |
1527 if ((size_t)p & (G.pagesize - 1)) | 1747 if ((uintptr_t)p & (G.pagesize - 1)) |
1528 { | 1748 { |
1529 /* How losing. Discard this one and try another. If we still | 1749 /* How losing. Discard this one and try another. If we still |
1530 can't get something useful, give up. */ | 1750 can't get something useful, give up. */ |
1531 | 1751 |
1532 p = alloc_anon (NULL, G.pagesize); | 1752 p = alloc_anon (NULL, G.pagesize, true); |
1533 gcc_assert (!((size_t)p & (G.pagesize - 1))); | 1753 gcc_assert (!((uintptr_t)p & (G.pagesize - 1))); |
1534 } | 1754 } |
1535 | 1755 |
1536 /* We have a good page, might as well hold onto it... */ | 1756 /* We have a good page, might as well hold onto it... */ |
1537 e = XCNEW (struct page_entry); | 1757 e = XCNEW (struct page_entry); |
1538 e->bytes = G.pagesize; | 1758 e->bytes = G.pagesize; |
1587 | 1807 |
1588 G.by_depth_in_use = 0; | 1808 G.by_depth_in_use = 0; |
1589 G.by_depth_max = INITIAL_PTE_COUNT; | 1809 G.by_depth_max = INITIAL_PTE_COUNT; |
1590 G.by_depth = XNEWVEC (page_entry *, G.by_depth_max); | 1810 G.by_depth = XNEWVEC (page_entry *, G.by_depth_max); |
1591 G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max); | 1811 G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max); |
1812 | |
1813 /* Allocate space for the depth 0 finalizers. */ | |
1814 G.finalizers.safe_push (vNULL); | |
1815 G.vec_finalizers.safe_push (vNULL); | |
1816 gcc_assert (G.finalizers.length() == 1); | |
1592 } | 1817 } |
1593 | 1818 |
1594 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P | 1819 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P |
1595 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */ | 1820 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */ |
1596 | 1821 |
1642 { | 1867 { |
1643 size_t num_objects = OBJECTS_IN_PAGE (p); | 1868 size_t num_objects = OBJECTS_IN_PAGE (p); |
1644 size_t bitmap_size = BITMAP_SIZE (num_objects + 1); | 1869 size_t bitmap_size = BITMAP_SIZE (num_objects + 1); |
1645 | 1870 |
1646 /* The data should be page-aligned. */ | 1871 /* The data should be page-aligned. */ |
1647 gcc_assert (!((size_t) p->page & (G.pagesize - 1))); | 1872 gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1))); |
1648 | 1873 |
1649 /* Pages that aren't in the topmost context are not collected; | 1874 /* Pages that aren't in the topmost context are not collected; |
1650 nevertheless, we need their in-use bit vectors to store GC | 1875 nevertheless, we need their in-use bit vectors to store GC |
1651 marks. So, back them up first. */ | 1876 marks. So, back them up first. */ |
1652 if (p->context_depth < G.context_depth) | 1877 if (p->context_depth < G.context_depth) |
1662 memset (p->in_use_p, 0, bitmap_size); | 1887 memset (p->in_use_p, 0, bitmap_size); |
1663 | 1888 |
1664 /* Make sure the one-past-the-end bit is always set. */ | 1889 /* Make sure the one-past-the-end bit is always set. */ |
1665 p->in_use_p[num_objects / HOST_BITS_PER_LONG] | 1890 p->in_use_p[num_objects / HOST_BITS_PER_LONG] |
1666 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG)); | 1891 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG)); |
1892 } | |
1893 } | |
1894 } | |
1895 | |
1896 /* Check if any blocks with a registered finalizer have become unmarked. If so | |
1897 run the finalizer and unregister it because the block is about to be freed. | |
1898 Note that no guarantee is made about what order finalizers will run in so |
1899 touching other objects in gc memory is extremely unwise. */ | |
1900 | |
1901 static void | |
1902 ggc_handle_finalizers () | |
1903 { | |
1904 unsigned dlen = G.finalizers.length(); | |
1905 for (unsigned d = G.context_depth; d < dlen; ++d) | |
1906 { | |
1907 vec<finalizer> &v = G.finalizers[d]; | |
1908 unsigned length = v.length (); | |
1909 for (unsigned int i = 0; i < length;) | |
1910 { | |
1911 finalizer &f = v[i]; | |
1912 if (!ggc_marked_p (f.addr ())) | |
1913 { | |
1914 f.call (); | |
1915 v.unordered_remove (i); | |
1916 length--; | |
1917 } | |
1918 else | |
1919 i++; | |
1920 } | |
1921 } | |
1922 | |
1923 gcc_assert (dlen == G.vec_finalizers.length()); | |
1924 for (unsigned d = G.context_depth; d < dlen; ++d) | |
1925 { | |
1926 vec<vec_finalizer> &vv = G.vec_finalizers[d]; | |
1927 unsigned length = vv.length (); | |
1928 for (unsigned int i = 0; i < length;) | |
1929 { | |
1930 vec_finalizer &f = vv[i]; | |
1931 if (!ggc_marked_p (f.addr ())) | |
1932 { | |
1933 f.call (); | |
1934 vv.unordered_remove (i); | |
1935 length--; | |
1936 } | |
1937 else | |
1938 i++; | |
1667 } | 1939 } |
1668 } | 1940 } |
1669 } | 1941 } |
1670 | 1942 |
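Given the ordering caveat in the comment above, a hedged sketch of what a safe finalizer looks like: it releases only non-GC resources owned by the object and never follows pointers into other GC-managed memory. The type and field names are hypothetical.

```c++
/* Sketch only: a finalizer that is safe under the ordering caveat above.  */
#include <cstdio>

struct gc_owned_file            /* hypothetical GGC-allocated wrapper */
{
  FILE *stream;                 /* non-GC resource owned by the object */
};

static void
gc_owned_file_finalize (void *p)
{
  gc_owned_file *f = static_cast<gc_owned_file *> (p);
  if (f->stream)
    fclose (f->stream);         /* safe: touches no other GC objects */
}
```

A callback like this would be passed as the F argument of ggc_internal_alloc, recorded by add_finalizer, and run by ggc_handle_finalizers once the object is found unmarked.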
1671 /* Free all empty pages. Partially empty pages need no attention | 1943 /* Free all empty pages. Partially empty pages need no attention |
1905 collection. */ | 2177 collection. */ |
1906 float allocated_last_gc = | 2178 float allocated_last_gc = |
1907 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024); | 2179 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024); |
1908 | 2180 |
1909 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100; | 2181 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100; |
1910 | |
1911 if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect) | 2182 if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect) |
1912 return; | 2183 return; |
1913 | 2184 |
1914 timevar_push (TV_GC); | 2185 timevar_push (TV_GC); |
1915 if (!quiet_flag) | 2186 if (!quiet_flag) |
1928 /* Indicate that we've seen collections at this context depth. */ | 2199 /* Indicate that we've seen collections at this context depth. */ |
1929 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1; | 2200 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1; |
1930 | 2201 |
1931 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL); | 2202 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL); |
1932 | 2203 |
2204 in_gc = true; | |
1933 clear_marks (); | 2205 clear_marks (); |
1934 ggc_mark_roots (); | 2206 ggc_mark_roots (); |
1935 #ifdef GATHER_STATISTICS | 2207 ggc_handle_finalizers (); |
1936 ggc_prune_overhead_list (); | 2208 |
1937 #endif | 2209 if (GATHER_STATISTICS) |
2210 ggc_prune_overhead_list (); | |
2211 | |
1938 poison_pages (); | 2212 poison_pages (); |
1939 validate_free_objects (); | 2213 validate_free_objects (); |
1940 sweep_pages (); | 2214 sweep_pages (); |
1941 | 2215 |
2216 in_gc = false; | |
1942 G.allocated_last_gc = G.allocated; | 2217 G.allocated_last_gc = G.allocated; |
1943 | 2218 |
1944 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL); | 2219 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL); |
1945 | 2220 |
1946 timevar_pop (TV_GC); | 2221 timevar_pop (TV_GC); |
1947 | 2222 |
1948 if (!quiet_flag) | 2223 if (!quiet_flag) |
1949 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024); | 2224 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024); |
1950 if (GGC_DEBUG_LEVEL >= 2) | 2225 if (GGC_DEBUG_LEVEL >= 2) |
1951 fprintf (G.debug_file, "END COLLECTING\n"); | 2226 fprintf (G.debug_file, "END COLLECTING\n"); |
2227 } | |
2228 | |
2229 /* Assume that all GGC memory is reachable and grow the limits for next collection. | |
2230 With checking, trigger GGC so -Q compilation outputs how much of memory really is | |
2231 reachable. */ | |
2232 | |
2233 void | |
2234 ggc_grow (void) | |
2235 { | |
2236 if (!flag_checking) | |
2237 G.allocated_last_gc = MAX (G.allocated_last_gc, | |
2238 G.allocated); | |
2239 else | |
2240 ggc_collect (); | |
2241 if (!quiet_flag) | |
2242 fprintf (stderr, " {GC start %luk} ", (unsigned long) G.allocated / 1024); | |
1952 } | 2243 } |
1953 | 2244 |
1954 /* Print allocation statistics. */ | 2245 /* Print allocation statistics. */ |
1955 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \ | 2246 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \ |
1956 ? (x) \ | 2247 ? (x) \ |
1981 | 2272 |
1982 /* Collect some information about the various sizes of | 2273 /* Collect some information about the various sizes of |
1983 allocation. */ | 2274 allocation. */ |
1984 fprintf (stderr, | 2275 fprintf (stderr, |
1985 "Memory still allocated at the end of the compilation process\n"); | 2276 "Memory still allocated at the end of the compilation process\n"); |
1986 fprintf (stderr, "%-5s %10s %10s %10s\n", | 2277 fprintf (stderr, "%-8s %10s %10s %10s\n", |
1987 "Size", "Allocated", "Used", "Overhead"); | 2278 "Size", "Allocated", "Used", "Overhead"); |
1988 for (i = 0; i < NUM_ORDERS; ++i) | 2279 for (i = 0; i < NUM_ORDERS; ++i) |
1989 { | 2280 { |
1990 page_entry *p; | 2281 page_entry *p; |
1991 size_t allocated; | 2282 size_t allocated; |
2008 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i); | 2299 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i); |
2009 | 2300 |
2010 overhead += (sizeof (page_entry) - sizeof (long) | 2301 overhead += (sizeof (page_entry) - sizeof (long) |
2011 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1)); | 2302 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1)); |
2012 } | 2303 } |
2013 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n", | 2304 fprintf (stderr, "%-8lu %10lu%c %10lu%c %10lu%c\n", |
2014 (unsigned long) OBJECT_SIZE (i), | 2305 (unsigned long) OBJECT_SIZE (i), |
2015 SCALE (allocated), STAT_LABEL (allocated), | 2306 SCALE (allocated), STAT_LABEL (allocated), |
2016 SCALE (in_use), STAT_LABEL (in_use), | 2307 SCALE (in_use), STAT_LABEL (in_use), |
2017 SCALE (overhead), STAT_LABEL (overhead)); | 2308 SCALE (overhead), STAT_LABEL (overhead)); |
2018 total_overhead += overhead; | 2309 total_overhead += overhead; |
2019 } | 2310 } |
2020 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total", | 2311 fprintf (stderr, "%-8s %10lu%c %10lu%c %10lu%c\n", "Total", |
2021 SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped), | 2312 SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped), |
2022 SCALE (G.allocated), STAT_LABEL(G.allocated), | 2313 SCALE (G.allocated), STAT_LABEL (G.allocated), |
2023 SCALE (total_overhead), STAT_LABEL (total_overhead)); | 2314 SCALE (total_overhead), STAT_LABEL (total_overhead)); |
2024 | 2315 |
2025 #ifdef GATHER_STATISTICS | 2316 if (GATHER_STATISTICS) |
2026 { | 2317 { |
2027 fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n"); | 2318 fprintf (stderr, "\nTotal allocations and overheads during " |
2028 | 2319 "the compilation process\n"); |
2029 fprintf (stderr, "Total Overhead: %10lld\n", | 2320 |
2030 G.stats.total_overhead); | 2321 fprintf (stderr, "Total Overhead: %10" |
2031 fprintf (stderr, "Total Allocated: %10lld\n", | 2322 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead); |
2032 G.stats.total_allocated); | 2323 fprintf (stderr, "Total Allocated: %10" |
2033 | 2324 HOST_LONG_LONG_FORMAT "d\n", |
2034 fprintf (stderr, "Total Overhead under 32B: %10lld\n", | 2325 G.stats.total_allocated); |
2035 G.stats.total_overhead_under32); | 2326 |
2036 fprintf (stderr, "Total Allocated under 32B: %10lld\n", | 2327 fprintf (stderr, "Total Overhead under 32B: %10" |
2037 G.stats.total_allocated_under32); | 2328 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under32); |
2038 fprintf (stderr, "Total Overhead under 64B: %10lld\n", | 2329 fprintf (stderr, "Total Allocated under 32B: %10" |
2039 G.stats.total_overhead_under64); | 2330 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under32); |
2040 fprintf (stderr, "Total Allocated under 64B: %10lld\n", | 2331 fprintf (stderr, "Total Overhead under 64B: %10" |
2041 G.stats.total_allocated_under64); | 2332 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under64); |
2042 fprintf (stderr, "Total Overhead under 128B: %10lld\n", | 2333 fprintf (stderr, "Total Allocated under 64B: %10" |
2043 G.stats.total_overhead_under128); | 2334 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under64); |
2044 fprintf (stderr, "Total Allocated under 128B: %10lld\n", | 2335 fprintf (stderr, "Total Overhead under 128B: %10" |
2045 G.stats.total_allocated_under128); | 2336 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under128); |
2046 | 2337 fprintf (stderr, "Total Allocated under 128B: %10" |
2047 for (i = 0; i < NUM_ORDERS; i++) | 2338 HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under128); |
2048 if (G.stats.total_allocated_per_order[i]) | 2339 |
2049 { | 2340 for (i = 0; i < NUM_ORDERS; i++) |
2050 fprintf (stderr, "Total Overhead page size %7lu: %10lld\n", | 2341 if (G.stats.total_allocated_per_order[i]) |
2051 (unsigned long) OBJECT_SIZE (i), | 2342 { |
2052 G.stats.total_overhead_per_order[i]); | 2343 fprintf (stderr, "Total Overhead page size %9lu: %10" |
2053 fprintf (stderr, "Total Allocated page size %7lu: %10lld\n", | 2344 HOST_LONG_LONG_FORMAT "d\n", |
2054 (unsigned long) OBJECT_SIZE (i), | 2345 (unsigned long) OBJECT_SIZE (i), |
2055 G.stats.total_allocated_per_order[i]); | 2346 G.stats.total_overhead_per_order[i]); |
2056 } | 2347 fprintf (stderr, "Total Allocated page size %9lu: %10" |
2348 HOST_LONG_LONG_FORMAT "d\n", | |
2349 (unsigned long) OBJECT_SIZE (i), | |
2350 G.stats.total_allocated_per_order[i]); | |
2351 } | |
2057 } | 2352 } |
2058 #endif | |
2059 } | 2353 } |
2060 | 2354 |
2061 struct ggc_pch_ondisk | 2355 struct ggc_pch_ondisk |
2062 { | 2356 { |
2063 unsigned totals[NUM_ORDERS]; | 2357 unsigned totals[NUM_ORDERS]; |
2064 }; | 2358 }; |
2065 | 2359 |
2066 struct ggc_pch_data | 2360 struct ggc_pch_data |
2067 { | 2361 { |
2068 struct ggc_pch_ondisk d; | 2362 struct ggc_pch_ondisk d; |
2069 size_t base[NUM_ORDERS]; | 2363 uintptr_t base[NUM_ORDERS]; |
2070 size_t written[NUM_ORDERS]; | 2364 size_t written[NUM_ORDERS]; |
2071 }; | 2365 }; |
2072 | 2366 |
2073 struct ggc_pch_data * | 2367 struct ggc_pch_data * |
2074 init_ggc_pch (void) | 2368 init_ggc_pch (void) |
2076 return XCNEW (struct ggc_pch_data); | 2370 return XCNEW (struct ggc_pch_data); |
2077 } | 2371 } |
2078 | 2372 |
2079 void | 2373 void |
2080 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, | 2374 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, |
2081 size_t size, bool is_string ATTRIBUTE_UNUSED, | 2375 size_t size, bool is_string ATTRIBUTE_UNUSED) |
2082 enum gt_types_enum type ATTRIBUTE_UNUSED) | |
2083 { | 2376 { |
2084 unsigned order; | 2377 unsigned order; |
2085 | 2378 |
2086 if (size < NUM_SIZE_LOOKUP) | 2379 if (size < NUM_SIZE_LOOKUP) |
2087 order = size_lookup[size]; | 2380 order = size_lookup[size]; |
2100 { | 2393 { |
2101 size_t a = 0; | 2394 size_t a = 0; |
2102 unsigned i; | 2395 unsigned i; |
2103 | 2396 |
2104 for (i = 0; i < NUM_ORDERS; i++) | 2397 for (i = 0; i < NUM_ORDERS; i++) |
2105 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize); | 2398 a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i)); |
2106 return a; | 2399 return a; |
2107 } | 2400 } |
2108 | 2401 |
2109 void | 2402 void |
2110 ggc_pch_this_base (struct ggc_pch_data *d, void *base) | 2403 ggc_pch_this_base (struct ggc_pch_data *d, void *base) |
2111 { | 2404 { |
2112 size_t a = (size_t) base; | 2405 uintptr_t a = (uintptr_t) base; |
2113 unsigned i; | 2406 unsigned i; |
2114 | 2407 |
2115 for (i = 0; i < NUM_ORDERS; i++) | 2408 for (i = 0; i < NUM_ORDERS; i++) |
2116 { | 2409 { |
2117 d->base[i] = a; | 2410 d->base[i] = a; |
2118 a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize); | 2411 a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i)); |
2119 } | 2412 } |
2120 } | 2413 } |
2121 | 2414 |
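A standalone sketch (with made-up totals) of how ggc_pch_this_base carves one contiguous PCH region into per-order sub-regions, each padded to a page boundary the way PAGE_ALIGN does:

```c++
/* Sketch only, with made-up totals and a hypothetical base address.  */
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main ()
{
  const uintptr_t pagesize = 4096;              /* assumed */
  uintptr_t a = 0x70000000;                     /* hypothetical mmap base */
  const size_t object_size[3] = { 8, 16, 32 };  /* stand-ins for OBJECT_SIZE */
  const size_t totals[3] = { 1000, 300, 50 };   /* stand-ins for d->d.totals */

  for (int i = 0; i < 3; i++)
    {
      printf ("order %d base = %#lx\n", i, (unsigned long) a);
      uintptr_t bytes = totals[i] * object_size[i];
      a += (bytes + pagesize - 1) & ~(pagesize - 1);   /* PAGE_ALIGN */
    }
  return 0;
}
```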
2122 | 2415 |
2123 char * | 2416 char * |
2124 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, | 2417 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, |
2125 size_t size, bool is_string ATTRIBUTE_UNUSED, | 2418 size_t size, bool is_string ATTRIBUTE_UNUSED) |
2126 enum gt_types_enum type ATTRIBUTE_UNUSED) | |
2127 { | 2419 { |
2128 unsigned order; | 2420 unsigned order; |
2129 char *result; | 2421 char *result; |
2130 | 2422 |
2131 if (size < NUM_SIZE_LOOKUP) | 2423 if (size < NUM_SIZE_LOOKUP) |
2148 { | 2440 { |
2149 /* Nothing to do. */ | 2441 /* Nothing to do. */ |
2150 } | 2442 } |
2151 | 2443 |
2152 void | 2444 void |
2153 ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED, | 2445 ggc_pch_write_object (struct ggc_pch_data *d, |
2154 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED, | 2446 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED, |
2155 size_t size, bool is_string ATTRIBUTE_UNUSED) | 2447 size_t size, bool is_string ATTRIBUTE_UNUSED) |
2156 { | 2448 { |
2157 unsigned order; | 2449 unsigned order; |
2158 static const char emptyBytes[256] = { 0 }; | 2450 static const char emptyBytes[256] = { 0 }; |
2165 while (size > OBJECT_SIZE (order)) | 2457 while (size > OBJECT_SIZE (order)) |
2166 order++; | 2458 order++; |
2167 } | 2459 } |
2168 | 2460 |
2169 if (fwrite (x, size, 1, f) != 1) | 2461 if (fwrite (x, size, 1, f) != 1) |
2170 fatal_error ("can%'t write PCH file: %m"); | 2462 fatal_error (input_location, "can%'t write PCH file: %m"); |
2171 | 2463 |
2172 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the | 2464 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the |
2173 object out to OBJECT_SIZE(order). This happens for strings. */ | 2465 object out to OBJECT_SIZE(order). This happens for strings. */ |
2174 | 2466 |
2175 if (size != OBJECT_SIZE (order)) | 2467 if (size != OBJECT_SIZE (order)) |
2176 { | 2468 { |
2177 unsigned padding = OBJECT_SIZE(order) - size; | 2469 unsigned padding = OBJECT_SIZE (order) - size; |
2178 | 2470 |
2179 /* To speed small writes, we use a nulled-out array that's larger | 2471 /* To speed small writes, we use a nulled-out array that's larger |
2180 than most padding requests as the source for our null bytes. This | 2472 than most padding requests as the source for our null bytes. This |
2181 permits us to do the padding with fwrite() rather than fseek(), and | 2473 permits us to do the padding with fwrite() rather than fseek(), and |
2182 limits the chance the OS may try to flush any outstanding writes. */ | 2474 limits the chance the OS may try to flush any outstanding writes. */ |
2183 if (padding <= sizeof(emptyBytes)) | 2475 if (padding <= sizeof (emptyBytes)) |
2184 { | 2476 { |
2185 if (fwrite (emptyBytes, 1, padding, f) != padding) | 2477 if (fwrite (emptyBytes, 1, padding, f) != padding) |
2186 fatal_error ("can%'t write PCH file"); | 2478 fatal_error (input_location, "can%'t write PCH file"); |
2187 } | 2479 } |
2188 else | 2480 else |
2189 { | 2481 { |
2190 /* Larger than our buffer? Just default to fseek. */ | 2482 /* Larger than our buffer? Just default to fseek. */ |
2191 if (fseek (f, padding, SEEK_CUR) != 0) | 2483 if (fseek (f, padding, SEEK_CUR) != 0) |
2192 fatal_error ("can%'t write PCH file"); | 2484 fatal_error (input_location, "can%'t write PCH file"); |
2193 } | 2485 } |
2194 } | 2486 } |
2195 | 2487 |
2196 d->written[order]++; | 2488 d->written[order]++; |
2197 if (d->written[order] == d->d.totals[order] | 2489 if (d->written[order] == d->d.totals[order] |
2198 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order), | 2490 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order), |
2199 G.pagesize), | 2491 G.pagesize), |
2200 SEEK_CUR) != 0) | 2492 SEEK_CUR) != 0) |
2201 fatal_error ("can%'t write PCH file: %m"); | 2493 fatal_error (input_location, "can%'t write PCH file: %m"); |
2202 } | 2494 } |
2203 | 2495 |
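Note: ggc_pch_write_object pads every object out to its order's slot size so the on-disk layout matches the page layout it will be mapped back into; small pads are written from a static zeroed buffer, and only oversized pads fall back to fseek, which keeps the stream in pure write mode. A reduced sketch of that strategy, with hypothetical names and error handling collapsed to a return code:

/* Sketch of the pad-to-slot-size trick used above (not GCC's code).
   Assumes SIZE <= SLOT_SIZE.  */
#include <stdio.h>

static int
write_padded (FILE *f, const void *obj, size_t size, size_t slot_size)
{
  /* Zero-filled by static storage duration; sized to cover most pads.  */
  static const char zeros[256];

  if (size > 0 && fwrite (obj, size, 1, f) != 1)
    return -1;

  size_t padding = slot_size - size;
  if (padding == 0)
    return 0;
  if (padding <= sizeof zeros)
    /* Stay in write mode: cheaper than a seek for small gaps.  */
    return fwrite (zeros, 1, padding, f) == padding ? 0 : -1;
  /* Larger than the buffer: seek forward instead of writing zeros.  */
  return fseek (f, (long) padding, SEEK_CUR) == 0 ? 0 : -1;
}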
2204 void | 2496 void |
2205 ggc_pch_finish (struct ggc_pch_data *d, FILE *f) | 2497 ggc_pch_finish (struct ggc_pch_data *d, FILE *f) |
2206 { | 2498 { |
2207 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1) | 2499 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1) |
2208 fatal_error ("can%'t write PCH file: %m"); | 2500 fatal_error (input_location, "can%'t write PCH file: %m"); |
2209 free (d); | 2501 free (d); |
2210 } | 2502 } |
2211 | 2503 |
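Note: ggc_pch_finish ends the image with the raw bytes of the per-order totals struct, and ggc_pch_read (below) starts by fread-ing the same struct back, so writer and reader must share the host's exact struct layout; this is one reason a PCH is only usable with the compiler binary that produced it. A minimal sketch of that write/read symmetry, using an invented trailer type in place of the real on-disk struct:

/* Sketch: dump and reload a fixed-layout trailer verbatim.  The type is
   illustrative; the real trailer carries one total per object order.  */
#include <stdio.h>

struct toy_trailer
{
  size_t totals[8];	/* bytes needed per object order */
};

static int
trailer_write (FILE *f, const struct toy_trailer *t)
{
  return fwrite (t, sizeof *t, 1, f) == 1 ? 0 : -1;
}

static int
trailer_read (FILE *f, struct toy_trailer *t)
{
  return fread (t, sizeof *t, 1, f) == 1 ? 0 : -1;
}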
2212 /* Move the PCH PTE entries just added to the end of by_depth, to the | 2504 /* Move the PCH PTE entries just added to the end of by_depth, to the |
2213 front. */ | 2505 front. */ |
2214 | 2506 |
2215 static void | 2507 static void |
2216 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables) | 2508 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables) |
2217 { | 2509 { |
2218 unsigned i; | |
2219 | |
2220 /* First, we swap the new entries to the front of the varrays. */ | 2510 /* First, we swap the new entries to the front of the varrays. */ |
2221 page_entry **new_by_depth; | 2511 page_entry **new_by_depth; |
2222 unsigned long **new_save_in_use; | 2512 unsigned long **new_save_in_use; |
2223 | 2513 |
2224 new_by_depth = XNEWVEC (page_entry *, G.by_depth_max); | 2514 new_by_depth = XNEWVEC (page_entry *, G.by_depth_max); |
2242 | 2532 |
2243 G.by_depth = new_by_depth; | 2533 G.by_depth = new_by_depth; |
2244 G.save_in_use = new_save_in_use; | 2534 G.save_in_use = new_save_in_use; |
2245 | 2535 |
2246 /* Now update all the index_by_depth fields. */ | 2536 /* Now update all the index_by_depth fields. */ |
2247 for (i = G.by_depth_in_use; i > 0; --i) | 2537 for (unsigned i = G.by_depth_in_use; i--;) |
2248 { | 2538 { |
2249 page_entry *p = G.by_depth[i-1]; | 2539 page_entry *p = G.by_depth[i]; |
2250 p->index_by_depth = i-1; | 2540 p->index_by_depth = i; |
2251 } | 2541 } |
2252 | 2542 |
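Note: the index_by_depth fixup above also shows the loop rewrite this patch applies: "for (unsigned i = n; i--;)" walks i from n-1 down to 0 and lets the body index with i directly, where the old form counted from n down to 1 and indexed with i-1. A small equivalence sketch with an invented entry type:

/* Sketch: both loops renumber the entries identically; only the index
   bookkeeping differs.  */
struct toy_entry { unsigned index; };

static void
renumber_new_style (struct toy_entry **v, unsigned n)
{
  for (unsigned i = n; i--;)
    v[i]->index = i;		/* visits n-1, n-2, ..., 0 */
}

static void
renumber_old_style (struct toy_entry **v, unsigned n)
{
  for (unsigned i = n; i > 0; --i)
    v[i - 1]->index = i - 1;	/* same visits, via i-1 */
}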
2253 /* And last, we update the depth pointers in G.depth. The first | 2543 /* And last, we update the depth pointers in G.depth. The first |
2254 entry is already 0, and context 0 entries always start at index | 2544 entry is already 0, and context 0 entries always start at index |
2255 0, so there is nothing to update in the first slot. We need a | 2545 0, so there is nothing to update in the first slot. We need a |
2276 #ifdef ENABLE_GC_CHECKING | 2566 #ifdef ENABLE_GC_CHECKING |
2277 poison_pages (); | 2567 poison_pages (); |
2278 #endif | 2568 #endif |
2279 /* Since we free all the allocated objects, the free list becomes | 2569 /* Since we free all the allocated objects, the free list becomes |
2280 useless. Validate it now, which will also clear it. */ | 2570 useless. Validate it now, which will also clear it. */ |
2281 validate_free_objects(); | 2571 validate_free_objects (); |
2282 | 2572 |
2283 /* No object read from a PCH file should ever be freed. So, set the | 2573 /* No object read from a PCH file should ever be freed. So, set the |
2284 context depth to 1, and set the depth of all the currently-allocated | 2574 context depth to 1, and set the depth of all the currently-allocated |
2285 pages to be 1 too. PCH pages will have depth 0. */ | 2575 pages to be 1 too. PCH pages will have depth 0. */ |
2286 gcc_assert (!G.context_depth); | 2576 gcc_assert (!G.context_depth); |
2287 G.context_depth = 1; | 2577 G.context_depth = 1; |
2578 /* Allocate space for the depth 1 finalizers. */ | |
2579 G.finalizers.safe_push (vNULL); | |
2580 G.vec_finalizers.safe_push (vNULL); | |
2581 gcc_assert (G.finalizers.length() == 2); | |
2288 for (i = 0; i < NUM_ORDERS; i++) | 2582 for (i = 0; i < NUM_ORDERS; i++) |
2289 { | 2583 { |
2290 page_entry *p; | 2584 page_entry *p; |
2291 for (p = G.pages[i]; p != NULL; p = p->next) | 2585 for (p = G.pages[i]; p != NULL; p = p->next) |
2292 p->context_depth = G.context_depth; | 2586 p->context_depth = G.context_depth; |
2293 } | 2587 } |
2294 | 2588 |
2295 /* Allocate the appropriate page-table entries for the pages read from | 2589 /* Allocate the appropriate page-table entries for the pages read from |
2296 the PCH file. */ | 2590 the PCH file. */ |
2297 if (fread (&d, sizeof (d), 1, f) != 1) | 2591 if (fread (&d, sizeof (d), 1, f) != 1) |
2298 fatal_error ("can%'t read PCH file: %m"); | 2592 fatal_error (input_location, "can%'t read PCH file: %m"); |
2299 | 2593 |
2300 for (i = 0; i < NUM_ORDERS; i++) | 2594 for (i = 0; i < NUM_ORDERS; i++) |
2301 { | 2595 { |
2302 struct page_entry *entry; | 2596 struct page_entry *entry; |
2303 char *pte; | 2597 char *pte; |
2304 size_t bytes; | 2598 size_t bytes; |
2305 size_t num_objs; | 2599 size_t num_objs; |
2306 size_t j; | 2600 size_t j; |
2307 | 2601 |
2308 if (d.totals[i] == 0) | 2602 if (d.totals[i] == 0) |
2309 continue; | 2603 continue; |
2310 | 2604 |
2311 bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize); | 2605 bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i)); |
2312 num_objs = bytes / OBJECT_SIZE (i); | 2606 num_objs = bytes / OBJECT_SIZE (i); |
2313 entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry) | 2607 entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry) |
2314 - sizeof (long) | 2608 - sizeof (long) |
2315 + BITMAP_SIZE (num_objs + 1))); | 2609 + BITMAP_SIZE (num_objs + 1))); |
2316 entry->bytes = bytes; | 2610 entry->bytes = bytes; |
2353 move_ptes_to_front (count_old_page_tables, count_new_page_tables); | 2647 move_ptes_to_front (count_old_page_tables, count_new_page_tables); |
2354 | 2648 |
2355 /* Update the statistics. */ | 2649 /* Update the statistics. */ |
2356 G.allocated = G.allocated_last_gc = offs - (char *)addr; | 2650 G.allocated = G.allocated_last_gc = offs - (char *)addr; |
2357 } | 2651 } |
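Note: for each order with a non-zero total, ggc_pch_read fabricates one page_entry spanning the mapped run, gives it context depth 0, and sets every bit in the trailing in_use bitmap so the collector treats the whole PCH payload as permanently live. A stripped-down sketch of that marking step, with invented types and the word width computed from limits.h instead of HOST_BITS_PER_LONG:

/* Sketch (not GCC's code): mark NUM_OBJS + 1 bits as allocated in a
   page entry's trailing in-use bitmap, the way the code above treats
   freshly mapped PCH pages.  */
#include <stddef.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof (unsigned long))

struct toy_page
{
  size_t num_free_objects;
  unsigned long in_use_p[4];	/* sized for this example only */
};

static void
mark_all_in_use (struct toy_page *p, size_t num_objs)
{
  size_t j = 0;

  /* Whole words first...  */
  for (; j + BITS_PER_LONG <= num_objs + 1; j += BITS_PER_LONG)
    p->in_use_p[j / BITS_PER_LONG] = ~0UL;
  /* ...then the leftover bits one by one.  */
  for (; j < num_objs + 1; j++)
    p->in_use_p[j / BITS_PER_LONG] |= 1UL << (j % BITS_PER_LONG);

  /* Nothing on a PCH page is ever considered free.  */
  p->num_free_objects = 0;
}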
2358 | |
2359 struct alloc_zone | |
2360 { | |
2361 int dummy; | |
2362 }; | |
2363 | |
2364 struct alloc_zone rtl_zone; |
2365 struct alloc_zone tree_zone; |
2366 struct alloc_zone tree_id_zone; |