comparison gcc/ggc-zone.c @ 0:a06113de4d67

first commit
author kent <kent@cr.ie.u-ryukyu.ac.jp>
date Fri, 17 Jul 2009 14:47:48 +0900
children 77e2b8dfacca
1 /* "Bag-of-pages" zone garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008
3 Free Software Foundation, Inc.
4
5 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
6 (dberlin@dberlin.org). Rewritten by Daniel Jacobowitz
7 <dan@codesourcery.com>.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "tm_p.h"
32 #include "toplev.h"
33 #include "varray.h"
34 #include "flags.h"
35 #include "ggc.h"
36 #include "timevar.h"
37 #include "params.h"
38 #include "bitmap.h"
39
40 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
41 file open. Prefer either to valloc. */
42 #ifdef HAVE_MMAP_ANON
43 # undef HAVE_MMAP_DEV_ZERO
44
45 # include <sys/mman.h>
46 # ifndef MAP_FAILED
47 # define MAP_FAILED -1
48 # endif
49 # if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
50 # define MAP_ANONYMOUS MAP_ANON
51 # endif
52 # define USING_MMAP
53 #endif
54
55 #ifdef HAVE_MMAP_DEV_ZERO
56 # include <sys/mman.h>
57 # ifndef MAP_FAILED
58 # define MAP_FAILED -1
59 # endif
60 # define USING_MMAP
61 #endif
62
63 #ifndef USING_MMAP
64 #error Zone collector requires mmap
65 #endif
66
67 #if (GCC_VERSION < 3001)
68 #define prefetch(X) ((void) X)
69 #define prefetchw(X) ((void) X)
70 #else
71 #define prefetch(X) __builtin_prefetch (X)
72 #define prefetchw(X) __builtin_prefetch (X, 1, 3)
73 #endif
74
75 /* FUTURE NOTES:
76
77 If we track inter-zone pointers, we can mark single zones at a
78 time.
79
80 If we have a zone where we guarantee no inter-zone pointers, we
81 could mark that zone separately.
82
83 The garbage zone should not be marked, and we should return 1 in
84 ggc_set_mark for any object in the garbage zone, which cuts off
85 marking quickly. */
86
87 /* Strategy:
88
89 This garbage-collecting allocator segregates objects into zones.
90 It also segregates objects into "large" and "small" bins. Large
91 objects are greater than page size.
92
93 Pages for small objects are broken up into chunks. The page has
94 a bitmap which marks the start position of each chunk (whether
95 allocated or free). Free chunks are on one of the zone's free
96 lists and contain a pointer to the next free chunk. Chunks in
97 most of the free lists have a fixed size determined by the
98 free list. Chunks in the "other" sized free list have their size
99 stored right after their chain pointer.
100
101 Empty pages (of all sizes) are kept on a single page cache list,
102 and are considered first when new pages are required; they are
103 deallocated at the start of the next collection if they haven't
104 been recycled by then. The free page list is currently per-zone. */
105
106 /* Define GGC_DEBUG_LEVEL to print debugging information.
107 0: No debugging output.
108 1: GC statistics only.
109 2: Page-entry allocations/deallocations as well.
110 3: Object allocations as well.
111 4: Object marks as well. */
112 #define GGC_DEBUG_LEVEL (0)
113
114 #ifndef HOST_BITS_PER_PTR
115 #define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
116 #endif
117
118 /* This structure manages small free chunks. The SIZE field is only
119 initialized if the chunk is in the "other" sized free list. Large
120 chunks are allocated one at a time to their own page, and so don't
121 come in here. */
122
123 struct alloc_chunk {
124 struct alloc_chunk *next_free;
125 unsigned int size;
126 };
127
128 /* The size of the fixed-size portion of a small page descriptor. */
129 #define PAGE_OVERHEAD (offsetof (struct small_page_entry, alloc_bits))
130
131 /* The collector's idea of the page size. This must be a power of two
132 no larger than the system page size, because pages must be aligned
133 to this amount and are tracked at this granularity in the page
134 table. We choose a size at compile time for efficiency.
135
136 We could make a better guess at compile time if PAGE_SIZE is a
137 constant in system headers, and PAGE_SHIFT is defined... */
138 #define GGC_PAGE_SIZE 4096
139 #define GGC_PAGE_MASK (GGC_PAGE_SIZE - 1)
140 #define GGC_PAGE_SHIFT 12
141
142 #if 0
143 /* Alternative definitions which use the runtime page size. */
144 #define GGC_PAGE_SIZE G.pagesize
145 #define GGC_PAGE_MASK G.page_mask
146 #define GGC_PAGE_SHIFT G.lg_pagesize
147 #endif
148
149 /* The size of a small page managed by the garbage collector. This
150 must currently be GGC_PAGE_SIZE, but with a few changes could
151 be any multiple of it to reduce certain kinds of overhead. */
152 #define SMALL_PAGE_SIZE GGC_PAGE_SIZE
153
154 /* Free bin information. These numbers may be in need of re-tuning.
155 In general, decreasing the number of free bins would seem to
156 increase the time it takes to allocate... */
157
158 /* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
159 today. */
160
161 #define NUM_FREE_BINS 64
162 #define FREE_BIN_DELTA MAX_ALIGNMENT
163 #define SIZE_BIN_DOWN(SIZE) ((SIZE) / FREE_BIN_DELTA)
164
165 /* Allocation and marking parameters. */
166
167 /* The smallest allocatable unit to keep track of. */
168 #define BYTES_PER_ALLOC_BIT MAX_ALIGNMENT
169
170 /* The smallest markable unit. If we require each allocated object
171 to contain at least two allocatable units, we can use half as many
172 bits for the mark bitmap. But this adds considerable complexity
173 to sweeping. */
174 #define BYTES_PER_MARK_BIT BYTES_PER_ALLOC_BIT
175
176 #define BYTES_PER_MARK_WORD (8 * BYTES_PER_MARK_BIT * sizeof (mark_type))
177
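#if 0
/* Illustrative sketch, not part of the original file: with the common
   values MAX_ALIGNMENT == 8 and a 32-bit mark_type, one mark word
   covers 8 * 8 * 4 == 256 bytes of page data, so a 4096-byte page
   needs sixteen mark words.  (mark_type itself is defined below.)  */
extern char mark_word_example[BYTES_PER_MARK_WORD == 256 ? 1 : -1];
#endif
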
/* We use this structure to determine the alignment required for
   allocations.

   There are several things wrong with this estimation of alignment.

   The maximum alignment for a structure is often less than the
   maximum alignment for a basic data type; for instance, on some
   targets long long must be aligned to sizeof (int) in a structure
   and sizeof (long long) in a variable.  i386-linux is one example;
   Darwin is another (sometimes, depending on the compiler in use).

   Also, long double is not included.  Nothing in GCC uses long
   double, so we assume that this is OK.  On powerpc-darwin, adding
   long double would bring the maximum alignment up to 16 bytes,
   and until we need long double (or to vectorize compiler operations)
   that's painfully wasteful.  This will need to change, some day.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))

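#if 0
/* Worked example, not part of the original file.  CEIL is the usual
   round-up division from system.h, CEIL (x, y) == ((x) + (y) - 1) / (y),
   so with MAX_ALIGNMENT == 8 a 13-byte request rounds up to 16 bytes,
   and an already-aligned 16-byte request stays at 16.  */
extern char round_up_example_1[ROUND_UP (13, 8) == 16 ? 1 : -1];
extern char round_up_example_2[ROUND_UP (16, 8) == 16 ? 1 : -1];
#endif
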
/* Types to use for the allocation and mark bitmaps.  It might be
   a good idea to add ffsl to libiberty and use unsigned long
   instead; that could speed us up where long is wider than int.  */

typedef unsigned int alloc_type;
typedef unsigned int mark_type;
#define alloc_ffs(x) ffs(x)

/* A page_entry records the status of an allocation page.  This is the
   common data between all three kinds of pages - small, large, and
   PCH.  */
typedef struct page_entry
{
  /* The address at which the memory is allocated.  */
  char *page;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;

#ifdef GATHER_STATISTICS
  /* How many collections we've survived.  */
  size_t survived;
#endif

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* Is this page part of the loaded PCH?  */
  bool pch_p;
} page_entry;

/* Additional data needed for small pages.  */
struct small_page_entry
{
  struct page_entry common;

  /* The next small page entry, or NULL if this is the last.  */
  struct small_page_entry *next;

  /* If currently marking this zone, a pointer to the mark bits
     for this page.  If we aren't currently marking this zone,
     this pointer may be stale (pointing to freed memory).  */
  mark_type *mark_bits;

  /* The allocation bitmap.  This array extends far enough to have
     one bit for every BYTES_PER_ALLOC_BIT bytes in the page.  */
  alloc_type alloc_bits[1];
};

/* Additional data needed for large pages.  */
struct large_page_entry
{
  struct page_entry common;

  /* The next large page entry, or NULL if this is the last.  */
  struct large_page_entry *next;

  /* The number of bytes allocated, not including the page entry.  */
  size_t bytes;

  /* The previous page in the list, so that we can unlink this one.  */
  struct large_page_entry *prev;

  /* During marking, is this object marked?  */
  bool mark_p;
};

/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                HOST_PAGE_SIZE_BITS
                        32      |                  |
      msb +----------------+----+------+------+ lsb
                           |    |
                    PAGE_L1_BITS|
                                |
                         PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS (8)
#define PAGE_L2_BITS (32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
#define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))

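#if 0
/* Illustrative sketch, not part of the original file: how an address
   decomposes under the macros above.  With GGC_PAGE_SHIFT == 12 and
   PAGE_L1_BITS == 8, PAGE_L2_BITS is 12, so for p == 0xdeadb000 the
   first-level index is 0xde, the second-level index is 0xadb, and
   the low twelve bits (0x000) are the offset within the GC page.  */
static void
page_table_lookup_example (void)
{
  const void *p = (const void *) 0xdeadb000;
  size_t l1 = LOOKUP_L1 (p);  /* 0xde */
  size_t l2 = LOOKUP_L2 (p);  /* 0xadb */
  (void) l1;
  (void) l2;
}
#endif
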
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size, and related constants.  */
  size_t pagesize;
  size_t lg_pagesize;
  size_t page_mask;

  /* The size to allocate for a small page entry.  This includes
     the size of the structure and the size of the allocation
     bitmap.  */
  size_t small_page_overhead;

#if defined (HAVE_MMAP_DEV_ZERO)
  /* A file descriptor open to /dev/zero for reading.  */
  int dev_zero_fd;
#endif

  /* Allocate pages in chunks of this size, to throttle calls to memory
     allocation routines.  The first page is used, the rest go onto the
     free list.  */
  size_t quire_size;

  /* The stream used for debugging output.  */
  FILE *debug_file;
} G;

/* A zone allocation structure.  There is one of these for every
   distinct allocation zone.  */
struct alloc_zone
{
  /* The most recent free chunk is saved here, instead of in the linked
     free list, to decrease list manipulation.  It is most likely that we
     will want this one.  */
  char *cached_free;
  size_t cached_free_size;

  /* Linked lists of free storage.  Slot I, for 1 <= I <= NUM_FREE_BINS,
     holds chunks of size I * FREE_BIN_DELTA.  All other chunks are in
     slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* The highest bin index which might be non-empty.  It may turn out
     to be empty, in which case we have to search downwards.  */
  size_t high_free_bin;

  /* Bytes currently allocated in this zone.  */
  size_t allocated;

  /* Linked list of the small pages in this zone.  */
  struct small_page_entry *pages;

  /* Doubly linked list of large pages in this zone.  */
  struct large_page_entry *large_pages;

  /* If we are currently marking this zone, a pointer to the mark bits.  */
  mark_type *mark_bits;

  /* Name of the zone.  */
  const char *name;

  /* The number of small pages currently allocated in this zone.  */
  size_t n_small_pages;

  /* Bytes allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* A cache of free system pages.  */
  struct small_page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total memory allocated with ggc_alloc.  */
    unsigned long long total_allocated;
    /* Total overhead for memory to be allocated with ggc_alloc.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;
  } stats;
#endif
} main_zone;

/* Some default zones.  */
struct alloc_zone rtl_zone;
struct alloc_zone tree_zone;
struct alloc_zone tree_id_zone;

/* The PCH zone does not need a normal zone structure, and it does
   not live on the linked list of zones.  */
struct pch_zone
{
  /* The start of the PCH zone.  NULL if there is none.  */
  char *page;

  /* The end of the PCH zone.  NULL if there is none.  */
  char *end;

  /* The size of the PCH zone.  0 if there is none.  */
  size_t bytes;

  /* The allocation bitmap for the PCH zone.  */
  alloc_type *alloc_bits;

  /* If we are currently marking, the mark bitmap for the PCH zone.
     When it is first read in, we could avoid marking the PCH,
     because it will not contain any pointers to GC memory outside
     of the PCH; however, the PCH is currently mapped as writable,
     so we must mark it in case new pointers are added.  */
  mark_type *mark_bits;
} pch_zone;

#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
static struct small_page_entry * alloc_small_page (struct alloc_zone *);
static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (char *, size_t, struct alloc_zone *);
static void free_small_page (struct small_page_entry *);
static void free_large_page (struct large_page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void new_ggc_zone_1 (struct alloc_zone *, const char *);

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Return NULL if the object wasn't allocated via the GC.  */

static inline page_entry *
lookup_page_table_if_allocated (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return NULL;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  if (! base[L1])
    return NULL;

  L2 = LOOKUP_L2 (p);
  if (L2 >= PAGE_L2_SIZE)
    return NULL;
  /* We might have a page entry which does not correspond exactly to a
     system page.  */
  if (base[L1][L2] && (const char *) p < base[L1][L2]->page)
    return NULL;

  return base[L1][L2];
}

/* Set the page table entry for the page that starts at P.  If ENTRY
   is NULL, clear the entry.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Find the page table entry associated with OBJECT.  */

static inline struct page_entry *
zone_get_object_page (const void *object)
{
  return lookup_page_table_entry (object);
}

/* Find which element of the alloc_bits array OBJECT should be
   recorded in.  */
static inline unsigned int
zone_get_object_alloc_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
          / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
}

/* Find which bit of the appropriate word in the alloc_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_alloc_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_ALLOC_BIT)
          % (8 * sizeof (alloc_type)));
}

/* Find which element of the mark_bits array OBJECT should be recorded
   in.  */
static inline unsigned int
zone_get_object_mark_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
          / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
}

/* Find which bit of the appropriate word in the mark_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_mark_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_MARK_BIT)
          % (8 * sizeof (mark_type)));
}

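#if 0
/* Worked example, not part of the original file, assuming
   MAX_ALIGNMENT == 8, a 32-bit alloc_type, and PAGE_BASE aligned to
   GGC_PAGE_SIZE.  An object at offset 0x128 (296) into its page
   occupies allocation-bit index 296 / 8 == 37, which lives in word
   37 / 32 == 1 at bit 37 % 32 == 5.  The mark bitmap uses the same
   geometry because BYTES_PER_MARK_BIT == BYTES_PER_ALLOC_BIT.  */
static void
object_bit_example (const char *page_base)
{
  const void *object = page_base + 0x128;
  gcc_assert (zone_get_object_alloc_word (object) == 1);
  gcc_assert (zone_get_object_alloc_bit (object) == 5);
}
#endif
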
/* Set the allocation bit corresponding to OBJECT in its page's
   bitmap.  Used to split this object from the preceding one.  */
static inline void
zone_set_object_alloc_bit (const void *object)
{
  struct small_page_entry *page
    = (struct small_page_entry *) zone_get_object_page (object);
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  page->alloc_bits[start_word] |= 1L << start_bit;
}

/* Clear the allocation bit corresponding to OBJECT in PAGE's
   bitmap.  Used to coalesce this object with the preceding
   one.  */
static inline void
zone_clear_object_alloc_bit (struct small_page_entry *page,
                             const void *object)
{
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  /* Would xor be quicker?  */
  page->alloc_bits[start_word] &= ~(1L << start_bit);
}

/* Find the size of the object which starts at START_WORD and
   START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
   Helper function for ggc_get_size and zone_find_object_size.  */

static inline size_t
zone_object_size_1 (alloc_type *alloc_bits,
                    size_t start_word, size_t start_bit,
                    size_t max_size)
{
  size_t size;
  alloc_type alloc_word;
  int indx;

  /* Load the first word.  */
  alloc_word = alloc_bits[start_word++];

  /* If that was the last bit in this word, we'll want to continue
     with the next word.  Otherwise, handle the rest of this word.  */
  if (start_bit)
    {
      indx = alloc_ffs (alloc_word >> start_bit);
      if (indx)
        /* indx is 1-based.  We started at the bit after the object's
           start, but we also ended at the bit after the object's end.
           It cancels out.  */
        return indx * BYTES_PER_ALLOC_BIT;

      /* The extra 1 accounts for the starting unit, before start_bit.  */
      size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;

      if (size >= max_size)
        return max_size;

      alloc_word = alloc_bits[start_word++];
    }
  else
    size = BYTES_PER_ALLOC_BIT;

  while (alloc_word == 0)
    {
      size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
      if (size >= max_size)
        return max_size;
      alloc_word = alloc_bits[start_word++];
    }

  indx = alloc_ffs (alloc_word);
  return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
}

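#if 0
/* Worked example, not part of the original file, assuming
   BYTES_PER_ALLOC_BIT == 8 and a 32-bit alloc_type.  An allocation
   word of 0x9 (binary 1001) describes an object starting at bit 0
   whose successor starts at bit 3, i.e. a 24-byte object.  Called
   with START_BIT == 1 (one unit past the object's start, as
   zone_find_object_size below does), alloc_ffs (0x9 >> 1) returns 3,
   so the size comes out as 3 * BYTES_PER_ALLOC_BIT == 24.  */
static void
object_size_example (void)
{
  alloc_type bits[1] = { 0x9 };
  gcc_assert (zone_object_size_1 (bits, 0, 1, 4096) == 24);
}
#endif
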
/* Find the size of OBJECT on small page PAGE.  */

static inline size_t
zone_find_object_size (struct small_page_entry *page,
                       const void *object)
{
  const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
  unsigned int start_word = zone_get_object_alloc_word (object_midptr);
  unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
  size_t max_size = (page->common.page + SMALL_PAGE_SIZE
                     - (const char *) object);

  return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
                             max_size);
}

/* highest_bit assumes that alloc_type is 32 bits.  */
extern char check_alloc_type_size[(sizeof (alloc_type) == 4) ? 1 : -1];

/* Find the highest set bit in VALUE.  Returns the bit number of that
   bit, using the same values as ffs.  */
static inline alloc_type
highest_bit (alloc_type value)
{
  /* This also assumes that alloc_type is unsigned.  */
  value |= value >> 1;
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  value = value ^ (value >> 1);
  return alloc_ffs (value);
}

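#if 0
/* Worked example, not part of the original file: for VALUE == 0x16
   (binary 10110) the shift cascade smears the value to 0x1f (11111),
   the XOR with its own right shift isolates the top bit (0x10), and
   alloc_ffs then reports bit 5.  */
static void
highest_bit_example (void)
{
  gcc_assert (highest_bit (0x16) == 5);
  gcc_assert (highest_bit (1) == 1);
}
#endif
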
/* Find the offset from the start of an object to P, which may point
   into the interior of the object.  */

static unsigned long
zone_find_object_offset (alloc_type *alloc_bits, size_t start_word,
                         size_t start_bit)
{
  unsigned int offset_in_bits;
  alloc_type alloc_word = alloc_bits[start_word];

  /* Mask off any bits after the initial bit, but make sure to include
     the initial bit in the result.  Note that START_BIT is
     0-based.  */
  if (start_bit < 8 * sizeof (alloc_type) - 1)
    alloc_word &= (1 << (start_bit + 1)) - 1;
  offset_in_bits = start_bit;

  /* Search for the start of the object.  */
  while (alloc_word == 0 && start_word > 0)
    {
      alloc_word = alloc_bits[--start_word];
      offset_in_bits += 8 * sizeof (alloc_type);
    }
  /* We must always find a set bit.  */
  gcc_assert (alloc_word != 0);
  /* Note that the result of highest_bit is 1-based.  */
  offset_in_bits -= highest_bit (alloc_word) - 1;

  return BYTES_PER_ALLOC_BIT * offset_in_bits;
}

/* Allocate the mark bits for every zone, and set the pointers on each
   page.  */
static void
zone_allocate_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page;
      mark_type *cur_marks;
      size_t mark_words, mark_words_per_page;
#ifdef ENABLE_CHECKING
      size_t n = 0;
#endif

      mark_words_per_page
        = (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
      mark_words = zone->n_small_pages * mark_words_per_page;
      zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
                                               mark_words);
      cur_marks = zone->mark_bits;
      for (page = zone->pages; page; page = page->next)
        {
          page->mark_bits = cur_marks;
          cur_marks += mark_words_per_page;
#ifdef ENABLE_CHECKING
          n++;
#endif
        }
#ifdef ENABLE_CHECKING
      gcc_assert (n == zone->n_small_pages);
#endif
    }

  /* We don't collect the PCH zone, but we do have to mark it
     (for now).  */
  if (pch_zone.bytes)
    pch_zone.mark_bits
      = (mark_type *) xcalloc (sizeof (mark_type),
                               CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
}

/* After marking and sweeping, release the memory used for mark bits.  */
static void
zone_free_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    if (zone->mark_bits)
      {
        free (zone->mark_bits);
        zone->mark_bits = NULL;
      }

  if (pch_zone.bytes)
    {
      free (pch_zone.mark_bits);
      pch_zone.mark_bits = NULL;
    }
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif

/* Allocate a new page for allocating small objects in ZONE, and
   return an entry for it.  */

static struct small_page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct small_page_entry *entry;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
    }
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct small_page_entry *e, *f = zone->free_pages;
      int i;
      char *page;

      page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = G.quire_size - 1; i >= 1; i--)
        {
          e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
          e->common.page = page + (i << GGC_PAGE_SHIFT);
          e->common.zone = zone;
          e->next = f;
          f = e;
          set_page_table_entry (e->common.page, &e->common);
        }

      zone->free_pages = f;

      entry = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
      entry->common.page = page;
      entry->common.zone = zone;
      set_page_table_entry (page, &entry->common);
    }

  zone->n_small_pages++;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry, entry->common.page,
             entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}

/* Allocate a large page of size SIZE in ZONE.  */

static struct large_page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct large_page_entry *entry;
  char *page;
  size_t needed_size;

  needed_size = size + sizeof (struct large_page_entry);
  page = XNEWVAR (char, needed_size);

  entry = (struct large_page_entry *) page;

  entry->next = NULL;
  entry->common.page = page + sizeof (struct large_page_entry);
  entry->common.large_p = true;
  entry->common.pch_p = false;
  entry->common.zone = zone;
#ifdef GATHER_STATISTICS
  entry->common.survived = 0;
#endif
  entry->mark_p = false;
  entry->bytes = size;
  entry->prev = NULL;

  set_page_table_entry (entry->common.page, &entry->common);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s large page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry, entry->common.page,
             entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}


/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_small_page (struct small_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry,
             entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);

  gcc_assert (!entry->common.large_p);

  /* Mark the page as inaccessible.  Discard the handle to
     avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->common.page,
                                                SMALL_PAGE_SIZE));

  entry->next = entry->common.zone->free_pages;
  entry->common.zone->free_pages = entry;
  entry->common.zone->n_small_pages--;
}

/* Release a large page that is no longer needed.  */

static inline void
free_large_page (struct large_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry,
             entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);

  gcc_assert (entry->common.large_p);

  set_page_table_entry (entry->common.page, NULL);
  free (entry);
}

/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  struct small_page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->common.page;
      next = p->next;
      len = SMALL_PAGE_SIZE;
      set_page_table_entry (p->common.page, NULL);
      p = next;

      while (p && p->common.page == start + len)
        {
          next = p->next;
          len += SMALL_PAGE_SIZE;
          set_page_table_entry (p->common.page, NULL);
          p = next;
        }

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}

/* Place the block at PTR of size SIZE on the free list for ZONE.  */

static inline void
free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
{
  struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  gcc_assert (bin != 0);
  if (bin > NUM_FREE_BINS)
    {
      bin = 0;
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
                                                     sizeof (struct alloc_chunk)));
      chunk->size = size;
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr + sizeof (struct alloc_chunk),
                                                    size - sizeof (struct alloc_chunk)));
    }
  else
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
                                                     sizeof (struct alloc_chunk *)));
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr + sizeof (struct alloc_chunk *),
                                                    size - sizeof (struct alloc_chunk *)));
    }

  zone->free_chunks[bin] = chunk;
  if (bin > zone->high_free_bin)
    zone->high_free_bin = bin;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *) chunk);
}

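#if 0
/* Illustrative sketch, not part of the original file, assuming
   FREE_BIN_DELTA == 8 (i.e. MAX_ALIGNMENT == 8).  A 24-byte chunk
   maps to bin 3 and keeps only its chain pointer; a 1024-byte chunk
   maps to "bin" 128, which exceeds NUM_FREE_BINS, so it is threaded
   onto the "other" bin 0 with its size recorded in the chunk.  */
static void
free_bin_example (void)
{
  gcc_assert (SIZE_BIN_DOWN (24) == 3);
  gcc_assert (SIZE_BIN_DOWN (1024) > NUM_FREE_BINS);
}
#endif
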
/* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE.  */

void *
ggc_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
                     MEM_STAT_DECL)
{
  size_t bin;
  size_t csize;
  struct small_page_entry *entry;
  struct alloc_chunk *chunk, **pp;
  void *result;
  size_t size = orig_size;

  /* Make sure that zero-sized allocations get a unique and freeable
     pointer.  */
  if (size == 0)
    size = MAX_ALIGNMENT;
  else
    size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;

  /* Try to allocate the object from several different sources.  Each
     of these cases is responsible for setting RESULT and SIZE to
     describe the allocated block, before jumping to FOUND.  If a
     chunk is split, the allocate bit for the new chunk should also be
     set.

     Large objects are handled specially.  However, they'll just fail
     the next couple of conditions, so we can wait to check for them
     below.  The large object case is relatively rare (< 1%), so this
     is a win.  */

  /* First try to split the last chunk we allocated.  For best
     fragmentation behavior it would be better to look for a
     free bin of the appropriate size for a small object.  However,
     we're unlikely (1% - 7%) to find one, and this gives better
     locality behavior anyway.  This case handles the lion's share
     of all calls to this function.  */
  if (size <= zone->cached_free_size)
    {
      result = zone->cached_free;

      zone->cached_free_size -= size;
      if (zone->cached_free_size)
        {
          zone->cached_free += size;
          zone_set_object_alloc_bit (zone->cached_free);
        }

      goto found;
    }

  /* Next, try to find a free bin of the exactly correct size.  */

  /* We want to round SIZE up, rather than down, but we know it's
     already aligned to at least FREE_BIN_DELTA, so we can just
     shift.  */
  bin = SIZE_BIN_DOWN (size);

  if (bin <= NUM_FREE_BINS
      && (chunk = zone->free_chunks[bin]) != NULL)
    {
      /* We have a chunk of the right size.  Pull it off the free list
         and use it.  */

      zone->free_chunks[bin] = chunk->next_free;

      /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
         == FREE_BIN_DELTA.  */
      result = chunk;

      /* The allocation bits are already set correctly.  HIGH_FREE_BIN
         may now be wrong, if this was the last chunk in the high bin.
         Rather than fixing it up now, wait until we need to search
         the free bins.  */

      goto found;
    }

  /* Next, if there wasn't a chunk of the ideal size, look for a chunk
     to split.  We can find one in the too-big bin, or in the largest
     sized bin with a chunk in it.  Try the largest normal-sized bin
     first.  */

  if (zone->high_free_bin > bin)
    {
      /* Find the highest numbered free bin.  It will be at or below
         the watermark.  */
      while (zone->high_free_bin > bin
             && zone->free_chunks[zone->high_free_bin] == NULL)
        zone->high_free_bin--;

      if (zone->high_free_bin > bin)
        {
          size_t tbin = zone->high_free_bin;
          chunk = zone->free_chunks[tbin];

          /* Remove the chunk from its previous bin.  */
          zone->free_chunks[tbin] = chunk->next_free;

          result = (char *) chunk;

          /* Save the rest of the chunk for future allocation.  */
          if (zone->cached_free_size)
            free_chunk (zone->cached_free, zone->cached_free_size, zone);

          chunk = (struct alloc_chunk *) ((char *) result + size);
          zone->cached_free = (char *) chunk;
          zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;

          /* Mark the new free chunk as an object, so that we can
             find the size of the newly allocated object.  */
          zone_set_object_alloc_bit (chunk);

          /* HIGH_FREE_BIN may now be wrong, if this was the last
             chunk in the high bin.  Rather than fixing it up now,
             wait until we need to search the free bins.  */

          goto found;
        }
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->next_free;
      chunk = *pp;
    }

  if (chunk)
    {
      /* Remove the chunk from its previous bin.  */
      *pp = chunk->next_free;

      result = (char *) chunk;

      /* Save the rest of the chunk for future allocation, if there's any
         left over.  */
      csize = chunk->size;
      if (csize > size)
        {
          if (zone->cached_free_size)
            free_chunk (zone->cached_free, zone->cached_free_size, zone);

          chunk = (struct alloc_chunk *) ((char *) result + size);
          zone->cached_free = (char *) chunk;
          zone->cached_free_size = csize - size;

          /* Mark the new free chunk as an object.  */
          zone_set_object_alloc_bit (chunk);
        }

      goto found;
    }

  /* Handle large allocations.  We could choose any threshold between
     GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
     GGC_PAGE_SIZE.  It can't be smaller, because then it wouldn't
     be guaranteed to have a unique entry in the lookup table.  Large
     allocations will always fall through to here.  */
  if (size > GGC_PAGE_SIZE)
    {
      struct large_page_entry *entry = alloc_large_page (size, zone);

#ifdef GATHER_STATISTICS
      entry->common.survived = 0;
#endif

      entry->next = zone->large_pages;
      if (zone->large_pages)
        zone->large_pages->prev = entry;
      zone->large_pages = entry;

      result = entry->common.page;

      goto found;
    }

  /* Failing everything above, allocate a new small page.  */

  entry = alloc_small_page (zone);
  entry->next = zone->pages;
  zone->pages = entry;

  /* Mark the first chunk in the new page.  */
  entry->alloc_bits[0] = 1;

  result = entry->common.page;
  if (size < SMALL_PAGE_SIZE)
    {
      if (zone->cached_free_size)
        free_chunk (zone->cached_free, zone->cached_free_size, zone);

      zone->cached_free = (char *) result + size;
      zone->cached_free_size = SMALL_PAGE_SIZE - size;

      /* Mark the new free chunk as an object.  */
      zone_set_object_alloc_bit (zone->cached_free);
    }

 found:

  /* We could save TYPE in the chunk, but we don't use that for
     anything yet.  If we wanted to, we could do it by adding it
     either before the beginning of the chunk or after its end,
     and adjusting the size and pointer appropriately.  */

  /* We'll probably write to this after we return.  */
  prefetchw (result);

#ifdef ENABLE_GC_CHECKING
  /* `Poison' the entire allocated object.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
  memset (result, 0xaf, size);
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (result + orig_size,
                                                size - orig_size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, orig_size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size;

  timevar_ggc_mem_total += size;

#ifdef GATHER_STATISTICS
  ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);

  {
    size_t object_size = size;
    size_t overhead = object_size - orig_size;

    zone->stats.total_overhead += overhead;
    zone->stats.total_allocated += object_size;

    if (orig_size <= 32)
      {
        zone->stats.total_overhead_under32 += overhead;
        zone->stats.total_allocated_under32 += object_size;
      }
    if (orig_size <= 64)
      {
        zone->stats.total_overhead_under64 += overhead;
        zone->stats.total_allocated_under64 += object_size;
      }
    if (orig_size <= 128)
      {
        zone->stats.total_overhead_under128 += overhead;
        zone->stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
             (unsigned long) size, result);

  return result;
}

/* Allocate a chunk of SIZE bytes of memory whose type is GTE, in an
   appropriate zone for that type.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
                      MEM_STAT_DECL)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_alloc_zone_pass_stat (size, &tree_zone);

    case gt_ggc_e_7rtx_def:
      return ggc_alloc_zone_pass_stat (size, &rtl_zone);

    case gt_ggc_e_9rtvec_def:
      return ggc_alloc_zone_pass_stat (size, &rtl_zone);

    default:
      return ggc_alloc_zone_pass_stat (size, &main_zone);
    }
}

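#if 0
/* Usage sketch, not part of the original file: type-directed callers
   reach the dispatcher above, which routes tree and RTL objects to
   their dedicated zones and everything else to the main zone.  The
   MEM_STAT_INFO convenience macro (from statistics.h) supplies the
   caller's location when GATHER_STATISTICS is enabled.  */
static void *
typed_allocation_example (void)
{
  /* 64 arbitrary bytes, routed to rtl_zone by the switch above.  */
  return ggc_alloc_typed_stat (gt_ggc_e_7rtx_def, 64 MEM_STAT_INFO);
}
#endif
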
/* Normal ggc_alloc simply allocates into the main zone.  */

void *
ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
  return ggc_alloc_zone_pass_stat (size, &main_zone);
}

/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_region(PTR, SIZE) \
  memset ((PTR), 0xa5, (SIZE))
#else
#define poison_region(PTR, SIZE)
#endif

/* Free the object at P.  */

void
ggc_free (void *p)
{
  struct page_entry *page;

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  poison_region (p, ggc_get_size (p));

  page = zone_get_object_page (p);

  if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      /* Remove the page from the linked list.  */
      if (large_page->prev)
        large_page->prev->next = large_page->next;
      else
        {
          gcc_assert (large_page->common.zone->large_pages == large_page);
          large_page->common.zone->large_pages = large_page->next;
        }
      if (large_page->next)
        large_page->next->prev = large_page->prev;

      large_page->common.zone->allocated -= large_page->bytes;

      /* Release the memory associated with this object.  */
      free_large_page (large_page);
    }
  else if (page->pch_p)
    /* Don't do anything.  We won't allocate a new object from the
       PCH zone so there's no point in releasing anything.  */
    ;
  else
    {
      size_t size = ggc_get_size (p);

      page->zone->allocated -= size;

      /* Add the chunk to the free list.  We don't bother with coalescing,
         since we are likely to want a chunk of this size again.  */
      free_chunk ((char *) p, size, page->zone);
    }
}

/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned long offset;

  if (!p)
    return;

  /* Look up the page on which the object is allocated.  */
  entry = lookup_page_table_if_allocated (p);
  if (! entry)
    return;

  if (entry->pch_p)
    {
      size_t alloc_word, alloc_bit, t;
      t = ((const char *) p - pch_zone.page) / BYTES_PER_ALLOC_BIT;
      alloc_word = t / (8 * sizeof (alloc_type));
      alloc_bit = t % (8 * sizeof (alloc_type));
      offset = zone_find_object_offset (pch_zone.alloc_bits, alloc_word,
                                        alloc_bit);
    }
  else if (entry->large_p)
    {
      struct large_page_entry *le = (struct large_page_entry *) entry;
      offset = ((const char *) p) - entry->page;
      gcc_assert (offset < le->bytes);
    }
  else
    {
      struct small_page_entry *se = (struct small_page_entry *) entry;
      unsigned int start_word = zone_get_object_alloc_word (p);
      unsigned int start_bit = zone_get_object_alloc_bit (p);
      offset = zone_find_object_offset (se->alloc_bits, start_word, start_bit);

      /* On some platforms a char* will not necessarily line up on an
         allocation boundary, so we have to update the offset to
         account for the leftover bytes.  */
      offset += (size_t) p % BYTES_PER_ALLOC_BIT;
    }

  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
         of an allocated object.  We assume it points to the middle of
         a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  /* Inefficient, but also unlikely to matter.  */
  ggc_set_mark (p);
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
        return 1;
      pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
    }
  else if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      if (large_page->mark_p)
        return 1;
      large_page->mark_p = true;
    }
  else
    {
      struct small_page_entry *small_page
        = (struct small_page_entry *) page;

      if (small_page->mark_bits[zone_get_object_mark_word (p)]
          & (1 << zone_get_object_mark_bit (p)))
        return 1;
      small_page->mark_bits[zone_get_object_mark_word (p)]
        |= (1 << zone_get_object_mark_bit (p));
    }

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

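#if 0
/* Usage sketch, not part of the original file: the generated
   gt_ggc_mx_* walkers use ggc_set_mark both to set the mark and to
   test whether the object was already visited, so marked objects cut
   off the traversal immediately.  */
static void
mark_object_example (const void *p)
{
  if (p == NULL || ggc_set_mark (p))
    return;  /* Already marked; don't walk it twice.  */
  /* ...otherwise recurse into P's pointer fields here...  */
}
#endif
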
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t mark_word, mark_bit, offset;
      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
      mark_word = offset / (8 * sizeof (mark_type));
      mark_bit = offset % (8 * sizeof (mark_type));

      return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
    }

  if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      return large_page->mark_p;
    }
  else
    {
      struct small_page_entry *small_page
        = (struct small_page_entry *) page;

      return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
                   & (1 << zone_get_object_mark_bit (p)));
    }
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  struct page_entry *page;
  const char *ptr = (const char *) p;

  page = zone_get_object_page (p);

  if (page->pch_p)
    {
      size_t alloc_word, alloc_bit, offset, max_size;
      offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
      alloc_word = offset / (8 * sizeof (alloc_type));
      alloc_bit = offset % (8 * sizeof (alloc_type));
      max_size = pch_zone.bytes - (ptr - pch_zone.page);
      return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
                                 max_size);
    }

  if (page->large_p)
    return ((struct large_page_entry *) page)->bytes;
  else
    return zone_find_object_size ((struct small_page_entry *) page, p);
}

/* Initialize the ggc-zone-mmap allocator.  */
void
init_ggc (void)
{
  /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
     a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
     the current assumptions to hold.  */

  gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);

  /* Set up the main zone by hand.  */
  main_zone.name = "Main zone";
  G.zones = &main_zone;

  /* Allocate the default zones.  */
  new_ggc_zone_1 (&rtl_zone, "RTL zone");
  new_ggc_zone_1 (&tree_zone, "Tree zone");
  new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);
  G.page_mask = ~(G.pagesize - 1);

  /* Require the system page size to be a multiple of GGC_PAGE_SIZE.  */
  gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);

  /* Allocate 16 system pages at a time.  */
  G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;

  /* Calculate the size of the allocation bitmap and other overhead.  */
  /* Right now we allocate bits for the page header and bitmap.  These
     are wasted, but a little tricky to eliminate.  */
  G.small_page_overhead
    = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
  /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  gcc_assert (G.dev_zero_fd != -1);
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
  setlinebuf (G.debug_file);
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
    struct small_page_entry *e;
    if ((size_t) p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize, &main_zone);
        gcc_assert (!((size_t) p & (G.pagesize - 1)));
      }

    if (GGC_PAGE_SIZE == G.pagesize)
      {
        /* We have a good page, might as well hold onto it...  */
        e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
        e->common.page = p;
        e->common.zone = &main_zone;
        e->next = main_zone.free_pages;
        set_page_table_entry (e->common.page, &e->common);
        main_zone.free_pages = e;
      }
    else
      {
        munmap (p, G.pagesize);
      }
  }
#endif
}

/* Start a new GGC zone.  */

static void
new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
{
  new_zone->name = name;
  new_zone->next_zone = G.zones->next_zone;
  G.zones->next_zone = new_zone;
}

struct alloc_zone *
new_ggc_zone (const char * name)
{
  struct alloc_zone *new_zone = XCNEW (struct alloc_zone);
  new_ggc_zone_1 (new_zone, name);
  return new_zone;
}

/* Destroy a GGC zone.  */
void
destroy_ggc_zone (struct alloc_zone * dead_zone)
{
  struct alloc_zone *z;

  for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
    /* Just find that zone.  */
    continue;

  /* We should have found the zone in the list.  Anything else is fatal.  */
  gcc_assert (z);

  /* z is dead, baby.  z is dead.  */
  z->dead = true;
}

/* Free all empty pages and objects within a page for a given zone.  */

static void
sweep_pages (struct alloc_zone *zone)
{
  struct large_page_entry **lpp, *lp, *lnext;
  struct small_page_entry **spp, *sp, *snext;
  char *last_free;
  size_t allocated = 0;
  bool nomarksinpage;

  /* First, reset the free_chunks lists, since we are going to
     re-free free chunks in hopes of coalescing them into large chunks.  */
  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
  zone->high_free_bin = 0;
  zone->cached_free = NULL;
  zone->cached_free_size = 0;

  /* Large pages are all or none affairs.  Either they are completely
     empty, or they are completely full.  */
  lpp = &zone->large_pages;
  for (lp = zone->large_pages; lp != NULL; lp = lnext)
    {
      gcc_assert (lp->common.large_p);

      lnext = lp->next;

#ifdef GATHER_STATISTICS
      /* This page has now survived another collection.  */
      lp->common.survived++;
#endif

      if (lp->mark_p)
        {
          lp->mark_p = false;
          allocated += lp->bytes;
          lpp = &lp->next;
        }
      else
        {
          *lpp = lnext;
#ifdef ENABLE_GC_CHECKING
          /* Poison the page.  */
          memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
          if (lp->prev)
            lp->prev->next = lp->next;
          if (lp->next)
            lp->next->prev = lp->prev;
          free_large_page (lp);
        }
    }

  spp = &zone->pages;
  for (sp = zone->pages; sp != NULL; sp = snext)
    {
      char *object, *last_object;
      char *end;
      alloc_type *alloc_word_p;
      mark_type *mark_word_p;

      gcc_assert (!sp->common.large_p);

      snext = sp->next;

#ifdef GATHER_STATISTICS
      /* This page has now survived another collection.  */
      sp->common.survived++;
#endif

      /* Step through all chunks, consolidate those that are free and
         insert them into the free lists.  Note that consolidation
         slows down collection slightly.  */

      last_object = object = sp->common.page;
      end = sp->common.page + SMALL_PAGE_SIZE;
      last_free = NULL;
      nomarksinpage = true;
      mark_word_p = sp->mark_bits;
      alloc_word_p = sp->alloc_bits;

      gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);

      object = sp->common.page;
      do
        {
          unsigned int i, n;
          alloc_type alloc_word;
          mark_type mark_word;

          alloc_word = *alloc_word_p++;
          mark_word = *mark_word_p++;

          if (mark_word)
            nomarksinpage = false;

          /* There ought to be some way to do this without looping...  */
          i = 0;
          while ((n = alloc_ffs (alloc_word)) != 0)
            {
              /* Extend the current state for n - 1 bits.  We can't
                 shift alloc_word by n, even though it isn't used in the
                 loop, in case only the highest bit was set.  */
              alloc_word >>= n - 1;
              mark_word >>= n - 1;
              object += BYTES_PER_MARK_BIT * (n - 1);

              if (mark_word & 1)
                {
                  if (last_free)
                    {
                      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
                                                                     object - last_free));
                      poison_region (last_free, object - last_free);
                      free_chunk (last_free, object - last_free, zone);
                      last_free = NULL;
                    }
                  else
                    allocated += object - last_object;
                  last_object = object;
                }
              else
                {
                  if (last_free == NULL)
                    {
                      last_free = object;
                      allocated += object - last_object;
                    }
                  else
                    zone_clear_object_alloc_bit (sp, object);
                }

              /* Shift to just after the alloc bit we handled.  */
              alloc_word >>= 1;
              mark_word >>= 1;
              object += BYTES_PER_MARK_BIT;

              i += n;
            }

          object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
        }
      while (object < end);

      if (nomarksinpage)
        {
          *spp = snext;
#ifdef ENABLE_GC_CHECKING
          VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (sp->common.page,
                                                         SMALL_PAGE_SIZE));
          /* Poison the page.  */
          memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
          free_small_page (sp);
          continue;
        }
      else if (last_free)
        {
          VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free,
                                                         object - last_free));
          poison_region (last_free, object - last_free);
          free_chunk (last_free, object - last_free, zone);
        }
      else
        allocated += object - last_object;

      spp = &sp->next;
    }

  zone->allocated = allocated;
}

/* Mark-and-sweep routine for collecting a single zone.  NEED_MARKING
   is true if we need to mark before sweeping, false if some other
   zone collection has already performed marking for us.  Returns true
   if we collected; with the current heuristics this is always the
   case.  */
1916
1917 static bool
1918 ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
1919 {
1920 #if 0
  /* Debugging aid: dump the number of free chunks and the total free
     bytes in each free-list bin.  */
1922 {
1923 int i;
1924 for (i = 0; i < NUM_FREE_BINS + 1; i++)
1925 {
1926 struct alloc_chunk *chunk;
1927 int n, tot;
1928
1929 n = 0;
1930 tot = 0;
1931 chunk = zone->free_chunks[i];
1932 while (chunk)
1933 {
1934 n++;
1935 tot += chunk->size;
1936 chunk = chunk->next_free;
1937 }
1938 fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
1939 i, n, tot);
1940 }
1941 }
1943 #endif
1944
1945 if (!quiet_flag)
1946 fprintf (stderr, " {%s GC %luk -> ",
1947 zone->name, (unsigned long) zone->allocated / 1024);
1948
1949 /* Zero the total allocated bytes. This will be recalculated in the
1950 sweep phase. */
1951 zone->allocated = 0;
1952
1953 /* Release the pages we freed the last time we collected, but didn't
1954 reuse in the interim. */
1955 release_pages (zone);
1956
1957 if (need_marking)
1958 {
1959 zone_allocate_marks ();
1960 ggc_mark_roots ();
1961 #ifdef GATHER_STATISTICS
1962 ggc_prune_overhead_list ();
1963 #endif
1964 }
1965
1966 sweep_pages (zone);
1967 zone->was_collected = true;
1968 zone->allocated_last_gc = zone->allocated;
1969
1970 if (!quiet_flag)
1971 fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
1972 return true;
1973 }
1974
1975 #ifdef GATHER_STATISTICS
1976 /* Calculate the average page survival rate in terms of number of
1977 collections. */
1978
1979 static float
1980 calculate_average_page_survival (struct alloc_zone *zone)
1981 {
1982 float count = 0.0;
1983 float survival = 0.0;
1984 struct small_page_entry *p;
1985 struct large_page_entry *lp;
1986 for (p = zone->pages; p; p = p->next)
1987 {
1988 count += 1.0;
1989 survival += p->common.survived;
1990 }
1991 for (lp = zone->large_pages; lp; lp = lp->next)
1992 {
1993 count += 1.0;
1994 survival += lp->common.survived;
1995 }
1996 return survival/count;
1997 }
1998 #endif
1999
2000 /* Top level collection routine. */
2001
2002 void
2003 ggc_collect (void)
2004 {
2005 struct alloc_zone *zone;
2006 bool marked = false;
2007
2008 timevar_push (TV_GC);
2009
2010 if (!ggc_force_collect)
2011 {
2012 float allocated_last_gc = 0, allocated = 0, min_expand;
2013
2014 for (zone = G.zones; zone; zone = zone->next_zone)
2015 {
2016 allocated_last_gc += zone->allocated_last_gc;
2017 allocated += zone->allocated;
2018 }
2019
2020 allocated_last_gc =
2021 MAX (allocated_last_gc,
2022 (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
2023 min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
2024
2025 if (allocated < allocated_last_gc + min_expand)
2026 {
2027 timevar_pop (TV_GC);
2028 return;
2029 }
2030 }
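
  /* A hypothetical illustration of the heuristic above: with
     --param ggc-min-heapsize=4096 (kB) and --param ggc-min-expand=30,
     the floor on ALLOCATED_LAST_GC is 4 MiB, MIN_EXPAND is 1.2 MiB,
     and we return without collecting until at least 5.2 MiB is
     allocated.  */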
2031
2032 /* Start by possibly collecting the main zone. */
2033 main_zone.was_collected = false;
2034 marked |= ggc_collect_1 (&main_zone, true);
2035
2036 /* In order to keep the number of collections down, we don't
2037 collect other zones unless we are collecting the main zone. This
2038 gives us roughly the same number of collections as we used to
     have with the old gc.  The number of collections is important
2040 because our main slowdown (according to profiling) is now in
2041 marking. So if we mark twice as often as we used to, we'll be
2042 twice as slow. Hopefully we'll avoid this cost when we mark
2043 zone-at-a-time. */
2044 /* NOTE drow/2004-07-28: We now always collect the main zone, but
2045 keep this code in case the heuristics are further refined. */
2046
2047 if (main_zone.was_collected)
2048 {
2049 struct alloc_zone *zone;
2050
2051 for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
2052 {
2053 zone->was_collected = false;
2054 marked |= ggc_collect_1 (zone, !marked);
2055 }
2056 }
2057
2058 #ifdef GATHER_STATISTICS
2059 /* Print page survival stats, if someone wants them. */
2060 if (GGC_DEBUG_LEVEL >= 2)
2061 {
2062 for (zone = G.zones; zone; zone = zone->next_zone)
2063 {
2064 if (zone->was_collected)
2065 {
2066 float f = calculate_average_page_survival (zone);
2067 printf ("Average page survival in zone `%s' is %f\n",
2068 zone->name, f);
2069 }
2070 }
2071 }
2072 #endif
2073
2074 if (marked)
2075 zone_free_marks ();
2076
2077 /* Free dead zones. */
2078 for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
2079 {
2080 if (zone->next_zone->dead)
2081 {
2082 struct alloc_zone *dead_zone = zone->next_zone;
2083
2084 printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);
2085
2086 /* The zone must be empty. */
2087 gcc_assert (!dead_zone->allocated);
2088
2089 /* Unchain the dead zone, release all its pages and free it. */
2090 zone->next_zone = zone->next_zone->next_zone;
2091 release_pages (dead_zone);
2092 free (dead_zone);
2093 }
2094 }
2095
2096 timevar_pop (TV_GC);
2097 }
2098
2099 /* Print allocation statistics. */
2100 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
2101 ? (x) \
2102 : ((x) < 1024*1024*10 \
2103 ? (x) / 1024 \
2104 : (x) / (1024*1024))))
2105 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
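
/* For example, 5000 bytes print as "5000 ", 2000000 bytes as "1953k"
   (2000000 / 1024), and 20000000 bytes as "19M" (20000000 / 1048576,
   truncated).  */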
2106
2107 void
2108 ggc_print_statistics (void)
2109 {
2110 struct alloc_zone *zone;
2111 struct ggc_statistics stats;
2112 size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
2113 size_t pte_overhead, i;
2114
2115 /* Clear the statistics. */
2116 memset (&stats, 0, sizeof (stats));
2117
2118 /* Make sure collection will really occur. */
2119 ggc_force_collect = true;
2120
2121 /* Collect and print the statistics common across collectors. */
2122 ggc_print_common_statistics (stderr, &stats);
2123
2124 ggc_force_collect = false;
2125
2126 /* Release free pages so that we will not count the bytes allocated
2127 there as part of the total allocated memory. */
2128 for (zone = G.zones; zone; zone = zone->next_zone)
2129 release_pages (zone);
2130
2131 /* Collect some information about the various sizes of
2132 allocation. */
2133 fprintf (stderr,
2134 "Memory still allocated at the end of the compilation process\n");
2135
2136 fprintf (stderr, "%20s %10s %10s %10s\n",
2137 "Zone", "Allocated", "Used", "Overhead");
2138 for (zone = G.zones; zone; zone = zone->next_zone)
2139 {
2140 struct large_page_entry *large_page;
2141 size_t overhead, allocated, in_use;
2142
2143 /* Skip empty zones. */
2144 if (!zone->pages && !zone->large_pages)
2145 continue;
2146
2147 allocated = in_use = 0;
2148
2149 overhead = sizeof (struct alloc_zone);
2150
2151 for (large_page = zone->large_pages; large_page != NULL;
2152 large_page = large_page->next)
2153 {
2154 allocated += large_page->bytes;
2155 in_use += large_page->bytes;
2156 overhead += sizeof (struct large_page_entry);
2157 }
2158
2159 /* There's no easy way to walk through the small pages finding
2160 used and unused objects. Instead, add all the pages, and
2161 subtract out the free list. */
2162
2163 allocated += GGC_PAGE_SIZE * zone->n_small_pages;
2164 in_use += GGC_PAGE_SIZE * zone->n_small_pages;
2165 overhead += G.small_page_overhead * zone->n_small_pages;
2166
2167 for (i = 0; i <= NUM_FREE_BINS; i++)
2168 {
2169 struct alloc_chunk *chunk = zone->free_chunks[i];
2170 while (chunk)
2171 {
2172 in_use -= ggc_get_size (chunk);
2173 chunk = chunk->next_free;
2174 }
2175 }
2176
2177 fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
2178 zone->name,
2179 SCALE (allocated), LABEL (allocated),
2180 SCALE (in_use), LABEL (in_use),
2181 SCALE (overhead), LABEL (overhead));
2182
2183 gcc_assert (in_use == zone->allocated);
2184
2185 total_overhead += overhead;
2186 total_allocated += zone->allocated;
2187 total_bytes_mapped += zone->bytes_mapped;
2188 }
2189
  /* Count the size of the page table as best we can.  On 32-bit
     hosts G.lookup is a single two-level table; on wider hosts it is
     a chain of such tables.  */
2191 #if HOST_BITS_PER_PTR <= 32
2192 pte_overhead = sizeof (G.lookup);
2193 for (i = 0; i < PAGE_L1_SIZE; i++)
2194 if (G.lookup[i])
2195 pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
2196 #else
2197 {
2198 page_table table = G.lookup;
2199 pte_overhead = 0;
2200 while (table)
2201 {
2202 pte_overhead += sizeof (*table);
2203 for (i = 0; i < PAGE_L1_SIZE; i++)
2204 if (table->table[i])
2205 pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
2206 table = table->next;
2207 }
2208 }
2209 #endif
2210 fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
2211 "", "", SCALE (pte_overhead), LABEL (pte_overhead));
2212 total_overhead += pte_overhead;
2213
2214 fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
2215 SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
	   SCALE (total_allocated), LABEL (total_allocated),
2217 SCALE (total_overhead), LABEL (total_overhead));
2218
2219 #ifdef GATHER_STATISTICS
2220 {
2221 unsigned long long all_overhead = 0, all_allocated = 0;
2222 unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
2223 unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
2224 unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;
2225
2226 fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
2227
2228 for (zone = G.zones; zone; zone = zone->next_zone)
2229 {
2230 all_overhead += zone->stats.total_overhead;
2231 all_allocated += zone->stats.total_allocated;
2232
2233 all_allocated_under32 += zone->stats.total_allocated_under32;
2234 all_overhead_under32 += zone->stats.total_overhead_under32;
2235
2236 all_allocated_under64 += zone->stats.total_allocated_under64;
2237 all_overhead_under64 += zone->stats.total_overhead_under64;
2238
2239 all_allocated_under128 += zone->stats.total_allocated_under128;
2240 all_overhead_under128 += zone->stats.total_overhead_under128;
2241
2242 fprintf (stderr, "%20s: %10lld\n",
2243 zone->name, zone->stats.total_allocated);
2244 }
2245
2246 fprintf (stderr, "\n");
2247
2248 fprintf (stderr, "Total Overhead: %10lld\n",
2249 all_overhead);
2250 fprintf (stderr, "Total Allocated: %10lld\n",
2251 all_allocated);
2252
2253 fprintf (stderr, "Total Overhead under 32B: %10lld\n",
2254 all_overhead_under32);
2255 fprintf (stderr, "Total Allocated under 32B: %10lld\n",
2256 all_allocated_under32);
2257 fprintf (stderr, "Total Overhead under 64B: %10lld\n",
2258 all_overhead_under64);
2259 fprintf (stderr, "Total Allocated under 64B: %10lld\n",
2260 all_allocated_under64);
2261 fprintf (stderr, "Total Overhead under 128B: %10lld\n",
2262 all_overhead_under128);
2263 fprintf (stderr, "Total Allocated under 128B: %10lld\n",
2264 all_allocated_under128);
2265 }
2266 #endif
2267 }
2268
2269 /* Precompiled header support. */
2270
2271 /* For precompiled headers, we sort objects based on their type. We
2272 also sort various objects into their own buckets; currently this
2273 covers strings and IDENTIFIER_NODE trees. The choices of how
2274 to sort buckets have not yet been tuned. */
2275
2276 #define NUM_PCH_BUCKETS (gt_types_enum_last + 3)
2277
2278 #define OTHER_BUCKET (gt_types_enum_last + 0)
2279 #define IDENTIFIER_BUCKET (gt_types_enum_last + 1)
2280 #define STRING_BUCKET (gt_types_enum_last + 2)
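
/* For example, an IDENTIFIER_NODE tree is sorted into
   IDENTIFIER_BUCKET, a string into STRING_BUCKET, and an object whose
   GTY type is known into the bucket for that type; see pch_bucket
   below.  */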
2281
struct ggc_pch_ondisk
{
  size_t total;		/* Total size of the PCH object data.  */
  size_t type_totals[NUM_PCH_BUCKETS];	/* Page-aligned total for each bucket.  */
};
2287
struct ggc_pch_data
{
  struct ggc_pch_ondisk d;	/* The footer written to the PCH file.  */
  size_t base;			/* Base address the PCH objects will occupy.  */
  size_t orig_base;		/* Copy of BASE, used to compute offsets.  */
  size_t alloc_size;		/* Size in bytes of the allocation bitmap.  */
  alloc_type *alloc_bits;	/* Allocation bitmap for the PCH objects.  */
  size_t type_bases[NUM_PCH_BUCKETS];	/* Next free address in each bucket.  */
  size_t start_offset;		/* File offset where the object data starts.  */
};
2298
2299 /* Initialize the PCH data structure. */
2300
2301 struct ggc_pch_data *
2302 init_ggc_pch (void)
2303 {
2304 return XCNEW (struct ggc_pch_data);
2305 }
2306
2307 /* Return which of the page-aligned buckets the object at X, with type
2308 TYPE, should be sorted into in the PCH. Strings will have
2309 IS_STRING set and TYPE will be gt_types_enum_last. Other objects
2310 of unknown type will also have TYPE equal to gt_types_enum_last. */
2311
2312 static int
2313 pch_bucket (void *x, enum gt_types_enum type,
2314 bool is_string)
2315 {
2316 /* Sort identifiers into their own bucket, to improve locality
2317 when searching the identifier hash table. */
2318 if (type == gt_ggc_e_14lang_tree_node
2319 && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
2320 return IDENTIFIER_BUCKET;
2321 else if (type == gt_types_enum_last)
2322 {
2323 if (is_string)
2324 return STRING_BUCKET;
2325 return OTHER_BUCKET;
2326 }
2327 return type;
2328 }
2329
2330 /* Add the size of object X to the size of the PCH data. */
2331
2332 void
2333 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2334 size_t size, bool is_string, enum gt_types_enum type)
2335 {
2336 /* NOTE: Right now we don't need to align up the size of any objects.
2337 Strings can be unaligned, and everything else is allocated to a
2338 MAX_ALIGNMENT boundary already. */
2339
2340 d->d.type_totals[pch_bucket (x, type, is_string)] += size;
2341 }
2342
2343 /* Return the total size of the PCH data. */
2344
2345 size_t
2346 ggc_pch_total_size (struct ggc_pch_data *d)
2347 {
2348 enum gt_types_enum i;
2349 size_t alloc_size, total_size;
2350
2351 total_size = 0;
2352 for (i = 0; i < NUM_PCH_BUCKETS; i++)
2353 {
2354 d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
2355 total_size += d->d.type_totals[i];
2356 }
2357 d->d.total = total_size;
2358
  /* Include the size of the allocation bitmap: one bit covers
     BYTES_PER_ALLOC_BIT bytes of object data, and the bitmap is
     rounded up to MAX_ALIGNMENT.  */
2360 alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
2361 alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
2362 d->alloc_size = alloc_size;
2363
2364 return d->d.total + alloc_size;
2365 }
2366
2367 /* Set the base address for the objects in the PCH file. */
2368
2369 void
2370 ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
2371 {
2372 int i;
2373 size_t base = (size_t) base_;
2374
2375 d->base = d->orig_base = base;
2376 for (i = 0; i < NUM_PCH_BUCKETS; i++)
2377 {
2378 d->type_bases[i] = base;
2379 base += d->d.type_totals[i];
2380 }
2381
2382 if (d->alloc_bits == NULL)
2383 d->alloc_bits = XCNEWVAR (alloc_type, d->alloc_size);
2384 }
2385
2386 /* Allocate a place for object X of size SIZE in the PCH file. */
2387
2388 char *
2389 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
2390 size_t size, bool is_string,
2391 enum gt_types_enum type)
2392 {
2393 size_t alloc_word, alloc_bit;
2394 char *result;
2395 int bucket = pch_bucket (x, type, is_string);
2396
2397 /* Record the start of the object in the allocation bitmap. We
2398 can't assert that the allocation bit is previously clear, because
2399 strings may violate the invariant that they are at least
2400 BYTES_PER_ALLOC_BIT long. This is harmless - ggc_get_size
2401 should not be called for strings. */
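  /* For illustration only (the real constants are target-dependent):
     with BYTES_PER_ALLOC_BIT == 4 and a 32-bit alloc_type, an object
     100 bytes past orig_base occupies allocation bit 100 / 4 == 25,
     i.e. alloc_word 0 and alloc_bit 25.  */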
2402 alloc_word = ((d->type_bases[bucket] - d->orig_base)
2403 / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
2404 alloc_bit = ((d->type_bases[bucket] - d->orig_base)
2405 / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
2406 d->alloc_bits[alloc_word] |= 1L << alloc_bit;
2407
2408 /* Place the object at the current pointer for this bucket. */
2409 result = (char *) d->type_bases[bucket];
2410 d->type_bases[bucket] += size;
2411 return result;
2412 }
2413
2414 /* Prepare to write out the PCH data to file F. */
2415
2416 void
2417 ggc_pch_prepare_write (struct ggc_pch_data *d,
2418 FILE *f)
2419 {
  /* We seek around a lot while writing.  Record where the padding
     in the PCH file ends, so that we can compute each object's
     offset in the file.  */
2423 d->start_offset = ftell (f);
2424 }
2425
2426 /* Write out object X of SIZE to file F. */
2427
2428 void
2429 ggc_pch_write_object (struct ggc_pch_data *d,
2430 FILE *f, void *x, void *newx,
2431 size_t size, bool is_string ATTRIBUTE_UNUSED)
2432 {
2433 if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
2434 fatal_error ("can't seek PCH file: %m");
2435
2436 if (fwrite (x, size, 1, f) != 1)
2437 fatal_error ("can't write PCH file: %m");
2438 }
2439
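/* Finish writing the PCH file: emit the allocation bitmap and the
   on-disk footer, then release the PCH data structure D.  */
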
2440 void
2441 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2442 {
2443 /* Write out the allocation bitmap. */
2444 if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
2445 fatal_error ("can't seek PCH file: %m");
2446
2447 if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
2448 fatal_error ("can't write PCH file: %m");
2449
2450 /* Done with the PCH, so write out our footer. */
2451 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2452 fatal_error ("can't write PCH file: %m");
2453
2454 free (d->alloc_bits);
2455 free (d);
2456 }
2457
2458 /* The PCH file from F has been mapped at ADDR. Read in any
2459 additional data from the file and set up the GC state. */
2460
2461 void
2462 ggc_pch_read (FILE *f, void *addr)
2463 {
2464 struct ggc_pch_ondisk d;
2465 size_t alloc_size;
2466 struct alloc_zone *zone;
2467 struct page_entry *pch_page;
2468 char *p;
2469
2470 if (fread (&d, sizeof (d), 1, f) != 1)
2471 fatal_error ("can't read PCH file: %m");
2472
2473 alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
2474 alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
2475
2476 pch_zone.bytes = d.total;
2477 pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
2478 pch_zone.page = (char *) addr;
2479 pch_zone.end = (char *) pch_zone.alloc_bits;
2480
2481 /* We've just read in a PCH file. So, every object that used to be
2482 allocated is now free. */
2483 for (zone = G.zones; zone; zone = zone->next_zone)
2484 {
2485 struct small_page_entry *page, *next_page;
2486 struct large_page_entry *large_page, *next_large_page;
2487
2488 zone->allocated = 0;
2489
2490 /* Clear the zone's free chunk list. */
2491 memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
2492 zone->high_free_bin = 0;
2493 zone->cached_free = NULL;
2494 zone->cached_free_size = 0;
2495
2496 /* Move all the small pages onto the free list. */
2497 for (page = zone->pages; page != NULL; page = next_page)
2498 {
2499 next_page = page->next;
2500 memset (page->alloc_bits, 0,
2501 G.small_page_overhead - PAGE_OVERHEAD);
2502 free_small_page (page);
2503 }
2504
2505 /* Discard all the large pages. */
2506 for (large_page = zone->large_pages; large_page != NULL;
2507 large_page = next_large_page)
2508 {
2509 next_large_page = large_page->next;
2510 free_large_page (large_page);
2511 }
2512
2513 zone->pages = NULL;
2514 zone->large_pages = NULL;
2515 }
2516
2517 /* Allocate the dummy page entry for the PCH, and set all pages
2518 mapped into the PCH to reference it. */
2519 pch_page = XCNEW (struct page_entry);
2520 pch_page->page = pch_zone.page;
2521 pch_page->pch_p = true;
2522
2523 for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
2524 set_page_table_entry (p, pch_page);
2525 }