0 | 1 /* "Bag-of-pages" zone garbage collector for the GNU compiler. |
67
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
3 2010 Free Software Foundation, Inc. |
0 | 4 |
5 Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin | |
6 (dberlin@dberlin.org). Rewritten by Daniel Jacobowitz | |
7 <dan@codesourcery.com>. | |
8 | |
9 This file is part of GCC. | |
10 | |
11 GCC is free software; you can redistribute it and/or modify it under | |
12 the terms of the GNU General Public License as published by the Free | |
13 Software Foundation; either version 3, or (at your option) any later | |
14 version. | |
15 | |
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
19 for more details. | |
20 | |
21 You should have received a copy of the GNU General Public License | |
22 along with GCC; see the file COPYING3. If not see | |
23 <http://www.gnu.org/licenses/>. */ | |
24 | |
25 #include "config.h" | |
26 #include "system.h" | |
27 #include "coretypes.h" | |
28 #include "tm.h" | |
29 #include "tree.h" | |
30 #include "rtl.h" | |
31 #include "tm_p.h" | |
#include "diagnostic-core.h"
#include "flags.h"
#include "ggc.h"
#include "ggc-internal.h"
#include "timevar.h"
#include "params.h"
#include "bitmap.h"
#include "plugin.h"

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifndef USING_MMAP
#error Zone collector requires mmap
#endif

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#define prefetchw(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#define prefetchw(X) __builtin_prefetch (X, 1, 3)
#endif

/* FUTURE NOTES:

   If we track inter-zone pointers, we can mark single zones at a
   time.

   If we have a zone where we guarantee no inter-zone pointers, we
   could mark that zone separately.

   The garbage zone should not be marked, and we should return 1 in
   ggc_set_mark for any object in the garbage zone, which cuts off
   marking quickly.  */

/* Strategy:

   This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are greater than page size.

   Pages for small objects are broken up into chunks.  The page has
   a bitmap which marks the start position of each chunk (whether
   allocated or free).  Free chunks are on one of the zone's free
   lists and contain a pointer to the next free chunk.  Chunks in
   most of the free lists have a fixed size determined by the
   free list.  Chunks in the "other" sized free list have their size
   stored right after their chain pointer.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  The free page list is currently per-zone.  */

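/* An illustrative sketch of the chunk bitmap described above; this is
   not part of the collector.  Assuming BYTES_PER_ALLOC_BIT is 8, a
   page that starts with a 24-byte chunk followed by a 16-byte chunk
   has alloc bits 1,0,0,1,0,1,...: a chunk's size is the distance from
   its start bit to the next set bit.  The hypothetical helper below
   walks an unpacked bit array to show the idea; the real code scans
   packed alloc_type words instead (see zone_object_size_1).  */
#if 0
static size_t
example_chunk_size (const unsigned char *bits, size_t start, size_t nbits)
{
  size_t i;
  for (i = start + 1; i < nbits; i++)
    if (bits[i])
      return (i - start) * 8;  /* 8 == assumed BYTES_PER_ALLOC_BIT.  */
  /* No later chunk: the object runs to the end of the page.  */
  return (nbits - start) * 8;
}
#endif
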
/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

/* This structure manages small free chunks.  The SIZE field is only
   initialized if the chunk is in the "other" sized free list.  Large
   chunks are allocated one at a time to their own page, and so don't
   come in here.  */

struct alloc_chunk {
  struct alloc_chunk *next_free;
  unsigned int size;
};

/* The size of the fixed-size portion of a small page descriptor.  */
#define PAGE_OVERHEAD   (offsetof (struct small_page_entry, alloc_bits))

/* The collector's idea of the page size.  This must be a power of two
   no larger than the system page size, because pages must be aligned
   to this amount and are tracked at this granularity in the page
   table.  We choose a size at compile time for efficiency.

   We could make a better guess at compile time if PAGE_SIZE is a
   constant in system headers, and PAGE_SHIFT is defined...  */
#define GGC_PAGE_SIZE   4096
#define GGC_PAGE_MASK   (GGC_PAGE_SIZE - 1)
#define GGC_PAGE_SHIFT  12

#if 0
/* Alternative definitions which use the runtime page size.  */
#define GGC_PAGE_SIZE   G.pagesize
#define GGC_PAGE_MASK   G.page_mask
#define GGC_PAGE_SHIFT  G.lg_pagesize
#endif

/* The size of a small page managed by the garbage collector.  This
   must currently be GGC_PAGE_SIZE, but with a few changes could
   be any multiple of it to reduce certain kinds of overhead.  */
#define SMALL_PAGE_SIZE GGC_PAGE_SIZE

/* Free bin information.  These numbers may be in need of re-tuning.
   In general, decreasing the number of free bins would seem to
   increase the time it takes to allocate...  */

/* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
   today.  */

#define NUM_FREE_BINS           64
#define FREE_BIN_DELTA          MAX_ALIGNMENT
#define SIZE_BIN_DOWN(SIZE)     ((SIZE) / FREE_BIN_DELTA)

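/* Worked example, assuming MAX_ALIGNMENT (and hence FREE_BIN_DELTA)
   is 8 bytes: a 24-byte chunk maps to bin SIZE_BIN_DOWN (24) == 3, a
   512-byte chunk to bin 64 == NUM_FREE_BINS, and anything larger
   lands in the "other" sized bin 0.  */
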
/* Allocation and marking parameters.  */

/* The smallest allocatable unit to keep track of.  */
#define BYTES_PER_ALLOC_BIT     MAX_ALIGNMENT

/* The smallest markable unit.  If we require each allocated object
   to contain at least two allocatable units, we can use half as many
   bits for the mark bitmap.  But this adds considerable complexity
   to sweeping.  */
#define BYTES_PER_MARK_BIT      BYTES_PER_ALLOC_BIT

#define BYTES_PER_MARK_WORD     (8 * BYTES_PER_MARK_BIT * sizeof (mark_type))

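/* For concreteness: with a 32-bit mark_type and an assumed 8-byte
   BYTES_PER_MARK_BIT, one mark word covers 8 * 8 * 4 == 256 bytes,
   so a 4096-byte page needs 16 mark words.  */
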
/* We use this structure to determine the alignment required for
   allocations.

   There are several things wrong with this estimation of alignment.

   The maximum alignment for a structure is often less than the
   maximum alignment for a basic data type; for instance, on some
   targets long long must be aligned to sizeof (int) in a structure
   and sizeof (long long) in a variable.  i386-linux is one example;
   Darwin is another (sometimes, depending on the compiler in use).

   Also, long double is not included.  Nothing in GCC uses long
   double, so we assume that this is OK.  On powerpc-darwin, adding
   long double would bring the maximum alignment up to 16 bytes,
   and until we need long double (or to vectorize compiler operations)
   that's painfully wasteful.  This will need to change, some day.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

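/* A minimal standalone sketch of the offsetof trick above; it is not
   compiled into GCC, and plain long long stands in for
   HOST_WIDEST_INT.  After the single char, the union is placed at the
   next offset satisfying its alignment, so offsetof reports that
   alignment (typically 8 on common 32- and 64-bit hosts).  */
#if 0
#include <stddef.h>
#include <stdio.h>

struct probe { char c; union { long long i; double d; } u; };

int
main (void)
{
  printf ("estimated maximum alignment: %u\n",
          (unsigned int) offsetof (struct probe, u));
  return 0;
}
#endif
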
/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))

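/* For example, ROUND_UP (10, 8) == 16 and ROUND_UP (16, 8) == 16.  */
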
/* Types to use for the allocation and mark bitmaps.  It might be
   a good idea to add ffsl to libiberty and use unsigned long
   instead; that could speed us up where long is wider than int.  */

typedef unsigned int alloc_type;
typedef unsigned int mark_type;
#define alloc_ffs(x) ffs(x)

/* A page_entry records the status of an allocation page.  This is the
   common data between all three kinds of pages - small, large, and
   PCH.  */
typedef struct page_entry
{
  /* The address at which the memory is allocated.  */
  char *page;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;

#ifdef GATHER_STATISTICS
  /* How many collections we've survived.  */
  size_t survived;
#endif

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* Is this page part of the loaded PCH?  */
  bool pch_p;
} page_entry;

/* Additional data needed for small pages.  */
struct small_page_entry
{
  struct page_entry common;

  /* The next small page entry, or NULL if this is the last.  */
  struct small_page_entry *next;

  /* If currently marking this zone, a pointer to the mark bits
     for this page.  If we aren't currently marking this zone,
     this pointer may be stale (pointing to freed memory).  */
  mark_type *mark_bits;

  /* The allocation bitmap.  This array extends far enough to have
     one bit for every BYTES_PER_ALLOC_BIT bytes in the page.  */
  alloc_type alloc_bits[1];
};

/* Additional data needed for large pages.  */
struct large_page_entry
{
  struct page_entry common;

  /* The next large page entry, or NULL if this is the last.  */
  struct large_page_entry *next;

  /* The number of bytes allocated, not including the page entry.  */
  size_t bytes;

  /* The previous page in the list, so that we can unlink this one.  */
  struct large_page_entry *prev;

  /* During marking, is this object marked?  */
  bool mark_p;
};

/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |
                         PAGE_L1_BITS
                                 |
                         PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS    (8)
#define PAGE_L2_BITS    (32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
#define PAGE_L1_SIZE    ((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE    ((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))

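/* Worked example with the settings above (GGC_PAGE_SHIFT == 12, so
   PAGE_L2_BITS == 12): for p == 0x12345678, LOOKUP_L1 (p) extracts
   bits 31-24, giving 0x12, and LOOKUP_L2 (p) extracts bits 23-12,
   giving 0x345; the page entry lives at base[0x12][0x345].  */
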
#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size, and related constants.  */
  size_t pagesize;
  size_t lg_pagesize;
  size_t page_mask;

  /* The size to allocate for a small page entry.  This includes
     the size of the structure and the size of the allocation
     bitmap.  */
  size_t small_page_overhead;

#if defined (HAVE_MMAP_DEV_ZERO)
  /* A file descriptor open to /dev/zero for reading.  */
  int dev_zero_fd;
#endif

  /* Allocate pages in chunks of this size, to throttle calls to memory
     allocation routines.  The first page is used, the rest go onto the
     free list.  */
  size_t quire_size;

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;

/* A zone allocation structure.  There is one of these for every
   distinct allocation zone.  */
struct alloc_zone
{
  /* The most recent free chunk is saved here, instead of in the linked
     free list, to decrease list manipulation.  It is most likely that we
     will want this one.  */
  char *cached_free;
  size_t cached_free_size;

  /* Linked lists of free storage.  Slots 1 ... NUM_FREE_BINS have chunks of size
     FREE_BIN_DELTA.  All other chunks are in slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* The highest bin index which might be non-empty.  It may turn out
     to be empty, in which case we have to search downwards.  */
  size_t high_free_bin;

  /* Bytes currently allocated in this zone.  */
  size_t allocated;

  /* Linked list of the small pages in this zone.  */
  struct small_page_entry *pages;

  /* Doubly linked list of large pages in this zone.  */
  struct large_page_entry *large_pages;

  /* If we are currently marking this zone, a pointer to the mark bits.  */
  mark_type *mark_bits;

  /* Name of the zone.  */
  const char *name;

  /* The number of small pages currently allocated in this zone.  */
  size_t n_small_pages;

  /* Bytes allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* A cache of free system pages.  */
  struct small_page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;
  } stats;
#endif
} main_zone;

/* Some default zones.  */
struct alloc_zone rtl_zone;
struct alloc_zone tree_zone;
struct alloc_zone tree_id_zone;

/* The PCH zone does not need a normal zone structure, and it does
   not live on the linked list of zones.  */
struct pch_zone
{
  /* The start of the PCH zone.  NULL if there is none.  */
  char *page;

  /* The end of the PCH zone.  NULL if there is none.  */
  char *end;

  /* The size of the PCH zone.  0 if there is none.  */
  size_t bytes;

  /* The allocation bitmap for the PCH zone.  */
  alloc_type *alloc_bits;

  /* If we are currently marking, the mark bitmap for the PCH zone.
     When it is first read in, we could avoid marking the PCH,
     because it will not contain any pointers to GC memory outside
     of the PCH; however, the PCH is currently mapped as writable,
     so we must mark it in case new pointers are added.  */
  mark_type *mark_bits;
} pch_zone;

#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
static struct small_page_entry * alloc_small_page (struct alloc_zone *);
static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (char *, size_t, struct alloc_zone *);
static void free_small_page (struct small_page_entry *);
static void free_large_page (struct large_page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void new_ggc_zone_1 (struct alloc_zone *, const char *);

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Return NULL if the object wasn't allocated via the GC.  */

static inline page_entry *
lookup_page_table_if_allocated (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return NULL;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  if (! base[L1])
    return NULL;

  L2 = LOOKUP_L2 (p);
  if (L2 >= PAGE_L2_SIZE)
    return NULL;
  /* We might have a page entry which does not correspond exactly to a
     system page.  */
  if (base[L1][L2] && (const char *) p < base[L1][L2]->page)
    return NULL;

  return base[L1][L2];
}

/* Set the page table entry for the page that starts at P.  If ENTRY
   is NULL, clear the entry.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Find the page table entry associated with OBJECT.  */

static inline struct page_entry *
zone_get_object_page (const void *object)
{
  return lookup_page_table_entry (object);
}

/* Find which element of the alloc_bits array OBJECT should be
   recorded in.  */
static inline unsigned int
zone_get_object_alloc_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
          / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
}

/* Find which bit of the appropriate word in the alloc_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_alloc_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_ALLOC_BIT)
          % (8 * sizeof (alloc_type)));
}

/* Find which element of the mark_bits array OBJECT should be recorded
   in.  */
static inline unsigned int
zone_get_object_mark_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
          / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
}

/* Find which bit of the appropriate word in the mark_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_mark_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_MARK_BIT)
          % (8 * sizeof (mark_type)));
}

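/* Worked example (32-bit alloc_type, assumed 8-byte
   BYTES_PER_ALLOC_BIT, so one alloc word covers 256 bytes): an object
   0x130 bytes into its page-aligned page lands in alloc word
   0x130 / 256 == 1, bit (0x130 / 8) % 32 == 6.  */
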
/* Set the allocation bit corresponding to OBJECT in its page's
   bitmap.  Used to split this object from the preceding one.  */
static inline void
zone_set_object_alloc_bit (const void *object)
{
  struct small_page_entry *page
    = (struct small_page_entry *) zone_get_object_page (object);
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  page->alloc_bits[start_word] |= 1L << start_bit;
}

/* Clear the allocation bit corresponding to OBJECT in PAGE's
   bitmap.  Used to coalesce this object with the preceding
   one.  */
static inline void
zone_clear_object_alloc_bit (struct small_page_entry *page,
                             const void *object)
{
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  /* Would xor be quicker?  */
  page->alloc_bits[start_word] &= ~(1L << start_bit);
}

/* Find the size of the object which starts at START_WORD and
   START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
   Helper function for ggc_get_size and zone_find_object_size.  */

static inline size_t
zone_object_size_1 (alloc_type *alloc_bits,
                    size_t start_word, size_t start_bit,
                    size_t max_size)
{
  size_t size;
  alloc_type alloc_word;
  int indx;

  /* Load the first word.  */
  alloc_word = alloc_bits[start_word++];

  /* If that was the last bit in this word, we'll want to continue
     with the next word.  Otherwise, handle the rest of this word.  */
  if (start_bit)
    {
      indx = alloc_ffs (alloc_word >> start_bit);
      if (indx)
        /* indx is 1-based.  We started at the bit after the object's
           start, but we also ended at the bit after the object's end.
           It cancels out.  */
        return indx * BYTES_PER_ALLOC_BIT;

      /* The extra 1 accounts for the starting unit, before start_bit.  */
      size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;

      if (size >= max_size)
        return max_size;

      alloc_word = alloc_bits[start_word++];
    }
  else
    size = BYTES_PER_ALLOC_BIT;

  while (alloc_word == 0)
    {
      size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
      if (size >= max_size)
        return max_size;
      alloc_word = alloc_bits[start_word++];
    }

  indx = alloc_ffs (alloc_word);
  return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
}

/* Find the size of OBJECT on small page PAGE.  */

static inline size_t
zone_find_object_size (struct small_page_entry *page,
                       const void *object)
{
  const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
  unsigned int start_word = zone_get_object_alloc_word (object_midptr);
  unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
  size_t max_size = (page->common.page + SMALL_PAGE_SIZE
                     - (const char *) object);

  return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
                             max_size);
}

/* highest_bit assumes that alloc_type is 32 bits.  */
extern char check_alloc_type_size[(sizeof (alloc_type) == 4) ? 1 : -1];

/* Find the highest set bit in VALUE.  Returns the bit number of that
   bit, using the same values as ffs.  */
static inline alloc_type
highest_bit (alloc_type value)
{
  /* This also assumes that alloc_type is unsigned.  */
  value |= value >> 1;
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  value = value ^ (value >> 1);
  return alloc_ffs (value);
}

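/* Worked example: for value == 0x28 (binary 101000), the shifts smear
   the leading bit downwards, giving 0x3f; the final xor isolates it
   again, giving 0x20; and alloc_ffs (0x20) returns 6, the 1-based
   index of the highest set bit.  */
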
/* Find the offset from the start of an object to P, which may point
   into the interior of the object.  */

static unsigned long
zone_find_object_offset (alloc_type *alloc_bits, size_t start_word,
                         size_t start_bit)
{
  unsigned int offset_in_bits;
  alloc_type alloc_word = alloc_bits[start_word];

  /* Mask off any bits after the initial bit, but make sure to include
     the initial bit in the result.  Note that START_BIT is
     0-based.  */
  if (start_bit < 8 * sizeof (alloc_type) - 1)
    alloc_word &= (1 << (start_bit + 1)) - 1;
  offset_in_bits = start_bit;

  /* Search for the start of the object.  */
  while (alloc_word == 0 && start_word > 0)
    {
      alloc_word = alloc_bits[--start_word];
      offset_in_bits += 8 * sizeof (alloc_type);
    }
  /* We must always find a set bit.  */
  gcc_assert (alloc_word != 0);
  /* Note that the result of highest_bit is 1-based.  */
  offset_in_bits -= highest_bit (alloc_word) - 1;

  return BYTES_PER_ALLOC_BIT * offset_in_bits;
}

/* Allocate the mark bits for every zone, and set the pointers on each
   page.  */
static void
zone_allocate_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page;
      mark_type *cur_marks;
      size_t mark_words, mark_words_per_page;
#ifdef ENABLE_CHECKING
      size_t n = 0;
#endif

      mark_words_per_page
        = (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
      mark_words = zone->n_small_pages * mark_words_per_page;
      zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
                                               mark_words);
      cur_marks = zone->mark_bits;
      for (page = zone->pages; page; page = page->next)
        {
          page->mark_bits = cur_marks;
          cur_marks += mark_words_per_page;
#ifdef ENABLE_CHECKING
          n++;
#endif
        }
      gcc_checking_assert (n == zone->n_small_pages);
    }

  /* We don't collect the PCH zone, but we do have to mark it
     (for now).  */
  if (pch_zone.bytes)
    pch_zone.mark_bits
      = (mark_type *) xcalloc (sizeof (mark_type),
                               CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
}

/* After marking and sweeping, release the memory used for mark bits.  */
static void
zone_free_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    if (zone->mark_bits)
      {
        free (zone->mark_bits);
        zone->mark_bits = NULL;
      }

  if (pch_zone.bytes)
    {
      free (pch_zone.mark_bits);
      pch_zone.mark_bits = NULL;
    }
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif

/* Allocate a new page for allocating small objects in ZONE, and
   return an entry for it.  */

static struct small_page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct small_page_entry *entry;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
    }
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct small_page_entry *e, *f = zone->free_pages;
      int i;
      char *page;

      page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = G.quire_size - 1; i >= 1; i--)
        {
          e = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
          e->common.page = page + (i << GGC_PAGE_SHIFT);
          e->common.zone = zone;
          e->next = f;
          f = e;
          set_page_table_entry (e->common.page, &e->common);
        }

      zone->free_pages = f;

      entry = XCNEWVAR (struct small_page_entry, G.small_page_overhead);
      entry->common.page = page;
      entry->common.zone = zone;
      set_page_table_entry (page, &entry->common);
    }

  zone->n_small_pages++;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry, entry->common.page,
             entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}

/* Allocate a large page of size SIZE in ZONE.  */

static struct large_page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct large_page_entry *entry;
  char *page;
  size_t needed_size;

  needed_size = size + sizeof (struct large_page_entry);
  page = XNEWVAR (char, needed_size);

  entry = (struct large_page_entry *) page;

  entry->next = NULL;
  entry->common.page = page + sizeof (struct large_page_entry);
  entry->common.large_p = true;
  entry->common.pch_p = false;
  entry->common.zone = zone;
#ifdef GATHER_STATISTICS
  entry->common.survived = 0;
#endif
  entry->mark_p = false;
  entry->bytes = size;
  entry->prev = NULL;

  set_page_table_entry (entry->common.page, &entry->common);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating %s large page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry, entry->common.page,
             entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}

/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_small_page (struct small_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry,
             entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);

  gcc_assert (!entry->common.large_p);

  /* Mark the page as inaccessible.  Discard the handle to
     avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->common.page,
                                                SMALL_PAGE_SIZE));

  entry->next = entry->common.zone->free_pages;
  entry->common.zone->free_pages = entry;
  entry->common.zone->n_small_pages--;
}

/* Release a large page that is no longer needed.  */

static inline void
free_large_page (struct large_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating %s page at %p, data %p-%p\n",
             entry->common.zone->name, (PTR) entry,
             entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);

  gcc_assert (entry->common.large_p);

  set_page_table_entry (entry->common.page, NULL);
  free (entry);
}

/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  struct small_page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->common.page;
      next = p->next;
      len = SMALL_PAGE_SIZE;
      set_page_table_entry (p->common.page, NULL);
      p = next;

      while (p && p->common.page == start + len)
        {
          next = p->next;
          len += SMALL_PAGE_SIZE;
          set_page_table_entry (p->common.page, NULL);
          p = next;
        }

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}

/* Place the block at PTR of size SIZE on the free list for ZONE.  */

static inline void
free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
{
  struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  gcc_assert (bin != 0);
  if (bin > NUM_FREE_BINS)
    {
      bin = 0;
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
                                                     sizeof (struct alloc_chunk)));
      chunk->size = size;
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr + sizeof (struct alloc_chunk),
                                                    size - sizeof (struct alloc_chunk)));
    }
  else
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (chunk,
                                                     sizeof (struct alloc_chunk *)));
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (ptr + sizeof (struct alloc_chunk *),
                                                    size - sizeof (struct alloc_chunk *)));
    }

  zone->free_chunks[bin] = chunk;
  if (bin > zone->high_free_bin)
    zone->high_free_bin = bin;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
}

/* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE.  */

void *
ggc_internal_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
                              MEM_STAT_DECL)
{
  size_t bin;
  size_t csize;
  struct small_page_entry *entry;
  struct alloc_chunk *chunk, **pp;
  void *result;
  size_t size = orig_size;

  /* Make sure that zero-sized allocations get a unique and freeable
     pointer.  */
  if (size == 0)
    size = MAX_ALIGNMENT;
  else
    size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;

  /* Try to allocate the object from several different sources.  Each
     of these cases is responsible for setting RESULT and SIZE to
     describe the allocated block, before jumping to FOUND.  If a
     chunk is split, the allocate bit for the new chunk should also be
     set.

     Large objects are handled specially.  However, they'll just fail
     the next couple of conditions, so we can wait to check for them
     below.  The large object case is relatively rare (< 1%), so this
     is a win.  */

  /* First try to split the last chunk we allocated.  For best
     fragmentation behavior it would be better to look for a
     free bin of the appropriate size for a small object.  However,
     we're unlikely (1% - 7%) to find one, and this gives better
     locality behavior anyway.  This case handles the lion's share
     of all calls to this function.  */
  if (size <= zone->cached_free_size)
    {
      result = zone->cached_free;

      zone->cached_free_size -= size;
      if (zone->cached_free_size)
        {
          zone->cached_free += size;
          zone_set_object_alloc_bit (zone->cached_free);
        }

      goto found;
    }

  /* Next, try to find a free bin of the exactly correct size.  */

  /* We want to round SIZE up, rather than down, but we know it's
     already aligned to at least FREE_BIN_DELTA, so we can just
     shift.  */
  bin = SIZE_BIN_DOWN (size);

  if (bin <= NUM_FREE_BINS
      && (chunk = zone->free_chunks[bin]) != NULL)
    {
      /* We have a chunk of the right size.  Pull it off the free list
         and use it.  */

      zone->free_chunks[bin] = chunk->next_free;

      /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
         == FREE_BIN_DELTA.  */
      result = chunk;

      /* The allocation bits are already set correctly.  HIGH_FREE_BIN
         may now be wrong, if this was the last chunk in the high bin.
         Rather than fixing it up now, wait until we need to search
         the free bins.  */

      goto found;
    }

  /* Next, if there wasn't a chunk of the ideal size, look for a chunk
     to split.  We can find one in the too-big bin, or in the largest
     sized bin with a chunk in it.  Try the largest normal-sized bin
     first.  */

  if (zone->high_free_bin > bin)
    {
      /* Find the highest numbered free bin.  It will be at or below
         the watermark.  */
      while (zone->high_free_bin > bin
             && zone->free_chunks[zone->high_free_bin] == NULL)
        zone->high_free_bin--;

      if (zone->high_free_bin > bin)
        {
          size_t tbin = zone->high_free_bin;
          chunk = zone->free_chunks[tbin];

          /* Remove the chunk from its previous bin.  */
          zone->free_chunks[tbin] = chunk->next_free;

          result = (char *) chunk;

          /* Save the rest of the chunk for future allocation.  */
          if (zone->cached_free_size)
            free_chunk (zone->cached_free, zone->cached_free_size, zone);

          chunk = (struct alloc_chunk *) ((char *) result + size);
          zone->cached_free = (char *) chunk;
          zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;

          /* Mark the new free chunk as an object, so that we can
             find the size of the newly allocated object.  */
          zone_set_object_alloc_bit (chunk);

          /* HIGH_FREE_BIN may now be wrong, if this was the last
             chunk in the high bin.  Rather than fixing it up now,
             wait until we need to search the free bins.  */

          goto found;
        }
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->next_free;
      chunk = *pp;
    }

  if (chunk)
    {
      /* Remove the chunk from its previous bin.  */
      *pp = chunk->next_free;

      result = (char *) chunk;

      /* Save the rest of the chunk for future allocation, if there's any
         left over.  */
      csize = chunk->size;
      if (csize > size)
        {
          if (zone->cached_free_size)
            free_chunk (zone->cached_free, zone->cached_free_size, zone);

          chunk = (struct alloc_chunk *) ((char *) result + size);
          zone->cached_free = (char *) chunk;
          zone->cached_free_size = csize - size;

          /* Mark the new free chunk as an object.  */
          zone_set_object_alloc_bit (chunk);
        }

      goto found;
    }

  /* Handle large allocations.  We could choose any threshold between
     GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
     GGC_PAGE_SIZE.  It can't be smaller, because then it wouldn't
     be guaranteed to have a unique entry in the lookup table.  Large
     allocations will always fall through to here.  */
  if (size > GGC_PAGE_SIZE)
    {
      struct large_page_entry *entry = alloc_large_page (size, zone);

#ifdef GATHER_STATISTICS
      entry->common.survived = 0;
#endif

      entry->next = zone->large_pages;
      if (zone->large_pages)
        zone->large_pages->prev = entry;
      zone->large_pages = entry;

      result = entry->common.page;

      goto found;
    }

  /* Failing everything above, allocate a new small page.  */

  entry = alloc_small_page (zone);
  entry->next = zone->pages;
  zone->pages = entry;

  /* Mark the first chunk in the new page.  */
  entry->alloc_bits[0] = 1;

  result = entry->common.page;
  if (size < SMALL_PAGE_SIZE)
    {
      if (zone->cached_free_size)
        free_chunk (zone->cached_free, zone->cached_free_size, zone);

      zone->cached_free = (char *) result + size;
      zone->cached_free_size = SMALL_PAGE_SIZE - size;

      /* Mark the new free chunk as an object.  */
      zone_set_object_alloc_bit (zone->cached_free);
    }

 found:

  /* We could save TYPE in the chunk, but we don't use that for
     anything yet.  If we wanted to, we could do it by adding it
     either before the beginning of the chunk or after its end,
     and adjusting the size and pointer appropriately.  */

  /* We'll probably write to this after we return.  */
  prefetchw (result);

#ifdef ENABLE_GC_CHECKING
  /* `Poison' the entire allocated object.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
  memset (result, 0xaf, size);
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (result + orig_size,
                                                size - orig_size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, orig_size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size;

  timevar_ggc_mem_total += size;

#ifdef GATHER_STATISTICS
  ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);

  {
    size_t object_size = size;
    size_t overhead = object_size - orig_size;

    zone->stats.total_overhead += overhead;
    zone->stats.total_allocated += object_size;

    if (orig_size <= 32)
      {
        zone->stats.total_overhead_under32 += overhead;
        zone->stats.total_allocated_under32 += object_size;
      }
    if (orig_size <= 64)
      {
        zone->stats.total_overhead_under64 += overhead;
        zone->stats.total_allocated_under64 += object_size;
      }
    if (orig_size <= 128)
      {
        zone->stats.total_overhead_under128 += overhead;
        zone->stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
             (unsigned long) size, result);

  return result;
}

#define ggc_internal_alloc_zone_pass_stat(s,z) \
  ggc_internal_alloc_zone_stat (s,z PASS_MEM_STAT)

void *
ggc_internal_cleared_alloc_zone_stat (size_t orig_size,
                                      struct alloc_zone *zone MEM_STAT_DECL)
{
  void * result = ggc_internal_alloc_zone_pass_stat (orig_size, zone);
  memset (result, 0, orig_size);
  return result;
}


/* Allocate a SIZE of chunk memory of GTE type, into an appropriate zone
   for that type.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
                      MEM_STAT_DECL)
{
  switch (gte)
    {
    case gt_ggc_e_14lang_tree_node:
      return ggc_internal_alloc_zone_pass_stat (size, &tree_zone);

    case gt_ggc_e_7rtx_def:
      return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);

    case gt_ggc_e_9rtvec_def:
      return ggc_internal_alloc_zone_pass_stat (size, &rtl_zone);

    default:
      return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
    }
}

/* Normal GC allocation simply allocates into the main zone.  */

void *
ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
{
  return ggc_internal_alloc_zone_pass_stat (size, &main_zone);
}

/* Poison the chunk.  */
#ifdef ENABLE_GC_CHECKING
#define poison_region(PTR, SIZE)                                      \
  do {                                                                \
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED ((PTR), (SIZE)));   \
    memset ((PTR), 0xa5, (SIZE));                                     \
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((PTR), (SIZE)));    \
  } while (0)
#else
#define poison_region(PTR, SIZE)
#endif

/* Free the object at P.  */

void
ggc_free (void *p)
{
  struct page_entry *page;

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  poison_region (p, ggc_get_size (p));

  page = zone_get_object_page (p);

  if (page->large_p)
    {
      struct large_page_entry *large_page
        = (struct large_page_entry *) page;

      /* Remove the page from the linked list.  */
      if (large_page->prev)
        large_page->prev->next = large_page->next;
      else
        {
          gcc_assert (large_page->common.zone->large_pages == large_page);
          large_page->common.zone->large_pages = large_page->next;
        }
      if (large_page->next)
        large_page->next->prev = large_page->prev;

      large_page->common.zone->allocated -= large_page->bytes;

      /* Release the memory associated with this object.  */
      free_large_page (large_page);
    }
  else if (page->pch_p)
    /* Don't do anything.  We won't allocate a new object from the
       PCH zone so there's no point in releasing anything.  */
    ;
  else
    {
      size_t size = ggc_get_size (p);

      page->zone->allocated -= size;

      /* Add the chunk to the free list.  We don't bother with coalescing,
         since we are likely to want a chunk of this size again.  */
      free_chunk ((char *)p, size, page->zone);
    }
}

1450 /* Mark function for strings. */ | |
1451 | |
1452 void | |
1453 gt_ggc_m_S (const void *p) | |
1454 { | |
1455 page_entry *entry; | |
1456 unsigned long offset; | |
1457 | |
1458 if (!p) | |
1459 return; | |
1460 | |
1461 /* Look up the page on which the object is alloced. . */ | |
1462 entry = lookup_page_table_if_allocated (p); | |
1463 if (! entry) | |
1464 return; | |
1465 | |
1466 if (entry->pch_p) | |
1467 { | |
1468 size_t alloc_word, alloc_bit, t; | |
1469 t = ((const char *) p - pch_zone.page) / BYTES_PER_ALLOC_BIT; | |
1470 alloc_word = t / (8 * sizeof (alloc_type)); | |
1471 alloc_bit = t % (8 * sizeof (alloc_type)); | |
1472 offset = zone_find_object_offset (pch_zone.alloc_bits, alloc_word, | |
1473 alloc_bit); | |
1474 } | |
1475 else if (entry->large_p) | |
1476 { | |
1477 struct large_page_entry *le = (struct large_page_entry *) entry; | |
1478 offset = ((const char *) p) - entry->page; | |
1479 gcc_assert (offset < le->bytes); | |
1480 } | |
1481 else | |
1482 { | |
1483 struct small_page_entry *se = (struct small_page_entry *) entry; | |
1484 unsigned int start_word = zone_get_object_alloc_word (p); | |
1485 unsigned int start_bit = zone_get_object_alloc_bit (p); | |
1486 offset = zone_find_object_offset (se->alloc_bits, start_word, start_bit); | |
1487 | |
1488 /* On some platforms a char* will not necessarily line up on an | |
1489 allocation boundary, so we have to update the offset to | |
1490 account for the leftover bytes. */ | |
1491 offset += (size_t) p % BYTES_PER_ALLOC_BIT; | |
1492 } | |
1493 | |
1494 if (offset) | |
1495 { | |
1496 /* Here we've seen a char* which does not point to the beginning | |
1497 of an allocated object. We assume it points to the middle of | |
1498 a STRING_CST. */ | |
1499 gcc_assert (offset == offsetof (struct tree_string, str)); | |
1500 p = ((const char *) p) - offset; | |
1501 gt_ggc_mx_lang_tree_node (CONST_CAST(void *, p)); | |
1502 return; | |
1503 } | |
1504 | |
1505 /* Inefficient, but also unlikely to matter. */ | |
1506 ggc_set_mark (p); | |
1507 } | |
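/* Worked example, an illustration rather than original code: if P
   points at the character data of a STRING_CST -- i.e. at the str
   field inside its struct tree_string -- then the computed offset
   equals offsetof (struct tree_string, str), P is backed up to the
   start of the tree node, and the node is marked through
   gt_ggc_mx_lang_tree_node rather than ggc_set_mark.  */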
1508 | |
1509 /* If P is not marked, mark it and return false. Otherwise return true. | |
1510 P must have been allocated by the GC allocator; it mustn't point to | |
1511 static objects, stack variables, or memory allocated with malloc. */ | |
1512 | |
1513 int | |
1514 ggc_set_mark (const void *p) | |
1515 { | |
1516 struct page_entry *page; | |
1517 const char *ptr = (const char *) p; | |
1518 | |
1519 page = zone_get_object_page (p); | |
1520 | |
1521 if (page->pch_p) | |
1522 { | |
1523 size_t mark_word, mark_bit, offset; | |
1524 offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT; | |
1525 mark_word = offset / (8 * sizeof (mark_type)); | |
1526 mark_bit = offset % (8 * sizeof (mark_type)); | |
1527 |
0 | 1528 if (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) |
1529 return 1; | |
1530 pch_zone.mark_bits[mark_word] |= (1 << mark_bit); | |
1531 } | |
1532 else if (page->large_p) | |
1533 { | |
1534 struct large_page_entry *large_page | |
1535 = (struct large_page_entry *) page; | |
1536 | |
1537 if (large_page->mark_p) | |
1538 return 1; | |
1539 large_page->mark_p = true; | |
1540 } | |
1541 else | |
1542 { | |
1543 struct small_page_entry *small_page | |
1544 = (struct small_page_entry *) page; | |
1545 | |
1546 if (small_page->mark_bits[zone_get_object_mark_word (p)] | |
1547 & (1 << zone_get_object_mark_bit (p))) | |
1548 return 1; | |
1549 small_page->mark_bits[zone_get_object_mark_word (p)] | |
1550 |= (1 << zone_get_object_mark_bit (p)); | |
1551 } | |
1552 | |
1553 if (GGC_DEBUG_LEVEL >= 4) | |
1554 fprintf (G.debug_file, "Marking %p\n", p); | |
1555 | |
1556 return 0; | |
1557 } | |
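/* Worked example under assumed parameters (both BYTES_PER_MARK_BIT
   and the width of mark_type are configuration dependent): with
   BYTES_PER_MARK_BIT == 8 and a 32-bit mark_type, an object 2048
   bytes into the PCH zone has offset 2048 / 8 = 256, giving
   mark_word = 256 / 32 = 8 and mark_bit = 256 % 32 = 0, so its mark
   is bit 0 of pch_zone.mark_bits[8].  */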
1558 | |
1559 /* Return 1 if P has been marked, zero otherwise. | |
1560 P must have been allocated by the GC allocator; it mustn't point to | |
1561 static objects, stack variables, or memory allocated with malloc. */ | |
1562 | |
1563 int | |
1564 ggc_marked_p (const void *p) | |
1565 { | |
1566 struct page_entry *page; | |
1567 const char *ptr = (const char *) p; | |
1568 | |
1569 page = zone_get_object_page (p); | |
1570 | |
1571 if (page->pch_p) | |
1572 { | |
1573 size_t mark_word, mark_bit, offset; | |
1574 offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT; | |
1575 mark_word = offset / (8 * sizeof (mark_type)); | |
1576 mark_bit = offset % (8 * sizeof (mark_type)); | |
1577 |
0 | 1578 return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0; |
1579 } | |
1580 | |
1581 if (page->large_p) | |
1582 { | |
1583 struct large_page_entry *large_page | |
1584 = (struct large_page_entry *) page; | |
1585 | |
1586 return large_page->mark_p; | |
1587 } | |
1588 else | |
1589 { | |
1590 struct small_page_entry *small_page | |
1591 = (struct small_page_entry *) page; | |
1592 | |
1593 return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)] | |
1594 & (1 << zone_get_object_mark_bit (p))); | |
1595 } | |
1596 } | |
1597 | |
1598 /* Return the size of the gc-able object P. */ | |
1599 | |
1600 size_t | |
1601 ggc_get_size (const void *p) | |
1602 { | |
1603 struct page_entry *page; | |
1604 const char *ptr = (const char *) p; | |
1605 | |
1606 page = zone_get_object_page (p); | |
1607 | |
1608 if (page->pch_p) | |
1609 { | |
1610 size_t alloc_word, alloc_bit, offset, max_size; | |
1611 offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1; | |
1612 alloc_word = offset / (8 * sizeof (alloc_type)); | |
1613 alloc_bit = offset % (8 * sizeof (alloc_type)); | |
1614 max_size = pch_zone.bytes - (ptr - pch_zone.page); | |
1615 return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit, | |
1616 max_size); | |
1617 } | |
1618 | |
1619 if (page->large_p) | |
1620 return ((struct large_page_entry *)page)->bytes; | |
1621 else | |
1622 return zone_find_object_size ((struct small_page_entry *) page, p); | |
1623 } | |
1624 | |
1625 /* Initialize the ggc-zone-mmap allocator. */ | |
1626 void | |
1627 init_ggc (void) | |
1628 { | |
1629 /* The allocation size must be greater than BYTES_PER_MARK_BIT, and | |
1630 a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for | |
1631 the current assumptions to hold. */ | |
1632 | |
1633 gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT); | |
1634 | |
1635 /* Set up the main zone by hand. */ | |
1636 main_zone.name = "Main zone"; | |
1637 G.zones = &main_zone; | |
1638 | |
1639 /* Allocate the default zones. */ | |
1640 new_ggc_zone_1 (&rtl_zone, "RTL zone"); | |
1641 new_ggc_zone_1 (&tree_zone, "Tree zone"); | |
1642 new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone"); | |
1643 | |
1644 G.pagesize = getpagesize(); | |
1645 G.lg_pagesize = exact_log2 (G.pagesize); | |
1646 G.page_mask = ~(G.pagesize - 1); | |
1647 | |
1648 /* Require the system page size to be a multiple of GGC_PAGE_SIZE. */ | |
1649 gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0); | |
1650 | |
1651 /* Allocate 16 system pages at a time. */ | |
1652 G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE; | |
1653 | |
1654 /* Calculate the size of the allocation bitmap and other overhead. */ | |
1655 /* Right now we allocate bits for the page header and bitmap. These | |
1656 are wasted, but a little tricky to eliminate. */ | |
1657 G.small_page_overhead | |
1658 = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8); | |
1659 /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */ | |
1660 | |
1661 #ifdef HAVE_MMAP_DEV_ZERO | |
1662 G.dev_zero_fd = open ("/dev/zero", O_RDONLY); | |
1663 gcc_assert (G.dev_zero_fd != -1); | |
1664 #endif | |
1665 | |
1666 #if 0 | |
1667 G.debug_file = fopen ("ggc-mmap.debug", "w"); | |
1668 setlinebuf (G.debug_file); | |
1669 #else | |
1670 G.debug_file = stdout; | |
1671 #endif | |
1672 | |
1673 #ifdef USING_MMAP | |
1674 /* StunOS has an amazing off-by-one error for the first mmap allocation | |
1675 after fiddling with RLIMIT_STACK. The result, as hard as it is to | |
1676 believe, is an unaligned page allocation, which would cause us to | |
1677 hork badly if we tried to use it. */ | |
1678 { | |
1679 char *p = alloc_anon (NULL, G.pagesize, &main_zone); | |
1680 struct small_page_entry *e; | |
1681 if ((size_t)p & (G.pagesize - 1)) | |
1682 { | |
1683 /* How losing. Discard this one and try another. If we still | |
1684 can't get something useful, give up. */ | |
1685 | |
1686 p = alloc_anon (NULL, G.pagesize, &main_zone); | |
1687 gcc_assert (!((size_t)p & (G.pagesize - 1))); | |
1688 } | |
1689 | |
1690 if (GGC_PAGE_SIZE == G.pagesize) | |
1691 { | |
1692 /* We have a good page, might as well hold onto it... */ | |
1693 e = XCNEWVAR (struct small_page_entry, G.small_page_overhead); | |
1694 e->common.page = p; | |
1695 e->common.zone = &main_zone; | |
1696 e->next = main_zone.free_pages; | |
1697 set_page_table_entry (e->common.page, &e->common); | |
1698 main_zone.free_pages = e; | |
1699 } | |
1700 else | |
1701 { | |
1702 munmap (p, G.pagesize); | |
1703 } | |
1704 } | |
1705 #endif | |
1706 } | |
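/* Illustrative arithmetic, not original code: on a host with 4 KB
   system pages and GGC_PAGE_SIZE also 4 KB, G.quire_size works out
   to 16 * 4096 / 4096 = 16, i.e. each trip to the OS maps sixteen
   GC pages at once to amortize the cost of mmap.  */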
1707 | |
1708 /* Start a new GGC zone. */ | |
1709 | |
1710 static void | |
1711 new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name) | |
1712 { | |
1713 new_zone->name = name; | |
1714 new_zone->next_zone = G.zones->next_zone; | |
1715 G.zones->next_zone = new_zone; | |
1716 } | |
1717 | |
1718 /* Free all empty pages and objects within a page for a given zone.  */ |
1719 | |
1720 static void | |
1721 sweep_pages (struct alloc_zone *zone) | |
1722 { | |
1723 struct large_page_entry **lpp, *lp, *lnext; | |
1724 struct small_page_entry **spp, *sp, *snext; | |
1725 char *last_free; | |
1726 size_t allocated = 0; | |
1727 bool nomarksinpage; | |
1728 | |
1729 /* First, reset the free_chunks lists, since we are going to | |
1730 re-free free chunks in hopes of coalescing them into large chunks. */ | |
1731 memset (zone->free_chunks, 0, sizeof (zone->free_chunks)); | |
1732 zone->high_free_bin = 0; | |
1733 zone->cached_free = NULL; | |
1734 zone->cached_free_size = 0; | |
1735 | |
1736 /* Large pages are all-or-none affairs.  Either they are completely | |
1737 empty, or they are completely full.  */ | |
1738 lpp = &zone->large_pages; | |
1739 for (lp = zone->large_pages; lp != NULL; lp = lnext) | |
1740 { | |
1741 gcc_assert (lp->common.large_p); | |
1742 | |
1743 lnext = lp->next; | |
1744 | |
1745 #ifdef GATHER_STATISTICS | |
1746 /* This page has now survived another collection. */ | |
1747 lp->common.survived++; | |
1748 #endif | |
1749 | |
1750 if (lp->mark_p) | |
1751 { | |
1752 lp->mark_p = false; | |
1753 allocated += lp->bytes; | |
1754 lpp = &lp->next; | |
1755 } | |
1756 else | |
1757 { | |
1758 *lpp = lnext; | |
1759 #ifdef ENABLE_GC_CHECKING | |
1760 /* Poison the page. */ | |
1761 memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE); | |
1762 #endif | |
1763 if (lp->prev) | |
1764 lp->prev->next = lp->next; | |
1765 if (lp->next) | |
1766 lp->next->prev = lp->prev; | |
1767 free_large_page (lp); | |
1768 } | |
1769 } | |
1770 | |
1771 spp = &zone->pages; | |
1772 for (sp = zone->pages; sp != NULL; sp = snext) | |
1773 { | |
1774 char *object, *last_object; | |
1775 char *end; | |
1776 alloc_type *alloc_word_p; | |
1777 mark_type *mark_word_p; | |
1778 | |
1779 gcc_assert (!sp->common.large_p); | |
1780 | |
1781 snext = sp->next; | |
1782 | |
1783 #ifdef GATHER_STATISTICS | |
1784 /* This page has now survived another collection. */ | |
1785 sp->common.survived++; | |
1786 #endif | |
1787 | |
1788 /* Step through all chunks, consolidate those that are free and | |
1789 insert them into the free lists. Note that consolidation | |
1790 slows down collection slightly. */ | |
1791 | |
1792 last_object = object = sp->common.page; | |
1793 end = sp->common.page + SMALL_PAGE_SIZE; | |
1794 last_free = NULL; | |
1795 nomarksinpage = true; | |
1796 mark_word_p = sp->mark_bits; | |
1797 alloc_word_p = sp->alloc_bits; | |
1798 | |
1799 gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT); | |
1800 | |
1801 object = sp->common.page; | |
1802 do | |
1803 { | |
1804 unsigned int i, n; | |
1805 alloc_type alloc_word; | |
1806 mark_type mark_word; | |
1807 | |
1808 alloc_word = *alloc_word_p++; | |
1809 mark_word = *mark_word_p++; | |
1810 | |
1811 if (mark_word) | |
1812 nomarksinpage = false; | |
1813 | |
1814 /* There ought to be some way to do this without looping... */ | |
1815 i = 0; | |
1816 while ((n = alloc_ffs (alloc_word)) != 0) | |
1817 { | |
1818 /* Extend the current state for n - 1 bits. We can't | |
1819 shift alloc_word by n, even though it isn't used in the | |
1820 loop, in case only the highest bit was set. */ | |
1821 alloc_word >>= n - 1; | |
1822 mark_word >>= n - 1; | |
1823 object += BYTES_PER_MARK_BIT * (n - 1); | |
1824 | |
1825 if (mark_word & 1) | |
1826 { | |
1827 if (last_free) | |
1828 { | |
1829 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free, | |
1830 object | |
1831 - last_free)); | |
1832 poison_region (last_free, object - last_free); | |
1833 free_chunk (last_free, object - last_free, zone); | |
1834 last_free = NULL; | |
1835 } | |
1836 else | |
1837 allocated += object - last_object; | |
1838 last_object = object; | |
1839 } | |
1840 else | |
1841 { | |
1842 if (last_free == NULL) | |
1843 { | |
1844 last_free = object; | |
1845 allocated += object - last_object; | |
1846 } | |
1847 else | |
1848 zone_clear_object_alloc_bit (sp, object); | |
1849 } | |
1850 | |
1851 /* Shift to just after the alloc bit we handled. */ | |
1852 alloc_word >>= 1; | |
1853 mark_word >>= 1; | |
1854 object += BYTES_PER_MARK_BIT; | |
1855 | |
1856 i += n; | |
1857 } | |
1858 | |
1859 object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i); | |
1860 } | |
1861 while (object < end); | |
1862 | |
1863 if (nomarksinpage) | |
1864 { | |
1865 *spp = snext; | |
1866 #ifdef ENABLE_GC_CHECKING | |
1867 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (sp->common.page, | |
1868 SMALL_PAGE_SIZE)); | |
1869 /* Poison the page. */ | |
1870 memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE); | |
1871 #endif | |
1872 free_small_page (sp); | |
1873 continue; | |
1874 } | |
1875 else if (last_free) | |
1876 { | |
1877 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (last_free, | |
1878 object - last_free)); | |
1879 poison_region (last_free, object - last_free); | |
1880 free_chunk (last_free, object - last_free, zone); | |
1881 } | |
1882 else | |
1883 allocated += object - last_object; | |
1884 | |
1885 spp = &sp->next; | |
1886 } | |
1887 | |
1888 zone->allocated = allocated; | |
1889 } | |
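/* Worked example of the bit-scan loop above, illustration only:
   suppose alloc_word == 0x9 (objects begin at bits 0 and 3) and
   mark_word == 0x8 (only the object at bit 3 is marked).  The first
   alloc_ffs returns 1; the object at bit 0 is unmarked and starts a
   free run.  The second alloc_ffs returns 3; the object at bit 3 is
   marked, so the run covering bits 0..2 is handed to free_chunk and
   the marked object survives.  */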
1890 | |
1891 /* Mark-and-sweep routine for collecting a single zone.  NEED_MARKING | |
1892 is true if we need to mark before sweeping, false if some other | |
1893 zone collection has already performed marking for us. Returns true | |
1894 if we collected, false otherwise. */ | |
1895 | |
1896 static bool | |
1897 ggc_collect_1 (struct alloc_zone *zone, bool need_marking) | |
1898 { | |
1899 #if 0 | |
1900 /* */ | |
1901 { | |
1902 int i; | |
1903 for (i = 0; i < NUM_FREE_BINS + 1; i++) | |
1904 { | |
1905 struct alloc_chunk *chunk; | |
1906 int n, tot; | |
1907 | |
1908 n = 0; | |
1909 tot = 0; | |
1910 chunk = zone->free_chunks[i]; | |
1911 while (chunk) | |
1912 { | |
1913 n++; | |
1914 tot += chunk->size; | |
1915 chunk = chunk->next_free; | |
1916 } | |
1917 fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n", | |
1918 i, n, tot); | |
1919 } | |
1920 } | |
1921 /* */ | |
1922 #endif | |
1923 | |
1924 if (!quiet_flag) | |
1925 fprintf (stderr, " {%s GC %luk -> ", | |
1926 zone->name, (unsigned long) zone->allocated / 1024); | |
1927 | |
1928 /* Zero the total allocated bytes. This will be recalculated in the | |
1929 sweep phase. */ | |
1930 zone->allocated = 0; | |
1931 | |
1932 /* Release the pages we freed the last time we collected, but didn't | |
1933 reuse in the interim. */ | |
1934 release_pages (zone); | |
1935 | |
1936 if (need_marking) | |
1937 { | |
1938 zone_allocate_marks (); | |
1939 ggc_mark_roots (); | |
1940 #ifdef GATHER_STATISTICS | |
1941 ggc_prune_overhead_list (); | |
1942 #endif | |
1943 } | |
1944 |
0 | 1945 sweep_pages (zone); |
1946 zone->was_collected = true; | |
1947 zone->allocated_last_gc = zone->allocated; | |
1948 | |
1949 if (!quiet_flag) | |
1950 fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024); | |
1951 return true; | |
1952 } | |
1953 | |
1954 #ifdef GATHER_STATISTICS | |
1955 /* Calculate the average page survival rate in terms of number of | |
1956 collections. */ | |
1957 | |
1958 static float | |
1959 calculate_average_page_survival (struct alloc_zone *zone) | |
1960 { | |
1961 float count = 0.0; | |
1962 float survival = 0.0; | |
1963 struct small_page_entry *p; | |
1964 struct large_page_entry *lp; | |
1965 for (p = zone->pages; p; p = p->next) | |
1966 { | |
1967 count += 1.0; | |
1968 survival += p->common.survived; | |
1969 } | |
1970 for (lp = zone->large_pages; lp; lp = lp->next) | |
1971 { | |
1972 count += 1.0; | |
1973 survival += lp->common.survived; | |
1974 } | |
1975 return survival/count; | |
1976 } | |
1977 #endif | |
1978 | |
1979 /* Top level collection routine. */ | |
1980 | |
1981 void | |
1982 ggc_collect (void) | |
1983 { | |
1984 struct alloc_zone *zone; | |
1985 bool marked = false; | |
1986 | |
1987 timevar_push (TV_GC); | |
1988 | |
1989 if (!ggc_force_collect) | |
1990 { | |
1991 float allocated_last_gc = 0, allocated = 0, min_expand; | |
1992 | |
1993 for (zone = G.zones; zone; zone = zone->next_zone) | |
1994 { | |
1995 allocated_last_gc += zone->allocated_last_gc; | |
1996 allocated += zone->allocated; | |
1997 } | |
1998 | |
1999 allocated_last_gc = | |
2000 MAX (allocated_last_gc, | |
2001 (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024); | |
2002 min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100; | |
2003 | |
2004 if (allocated < allocated_last_gc + min_expand) | |
2005 { | |
2006 timevar_pop (TV_GC); | |
2007 return; | |
2008 } | |
2009 } | |
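/* Illustrative arithmetic, not original code: with
   --param ggc-min-heapsize=65536 (the parameter is in kilobytes, so
   64 MB here) and --param ggc-min-expand=30, zones holding 100 MB
   after the previous collection are not collected again until total
   allocation reaches 100 MB + 30% = 130 MB.  */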
2010 | |
2011 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL); |
2012 |
0 | 2013 /* Start by possibly collecting the main zone. */ |
2014 main_zone.was_collected = false; | |
2015 marked |= ggc_collect_1 (&main_zone, true); | |
2016 | |
2017 /* In order to keep the number of collections down, we don't | |
2018 collect other zones unless we are collecting the main zone. This | |
2019 gives us roughly the same number of collections as we used to | |
2020 have with the old gc.  The number of collections is important | |
2021 because our main slowdown (according to profiling) is now in | |
2022 marking. So if we mark twice as often as we used to, we'll be | |
2023 twice as slow. Hopefully we'll avoid this cost when we mark | |
2024 zone-at-a-time. */ | |
2025 /* NOTE drow/2004-07-28: We now always collect the main zone, but | |
2026 keep this code in case the heuristics are further refined. */ | |
2027 | |
2028 if (main_zone.was_collected) | |
2029 { | |
2030 struct alloc_zone *zone; | |
2031 | |
2032 for (zone = main_zone.next_zone; zone; zone = zone->next_zone) | |
2033 { | |
2034 zone->was_collected = false; | |
2035 marked |= ggc_collect_1 (zone, !marked); | |
2036 } | |
2037 } | |
2038 | |
2039 #ifdef GATHER_STATISTICS | |
2040 /* Print page survival stats, if someone wants them. */ | |
2041 if (GGC_DEBUG_LEVEL >= 2) | |
2042 { | |
2043 for (zone = G.zones; zone; zone = zone->next_zone) | |
2044 { | |
2045 if (zone->was_collected) | |
2046 { | |
2047 float f = calculate_average_page_survival (zone); | |
2048 printf ("Average page survival in zone `%s' is %f\n", | |
2049 zone->name, f); | |
2050 } | |
2051 } | |
2052 } | |
2053 #endif | |
2054 | |
2055 if (marked) | |
2056 zone_free_marks (); | |
2057 | |
2058 /* Free dead zones. */ | |
2059 for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone) | |
2060 { | |
2061 if (zone->next_zone->dead) | |
2062 { | |
2063 struct alloc_zone *dead_zone = zone->next_zone; | |
2064 | |
2065 printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name); | |
2066 | |
2067 /* The zone must be empty. */ | |
2068 gcc_assert (!dead_zone->allocated); | |
2069 | |
2070 /* Unchain the dead zone, release all its pages and free it. */ | |
2071 zone->next_zone = zone->next_zone->next_zone; | |
2072 release_pages (dead_zone); | |
2073 free (dead_zone); | |
2074 } | |
2075 } | |
2076 | |
2077 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL); |
2078 |
0 | 2079 timevar_pop (TV_GC); |
2080 } | |
2081 | |
2082 /* Print allocation statistics. */ | |
2083 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \ | |
2084 ? (x) \ | |
2085 : ((x) < 1024*1024*10 \ | |
2086 ? (x) / 1024 \ | |
2087 : (x) / (1024*1024)))) | |
2088 #define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M')) | |
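/* Example, illustration only: a value of 15000 bytes is at least
   10 KB but under 10 MB, so SCALE yields 15000 / 1024 = 14 and LABEL
   yields 'k', printing as "14k"; values under 10 KB print as plain
   bytes and values of 10 MB or more in megabytes.  */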
2089 | |
2090 void | |
2091 ggc_print_statistics (void) | |
2092 { | |
2093 struct alloc_zone *zone; | |
2094 struct ggc_statistics stats; | |
2095 size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0; | |
2096 size_t pte_overhead, i; | |
2097 | |
2098 /* Clear the statistics. */ | |
2099 memset (&stats, 0, sizeof (stats)); | |
2100 | |
2101 /* Make sure collection will really occur. */ | |
2102 ggc_force_collect = true; | |
2103 | |
2104 /* Collect and print the statistics common across collectors. */ | |
2105 ggc_print_common_statistics (stderr, &stats); | |
2106 | |
2107 ggc_force_collect = false; | |
2108 | |
2109 /* Release free pages so that we will not count the bytes allocated | |
2110 there as part of the total allocated memory. */ | |
2111 for (zone = G.zones; zone; zone = zone->next_zone) | |
2112 release_pages (zone); | |
2113 | |
2114 /* Collect some information about the various sizes of | |
2115 allocation. */ | |
2116 fprintf (stderr, | |
2117 "Memory still allocated at the end of the compilation process\n"); | |
2118 | |
2119 fprintf (stderr, "%20s %10s %10s %10s\n", | |
2120 "Zone", "Allocated", "Used", "Overhead"); | |
2121 for (zone = G.zones; zone; zone = zone->next_zone) | |
2122 { | |
2123 struct large_page_entry *large_page; | |
2124 size_t overhead, allocated, in_use; | |
2125 | |
2126 /* Skip empty zones. */ | |
2127 if (!zone->pages && !zone->large_pages) | |
2128 continue; | |
2129 | |
2130 allocated = in_use = 0; | |
2131 | |
2132 overhead = sizeof (struct alloc_zone); | |
2133 | |
2134 for (large_page = zone->large_pages; large_page != NULL; | |
2135 large_page = large_page->next) | |
2136 { | |
2137 allocated += large_page->bytes; | |
2138 in_use += large_page->bytes; | |
2139 overhead += sizeof (struct large_page_entry); | |
2140 } | |
2141 | |
2142 /* There's no easy way to walk through the small pages finding | |
2143 used and unused objects. Instead, add all the pages, and | |
2144 subtract out the free list. */ | |
2145 | |
2146 allocated += GGC_PAGE_SIZE * zone->n_small_pages; | |
2147 in_use += GGC_PAGE_SIZE * zone->n_small_pages; | |
2148 overhead += G.small_page_overhead * zone->n_small_pages; | |
2149 | |
2150 for (i = 0; i <= NUM_FREE_BINS; i++) | |
2151 { | |
2152 struct alloc_chunk *chunk = zone->free_chunks[i]; | |
2153 while (chunk) | |
2154 { | |
2155 in_use -= ggc_get_size (chunk); | |
2156 chunk = chunk->next_free; | |
2157 } | |
2158 } | |
2159 |
0 | 2160 fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", |
2161 zone->name, | |
2162 SCALE (allocated), LABEL (allocated), | |
2163 SCALE (in_use), LABEL (in_use), | |
2164 SCALE (overhead), LABEL (overhead)); | |
2165 | |
2166 gcc_assert (in_use == zone->allocated); | |
2167 | |
2168 total_overhead += overhead; | |
2169 total_allocated += zone->allocated; | |
2170 total_bytes_mapped += zone->bytes_mapped; | |
2171 } | |
2172 | |
2173 /* Count the size of the page table as best we can. */ | |
2174 #if HOST_BITS_PER_PTR <= 32 | |
2175 pte_overhead = sizeof (G.lookup); | |
2176 for (i = 0; i < PAGE_L1_SIZE; i++) | |
2177 if (G.lookup[i]) | |
2178 pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *); | |
2179 #else | |
2180 { | |
2181 page_table table = G.lookup; | |
2182 pte_overhead = 0; | |
2183 while (table) | |
2184 { | |
2185 pte_overhead += sizeof (*table); | |
2186 for (i = 0; i < PAGE_L1_SIZE; i++) | |
2187 if (table->table[i]) | |
2188 pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *); | |
2189 table = table->next; | |
2190 } | |
2191 } | |
2192 #endif | |
2193 fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table", | |
2194 "", "", SCALE (pte_overhead), LABEL (pte_overhead)); | |
2195 total_overhead += pte_overhead; | |
2196 | |
2197 fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total", | |
2198 SCALE (total_bytes_mapped), LABEL (total_bytes_mapped), | |
2199 SCALE (total_allocated), LABEL(total_allocated), | |
2200 SCALE (total_overhead), LABEL (total_overhead)); | |
2201 | |
2202 #ifdef GATHER_STATISTICS |
0 | 2203 { |
2204 unsigned long long all_overhead = 0, all_allocated = 0; | |
2205 unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0; | |
2206 unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0; | |
2207 unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0; | |
2208 | |
2209 fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n"); | |
2210 | |
2211 for (zone = G.zones; zone; zone = zone->next_zone) | |
2212 { | |
2213 all_overhead += zone->stats.total_overhead; | |
2214 all_allocated += zone->stats.total_allocated; | |
2215 | |
2216 all_allocated_under32 += zone->stats.total_allocated_under32; | |
2217 all_overhead_under32 += zone->stats.total_overhead_under32; | |
2218 | |
2219 all_allocated_under64 += zone->stats.total_allocated_under64; | |
2220 all_overhead_under64 += zone->stats.total_overhead_under64; | |
2221 |
0 | 2222 all_allocated_under128 += zone->stats.total_allocated_under128; |
2223 all_overhead_under128 += zone->stats.total_overhead_under128; | |
2224 | |
2225 fprintf (stderr, "%20s: %10lld\n", | |
2226 zone->name, zone->stats.total_allocated); | |
2227 } | |
2228 | |
2229 fprintf (stderr, "\n"); | |
2230 | |
2231 fprintf (stderr, "Total Overhead: %10lld\n", | |
2232 all_overhead); | |
2233 fprintf (stderr, "Total Allocated: %10lld\n", | |
2234 all_allocated); | |
2235 | |
2236 fprintf (stderr, "Total Overhead under 32B: %10lld\n", | |
2237 all_overhead_under32); | |
2238 fprintf (stderr, "Total Allocated under 32B: %10lld\n", | |
2239 all_allocated_under32); | |
2240 fprintf (stderr, "Total Overhead under 64B: %10lld\n", | |
2241 all_overhead_under64); | |
2242 fprintf (stderr, "Total Allocated under 64B: %10lld\n", | |
2243 all_allocated_under64); | |
2244 fprintf (stderr, "Total Overhead under 128B: %10lld\n", | |
2245 all_overhead_under128); | |
2246 fprintf (stderr, "Total Allocated under 128B: %10lld\n", | |
2247 all_allocated_under128); | |
2248 } | |
2249 #endif | |
2250 } | |
2251 | |
2252 /* Precompiled header support. */ | |
2253 | |
2254 /* For precompiled headers, we sort objects based on their type. We | |
2255 also sort various objects into their own buckets; currently this | |
2256 covers strings and IDENTIFIER_NODE trees. The choices of how | |
2257 to sort buckets have not yet been tuned. */ | |
2258 | |
2259 #define NUM_PCH_BUCKETS (gt_types_enum_last + 3) | |
2260 | |
2261 #define OTHER_BUCKET (gt_types_enum_last + 0) | |
2262 #define IDENTIFIER_BUCKET (gt_types_enum_last + 1) | |
2263 #define STRING_BUCKET (gt_types_enum_last + 2) | |
2264 | |
2265 struct ggc_pch_ondisk | |
2266 { | |
2267 size_t total; | |
2268 size_t type_totals[NUM_PCH_BUCKETS]; | |
2269 }; | |
2270 | |
2271 struct ggc_pch_data | |
2272 { | |
2273 struct ggc_pch_ondisk d; | |
2274 size_t base; | |
2275 size_t orig_base; | |
2276 size_t alloc_size; | |
2277 alloc_type *alloc_bits; | |
2278 size_t type_bases[NUM_PCH_BUCKETS]; | |
2279 size_t start_offset; | |
2280 }; | |
2281 | |
2282 /* Initialize the PCH data structure. */ | |
2283 | |
2284 struct ggc_pch_data * | |
2285 init_ggc_pch (void) | |
2286 { | |
2287 return XCNEW (struct ggc_pch_data); | |
2288 } | |
2289 | |
2290 /* Return which of the page-aligned buckets the object at X, with type | |
2291 TYPE, should be sorted into in the PCH. Strings will have | |
2292 IS_STRING set and TYPE will be gt_types_enum_last. Other objects | |
2293 of unknown type will also have TYPE equal to gt_types_enum_last. */ | |
2294 | |
2295 static int | |
2296 pch_bucket (void *x, enum gt_types_enum type, | |
2297 bool is_string) | |
2298 { | |
2299 /* Sort identifiers into their own bucket, to improve locality | |
2300 when searching the identifier hash table. */ | |
2301 if (type == gt_ggc_e_14lang_tree_node | |
2302 && TREE_CODE ((tree) x) == IDENTIFIER_NODE) | |
2303 return IDENTIFIER_BUCKET; | |
2304 else if (type == gt_types_enum_last) | |
2305 { | |
2306 if (is_string) | |
2307 return STRING_BUCKET; | |
2308 return OTHER_BUCKET; | |
2309 } | |
2310 return type; | |
2311 } | |
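/* Illustration, not original code: an IDENTIFIER_NODE lands in
   IDENTIFIER_BUCKET; string data (IS_STRING set, TYPE equal to
   gt_types_enum_last) lands in STRING_BUCKET; a non-string object of
   unknown type falls back to OTHER_BUCKET; everything else is
   bucketed directly by its gt_types_enum value.  */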
2312 | |
2313 /* Add the size of object X to the size of the PCH data. */ | |
2314 | |
2315 void | |
2316 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED, | |
2317 size_t size, bool is_string, enum gt_types_enum type) | |
2318 { | |
2319 /* NOTE: Right now we don't need to align up the size of any objects. | |
2320 Strings can be unaligned, and everything else is allocated to a | |
2321 MAX_ALIGNMENT boundary already. */ | |
2322 | |
2323 d->d.type_totals[pch_bucket (x, type, is_string)] += size; | |
2324 } | |
2325 | |
2326 /* Return the total size of the PCH data. */ | |
2327 | |
2328 size_t | |
2329 ggc_pch_total_size (struct ggc_pch_data *d) | |
2330 { | |
2331 int i; |
0 | 2332 size_t alloc_size, total_size; |
2333 | |
2334 total_size = 0; | |
2335 for (i = 0; i < NUM_PCH_BUCKETS; i++) | |
2336 { | |
2337 d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE); | |
2338 total_size += d->d.type_totals[i]; | |
2339 } | |
2340 d->d.total = total_size; | |
2341 | |
2342 /* Include the size of the allocation bitmap. */ | |
2343 alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8); | |
2344 alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT); | |
2345 d->alloc_size = alloc_size; | |
2346 | |
2347 return d->d.total + alloc_size; | |
2348 } | |
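/* Illustrative arithmetic under an assumed GGC_PAGE_SIZE of 4096:
   bucket totals of 5000 and 100 bytes round up to 8192 and 4096, so
   d->d.total becomes 12288, plus an allocation bitmap of one bit per
   BYTES_PER_ALLOC_BIT bytes, itself rounded up to MAX_ALIGNMENT.  */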
2349 | |
2350 /* Set the base address for the objects in the PCH file. */ | |
2351 | |
2352 void | |
2353 ggc_pch_this_base (struct ggc_pch_data *d, void *base_) | |
2354 { | |
2355 int i; | |
2356 size_t base = (size_t) base_; | |
2357 | |
2358 d->base = d->orig_base = base; | |
2359 for (i = 0; i < NUM_PCH_BUCKETS; i++) | |
2360 { | |
2361 d->type_bases[i] = base; | |
2362 base += d->d.type_totals[i]; | |
2363 } | |
2364 | |
2365 if (d->alloc_bits == NULL) | |
2366 d->alloc_bits = XCNEWVAR (alloc_type, d->alloc_size); | |
2367 } | |
2368 | |
2369 /* Allocate a place for object X of size SIZE in the PCH file. */ | |
2370 | |
2371 char * | |
2372 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x, | |
2373 size_t size, bool is_string, | |
2374 enum gt_types_enum type) | |
2375 { | |
2376 size_t alloc_word, alloc_bit; | |
2377 char *result; | |
2378 int bucket = pch_bucket (x, type, is_string); | |
2379 | |
2380 /* Record the start of the object in the allocation bitmap. We | |
2381 can't assert that the allocation bit is previously clear, because | |
2382 strings may violate the invariant that they are at least | |
2383 BYTES_PER_ALLOC_BIT long. This is harmless - ggc_get_size | |
2384 should not be called for strings. */ | |
2385 alloc_word = ((d->type_bases[bucket] - d->orig_base) | |
2386 / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT)); | |
2387 alloc_bit = ((d->type_bases[bucket] - d->orig_base) | |
2388 / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type)); | |
2389 d->alloc_bits[alloc_word] |= 1L << alloc_bit; | |
2390 | |
2391 /* Place the object at the current pointer for this bucket. */ | |
2392 result = (char *) d->type_bases[bucket]; | |
2393 d->type_bases[bucket] += size; | |
2394 return result; | |
2395 } | |
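/* Worked example under assumed parameters (BYTES_PER_ALLOC_BIT == 8,
   32-bit alloc_type): an object placed 1024 bytes past orig_base
   occupies allocation bit 1024 / 8 = 128, i.e. bit 128 % 32 = 0 of
   word 1024 / (8 * 4 * 8) = 4, so the code sets bit 0 of
   d->alloc_bits[4].  */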
2396 | |
2397 /* Prepare to write out the PCH data to file F. */ | |
2398 | |
2399 void | |
2400 ggc_pch_prepare_write (struct ggc_pch_data *d, | |
2401 FILE *f) | |
2402 { | |
2403 /* We seek around a lot while writing. Record where the end | |
2404 of the padding in the PCH file is, so that we can | |
2405 locate each object's offset. */ | |
2406 d->start_offset = ftell (f); | |
2407 } | |
2408 | |
2409 /* Write out object X of SIZE to file F. */ | |
2410 | |
2411 void | |
2412 ggc_pch_write_object (struct ggc_pch_data *d, | |
2413 FILE *f, void *x, void *newx, | |
2414 size_t size, bool is_string ATTRIBUTE_UNUSED) | |
2415 { | |
2416 if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0) | |
2417 fatal_error ("can%'t seek PCH file: %m"); |
0 | 2418 |
2419 if (fwrite (x, size, 1, f) != 1) | |
2420 fatal_error ("can%'t write PCH file: %m"); |
0 | 2421 } |
2422 | |
2423 void | |
2424 ggc_pch_finish (struct ggc_pch_data *d, FILE *f) | |
2425 { | |
2426 /* Write out the allocation bitmap. */ | |
2427 if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0) | |
2428 fatal_error ("can%'t seek PCH file: %m"); |
0 | 2429 |
2430 if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1) | |
2431 fatal_error ("can%'t write PCH file: %m"); |
0 | 2432 |
2433 /* Done with the PCH, so write out our footer. */ | |
2434 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1) | |
2435 fatal_error ("can%'t write PCH file: %m"); |
0 | 2436 |
2437 free (d->alloc_bits); | |
2438 free (d); | |
2439 } | |
2440 | |
2441 /* The PCH file from F has been mapped at ADDR. Read in any | |
2442 additional data from the file and set up the GC state. */ | |
2443 | |
2444 void | |
2445 ggc_pch_read (FILE *f, void *addr) | |
2446 { | |
2447 struct ggc_pch_ondisk d; | |
2448 size_t alloc_size; | |
2449 struct alloc_zone *zone; | |
2450 struct page_entry *pch_page; | |
2451 char *p; | |
2452 | |
2453 if (fread (&d, sizeof (d), 1, f) != 1) | |
2454 fatal_error ("can%'t read PCH file: %m"); |
0 | 2455 |
2456 alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8); | |
2457 alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT); | |
2458 | |
2459 pch_zone.bytes = d.total; | |
2460 pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes); | |
2461 pch_zone.page = (char *) addr; | |
2462 pch_zone.end = (char *) pch_zone.alloc_bits; | |
2463 | |
2464 /* We've just read in a PCH file. So, every object that used to be | |
2465 allocated is now free. */ | |
2466 #ifdef GATHER_STATISTICS |
2467 zone_allocate_marks (); |
2468 ggc_prune_overhead_list (); |
2469 zone_free_marks (); |
2470 #endif |
2471 |
0 | 2472 for (zone = G.zones; zone; zone = zone->next_zone) |
2473 { | |
2474 struct small_page_entry *page, *next_page; | |
2475 struct large_page_entry *large_page, *next_large_page; | |
2476 | |
2477 zone->allocated = 0; | |
2478 | |
2479 /* Clear the zone's free chunk list. */ | |
2480 memset (zone->free_chunks, 0, sizeof (zone->free_chunks)); | |
2481 zone->high_free_bin = 0; | |
2482 zone->cached_free = NULL; | |
2483 zone->cached_free_size = 0; | |
2484 | |
2485 /* Move all the small pages onto the free list. */ | |
2486 for (page = zone->pages; page != NULL; page = next_page) | |
2487 { | |
2488 next_page = page->next; | |
2489 memset (page->alloc_bits, 0, | |
2490 G.small_page_overhead - PAGE_OVERHEAD); | |
2491 free_small_page (page); | |
2492 } | |
2493 | |
2494 /* Discard all the large pages. */ | |
2495 for (large_page = zone->large_pages; large_page != NULL; | |
2496 large_page = next_large_page) | |
2497 { | |
2498 next_large_page = large_page->next; | |
2499 free_large_page (large_page); | |
2500 } | |
2501 | |
2502 zone->pages = NULL; | |
2503 zone->large_pages = NULL; | |
2504 } | |
2505 | |
2506 /* Allocate the dummy page entry for the PCH, and set all pages | |
2507 mapped into the PCH to reference it. */ | |
2508 pch_page = XCNEW (struct page_entry); | |
2509 pch_page->page = pch_zone.page; | |
2510 pch_page->pch_p = true; | |
2511 | |
2512 for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE) | |
2513 set_page_table_entry (p, pch_page); | |
2514 } |
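/* Illustrative note, not original code: from here on, any pointer
   into the mapped PCH region resolves through the page table to this
   single dummy entry, whose pch_p flag steers ggc_set_mark,
   ggc_marked_p and ggc_get_size to the bitmap-based PCH paths
   defined earlier in this file.  */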