/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#include "tree-flow.h"

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP

#endif

#ifdef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP

#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif


/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                HOST_PAGE_SIZE_BITS
                     32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |
                         PAGE_L1_BITS    |
                                 |       |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
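
/* Illustration (an added note, not from the original sources): with 4K
   system pages (G.lg_pagesize == 12) and PAGE_L1_BITS == 8, PAGE_L2_BITS
   works out to 32 - 8 - 12 == 12, and a 32-bit pointer such as
   0x0804a123 splits as

     LOOKUP_L1 (0x0804a123) == 0x08      (top 8 bits)
     LOOKUP_L2 (0x0804a123) == 0x04a     (next 12 bits)
     0x123                               (offset within the page, ignored)

   so its page-entry is found at G.lookup[0x08][0x04a].  */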

/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
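
/* Worked example (an illustration added here, not in the original
   sources): for a 24-byte object size on a host where size_t is 32 bits,
   compute_inverse below finds DIV_SHIFT == 3 (24 == 8 * 3) and
   DIV_MULT == 0xaaaaaaab, the multiplicative inverse of 3 modulo 2^32.
   For the in-page offset 48, which is evenly divisible by 24,

     OFFSET_TO_BIT (48, order) == (48 * 0xaaaaaaab) >> 3 == 16 >> 3 == 2

   with the multiplication taken modulo 2^32 -- the third object on the
   page, exactly what 48 / 24 would give, but without a divide.  */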

/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  sizeof (struct var_ann_d),
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_list),
  sizeof (struct tree_ssa_name),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (bitmap_element),
  sizeof (bitmap_head),
  TREE_EXP_SIZE (2),
  RTL_SIZE (2),			/* MEM, PLUS, etc.  */
  RTL_SIZE (9),			/* INSN */
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
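
/* For instance (an illustrative note, not in the original sources): with
   32-bit pointers and the 14 entries in extra_order_size_table above,
   NUM_ORDERS is 32 + 14 == 46; orders 0-31 hold the power-of-two sizes
   and orders 32-45 hold the special sizes.  */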

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    long double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
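
/* For example (illustration only, not from the original sources):
   ROUND_UP (37, 8) == CEIL (37, 8) * 8 == 5 * 8 == 40, and
   ROUND_UP_VALUE (37, 8) == 8 - 1 - ((8 - 1 + 37) % 8) == 7 - 4 == 3,
   the amount that must be added to 37 to reach that multiple.  */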

/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This structure is indexed by that given depth we
     are interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object
  {
    void *object;
    struct free_object *next;
  } *free_object_list;
#endif

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total memory allocated with ggc_alloc.  */
    unsigned long long total_allocated;
    /* Total overhead for memory to be allocated with ggc_alloc.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
#endif
} G;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
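
/* As an illustration (not in the original sources): on a host with
   64-bit longs, a page holding 100 objects needs
   BITMAP_SIZE (100) == CEIL (100, 64) * sizeof (long) == 2 * 8 == 16
   bytes of in_use_p bitmap, i.e. two longs covering 128 bits.  */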

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 256
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128

static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);

/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
                                  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))

/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
        return 0;
      if (table->high_bits == high_bits)
        break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
          (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
              p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
        {
          entry = p;
          memset (entry, 0, page_entry_size);
        }
      else
        free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
         extras on the freelist.  (Can only do this optimization with
         mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);

      /* This loop counts down so that the chain will be in ascending
         memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
        {
          e = XCNEWVAR (struct page_entry, page_entry_size);
          e->order = order;
          e->bytes = G.pagesize;
          e->page = page + (i << G.lg_pagesize);
          e->next = f;
          f = e;
        }

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
         pages therein.  This results in much less memory wastage
         than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
        alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
        alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
        tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
        tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
         us with N-1 usable pages.  We plan to place the page_group
         structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
        group = (page_group *)page - 1;
      else
        {
          /* We magically got an aligned allocation.  Too bad, we have
             to waste a page anyway.  */
          if (tail_slop == 0)
            {
              enda -= G.pagesize;
              tail_slop += G.pagesize;
            }
          gcc_assert (tail_slop >= sizeof (page_group));
          group = (page_group *)enda;
          tail_slop -= sizeof (page_group);
        }

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
        {
          struct page_entry *e, *f = G.free_pages;
          for (a = enda - G.pagesize; a != page; a -= G.pagesize)
            {
              e = XCNEWVAR (struct page_entry, page_entry_size);
              e->order = order;
              e->bytes = G.pagesize;
              e->page = a;
              e->group = group;
              e->next = f;
              f = e;
            }
          G.free_pages = f;
        }
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating page at %p, object size=%lu, data %p-%p\n",
             (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
             page + entry_size - 1);

  return entry;
}

/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
         as new elements are added to by_depth, we note the indices
         of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
        --G.depth_in_use;
    }
}

/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating page at %p, data %p-%p\n", (void *) entry,
             entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
         one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
        {
          next = p->next;
          len += p->bytes;
          free (p);
          p = next;
        }

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
        *pp = p->next;
        free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
        *gp = g->next;
        G.bytes_mapped -= g->alloc_size;
        free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};
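
/* Illustrative reading of the table above (an added note, not from the
   original sources): size_lookup[10] == 4, so before init_ggc remaps
   entries for the special orders, a 10-byte request is rounded up to
   OBJECT_SIZE (4) == 16 bytes.  init_ggc later overwrites the slots whose
   sizes fit one of the extra_order_size_table orders, so the table as
   compiled in is only the power-of-two starting point.  */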

/* Typed allocation function.  Does nothing special in this collector.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
                      MEM_STAT_DECL)
{
  return ggc_alloc_stat (size PASS_MEM_STAT);
}

/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  if (size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (size > (object_size = OBJECT_SIZE (order)))
        order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
         way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
        push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
         the only entry, then we must update the PREV pointer of the
         ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
        G.page_tails[order] = new_entry;
      else
        entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
         entry at the head of the list always has a NULL pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
         in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
         to locate a clear bit in the in-use bitmap.  We've made sure
         that the one-past-the-end bit is always set, so if the hint
         has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
        {
          word = bit = 0;
          while (~entry->in_use_p[word] == 0)
            ++word;

#if GCC_VERSION >= 3004
          bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
          while ((entry->in_use_p[word] >> bit) & 1)
            ++bit;
#endif

          hint = word * HOST_BITS_PER_LONG + bit;
        }

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
         The new page at the head of the list will have NULL in
         its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
#ifdef GATHER_STATISTICS
  ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
                       result PASS_MEM_STAT);
#endif

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
                                                object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

#ifdef GATHER_STATISTICS
  {
    size_t overhead = object_size - size;

    G.stats.total_overhead += overhead;
    G.stats.total_allocated += object_size;
    G.stats.total_overhead_per_order[order] += overhead;
    G.stats.total_allocated_per_order[order] += object_size;

    if (size <= 32)
      {
        G.stats.total_overhead_under32 += overhead;
        G.stats.total_allocated_under32 += object_size;
      }
    if (size <= 64)
      {
        G.stats.total_overhead_under64 += overhead;
        G.stats.total_allocated_under64 += object_size;
      }
    if (size <= 128)
      {
        G.stats.total_overhead_under128 += overhead;
        G.stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
             (unsigned long) size, (unsigned long) object_size, result,
             (void *) entry);

  return result;
}

/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;
  unsigned long offset;

  if (!p || !ggc_allocated_p (p))
    return;

  /* Look up the page on which the object is alloced.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  Note that because a char* might
     point to the middle of an object, we need special code here to
     make sure P points to the start of an object.  */
  offset = ((const char *) p - entry->page) % object_size_table[entry->order];
  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
         of an allocated object.  We assume it points to the middle of
         a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return;
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}

/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Freeing object, actual size=%lu, at %p on %p\n",
             (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
        page_entry *p, *q;

        /* If the page is completely full, then it's supposed to
           be after all pages that aren't.  Since we've freed one
           object from a page that was full, we need to move the
           page to the head of the list.

           PE is the node we want to move.  Q is the previous node
           and P is the next node in the list.  */
        q = pe->prev;
        if (q && q->num_free_objects == 0)
          {
            p = pe->next;

            q->next = p;

            /* If PE was at the end of the list, then Q becomes the
               new end of the list.  If PE was not the end of the
               list, then we need to update the PREV field for P.  */
            if (!p)
              G.page_tails[order] = q;
            else
              p->prev = q;

            /* Move PE to the head of the list.  */
            pe->next = G.pages[order];
            pe->prev = NULL;
            G.pages[order]->prev = pe;
            G.pages[order] = pe;
          }

        /* Reset the hint bit to point to the only free object.  */
        pe->next_bit_hint = bit_offset;
      }
  }
#endif
}

/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
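
/* Illustrative trace (an added note, not part of the original sources),
   using 8-bit arithmetic for brevity even though the real computation is
   done in size_t: for OBJECT_SIZE == 24 the loop above strips e == 3
   factors of two, leaving size == 3, and the Newton-style iteration
   converges as

     inv = 3    -> 3 * (2 - 3*3)     == 235 (mod 256)
     inv = 235  -> 235 * (2 - 235*3) == 171 (mod 256)
     inv = 171,    171 * 3 == 513    ==   1 (mod 256)

   so 171 (0xab) is the inverse of 3 modulo 2^8; each pass roughly doubles
   the number of correct low-order bits, and with a 32-bit size_t the same
   process ends at 0xaaaaaaab.  */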
|
|
1492
|
|
1493 /* Initialize the ggc-mmap allocator. */
|
|
1494 void
|
|
1495 init_ggc (void)
|
|
1496 {
|
|
1497 unsigned order;
|
|
1498
|
|
1499 G.pagesize = getpagesize();
|
|
1500 G.lg_pagesize = exact_log2 (G.pagesize);
|
|
1501
|
|
1502 #ifdef HAVE_MMAP_DEV_ZERO
|
|
1503 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
|
|
1504 if (G.dev_zero_fd == -1)
|
|
1505 internal_error ("open /dev/zero: %m");
|
|
1506 #endif
|
|
1507
|
|
1508 #if 0
|
|
1509 G.debug_file = fopen ("ggc-mmap.debug", "w");
|
|
1510 #else
|
|
1511 G.debug_file = stdout;
|
|
1512 #endif
|
|
1513
|
|
1514 #ifdef USING_MMAP
|
|
1515 /* StunOS has an amazing off-by-one error for the first mmap allocation
|
|
1516 after fiddling with RLIMIT_STACK. The result, as hard as it is to
|
|
1517 believe, is an unaligned page allocation, which would cause us to
|
|
1518 hork badly if we tried to use it. */
|
|
1519 {
|
|
1520 char *p = alloc_anon (NULL, G.pagesize);
|
|
1521 struct page_entry *e;
|
|
1522 if ((size_t)p & (G.pagesize - 1))
|
|
1523 {
|
|
1524 /* How losing. Discard this one and try another. If we still
|
|
1525 can't get something useful, give up. */
|
|
1526
|
|
1527 p = alloc_anon (NULL, G.pagesize);
|
|
1528 gcc_assert (!((size_t)p & (G.pagesize - 1)));
|
|
1529 }
|
|
1530
|
|
1531 /* We have a good page, might as well hold onto it... */
|
|
1532 e = XCNEW (struct page_entry);
|
|
1533 e->bytes = G.pagesize;
|
|
1534 e->page = p;
|
|
1535 e->next = G.free_pages;
|
|
1536 G.free_pages = e;
|
|
1537 }
|
|
1538 #endif
|
|
1539
|
|
1540 /* Initialize the object size table. */
|
|
1541 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
|
|
1542 object_size_table[order] = (size_t) 1 << order;
|
|
1543 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
|
|
1544 {
|
|
1545 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
|
|
1546
|
|
1547 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
|
|
1548 so that we're sure of getting aligned memory. */
|
|
1549 s = ROUND_UP (s, MAX_ALIGNMENT);
|
|
1550 object_size_table[order] = s;
|
|
1551 }
|
|
1552
|
|
1553 /* Initialize the objects-per-page and inverse tables. */
|
|
1554 for (order = 0; order < NUM_ORDERS; ++order)
|
|
1555 {
|
|
1556 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
|
|
1557 if (objects_per_page_table[order] == 0)
|
|
1558 objects_per_page_table[order] = 1;
|
|
1559 compute_inverse (order);
|
|
1560 }
|
|
1561
|
|
1562 /* Reset the size_lookup array to put appropriately sized objects in
|
|
1563 the special orders. All objects bigger than the previous power
|
|
1564 of two, but no greater than the special size, should go in the
|
|
1565 new order. */
|
|
1566 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
|
|
1567 {
|
|
1568 int o;
|
|
1569 int i;
|
|
1570
|
|
1571 i = OBJECT_SIZE (order);
|
|
1572 if (i >= NUM_SIZE_LOOKUP)
|
|
1573 continue;
|
|
1574
|
|
1575 for (o = size_lookup[i]; o == size_lookup [i]; --i)
|
|
1576 size_lookup[i] = order;
|
|
1577 }

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = XNEWVEC (unsigned int, G.depth_max);

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
}

/* Start a new GGC zone.  */

struct alloc_zone *
new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Destroy a GGC zone.  */
void
destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
{
}

/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (page_entry *p)
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
                 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
         context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
        p->num_free_objects -= (j & 1);
    }

  gcc_assert (p->num_free_objects < num_objects);
}
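
/* For example, if a merged bitmap word ends up as 0x13 (binary 10011),
   the bit-counting loop above subtracts 3 from num_free_objects for
   that word, one for each set bit; the past-the-end sentinel bit counts
   too, which is why num_objects was incremented first.  */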

/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects = OBJECTS_IN_PAGE (p);
          size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

          /* The data should be page-aligned.  */
          gcc_assert (!((size_t) p->page & (G.pagesize - 1)));

          /* Pages that aren't in the topmost context are not collected;
             nevertheless, we need their in-use bit vectors to store GC
             marks.  So, back them up first.  */
          if (p->context_depth < G.context_depth)
            {
              if (! save_in_use_p (p))
                save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
              memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
            }

          /* Reset the number of free objects and clear the
             in-use bits.  These will be adjusted by mark_obj.  */
          p->num_free_objects = num_objects;
          memset (p->in_use_p, 0, bitmap_size);

          /* Make sure the one-past-the-end bit is always set.  */
          p->in_use_p[num_objects / HOST_BITS_PER_LONG]
            = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
        }
    }
}
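
/* Illustration of that last step: on a host where HOST_BITS_PER_LONG is
   64, a page holding 100 objects keeps bit index 100 set, i.e. bit 36 of
   the second long of in_use_p.  Re-setting it after the memset
   guarantees at least one set bit per page, which is what lets
   ggc_recalculate_in_use_p assert that num_free_objects stays strictly
   below its inflated num_objects count.  */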

/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static void
sweep_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
         placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects;
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
        continue;

      previous = NULL;
      do
        {
          page_entry *next = p->next;

          /* Loop until all entries have been examined.  */
          done = (p == last);

          num_objects = OBJECTS_IN_PAGE (p);

          /* Add all live objects on this page to the count of
             allocated memory.  */
          live_objects = num_objects - p->num_free_objects;

          G.allocated += OBJECT_SIZE (order) * live_objects;

          /* Only objects on pages in the topmost context should get
             collected.  */
          if (p->context_depth < G.context_depth)
            ;

          /* Remove the page if it's empty.  */
          else if (live_objects == 0)
            {
              /* If P was the first page in the list, then NEXT
                 becomes the new first page in the list, otherwise
                 splice P out of the forward pointers.  */
              if (! previous)
                G.pages[order] = next;
              else
                previous->next = next;

              /* Splice P out of the back pointers too.  */
              if (next)
                next->prev = previous;

              /* Are we removing the last element?  */
              if (p == G.page_tails[order])
                G.page_tails[order] = previous;
              free_page (p);
              p = previous;
            }

          /* If the page is full, move it to the end.  */
          else if (p->num_free_objects == 0)
            {
              /* Don't move it if it's already at the end.  */
              if (p != G.page_tails[order])
                {
                  /* Move p to the end of the list.  */
                  p->next = NULL;
                  p->prev = G.page_tails[order];
                  G.page_tails[order]->next = p;

                  /* Update the tail pointer...  */
                  G.page_tails[order] = p;

                  /* ... and the head pointer, if necessary.  */
                  if (! previous)
                    G.pages[order] = next;
                  else
                    previous->next = next;

                  /* And update the backpointer in NEXT if necessary.  */
                  if (next)
                    next->prev = previous;

                  p = previous;
                }
            }

          /* If we've fallen through to here, it's a page in the
             topmost context that is neither full nor empty.  Such a
             page must precede pages at lesser context depth in the
             list, so move it to the head.  */
          else if (p != G.pages[order])
            {
              previous->next = p->next;

              /* Update the backchain in the next node if it exists.  */
              if (p->next)
                p->next->prev = previous;

              /* Move P to the head of the list.  */
              p->next = G.pages[order];
              p->prev = NULL;
              G.pages[order]->prev = p;

              /* Update the head pointer.  */
              G.pages[order] = p;

              /* Are we moving the last element?  */
              if (G.page_tails[order] == p)
                G.page_tails[order] = previous;
              p = previous;
            }

          previous = p;
          p = next;
        }
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
         other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
        if (p->context_depth != G.context_depth)
          ggc_recalculate_in_use_p (p);
    }
}
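
/* Accounting example for the sweep (illustrative numbers): a page of an
   order whose OBJECT_SIZE is 32 and that has 100 live objects
   contributes 32 * 100 = 3200 bytes to G.allocated, regardless of how
   much of the page is actually untouched.  */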

#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static void
poison_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects;
          size_t i;

          if (p->context_depth != G.context_depth)
            /* Since we don't do any collection for pages in pushed
               contexts, there's no need to do any poisoning.  And
               besides, the IN_USE_P array isn't valid until we pop
               contexts.  */
            continue;

          num_objects = OBJECTS_IN_PAGE (p);
          for (i = 0; i < num_objects; i++)
            {
              size_t word, bit;
              word = i / HOST_BITS_PER_LONG;
              bit = i % HOST_BITS_PER_LONG;
              if (((p->in_use_p[word] >> bit) & 1) == 0)
                {
                  char *object = p->page + i * size;

                  /* Keep poison-by-write when we expect to use Valgrind,
                     so the exact same memory semantics is kept, in case
                     there are memory errors.  We override this request
                     below.  */
                  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
                                                                 size));
                  memset (object, 0xa5, size);

                  /* Drop the handle to avoid handle leak.  */
                  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
                }
            }
        }
    }
}
#else
#define poison_pages()
#endif
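
/* With checking enabled, a use-after-free of a collected object is
   therefore likely to show up as the 0xa5 poison pattern (for instance
   a pointer field reading back as 0xa5a5a5a5), and under Valgrind the
   MAKE_MEM_NOACCESS request above turns any such access into an
   immediate report.  */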

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* Validate that the reportedly free objects actually are.  */

static void
validate_free_objects (void)
{
  struct free_object *f, *next, *still_free = NULL;

  for (f = G.free_object_list; f ; f = next)
    {
      page_entry *pe = lookup_page_table_entry (f->object);
      size_t bit, word;

      bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
      word = bit / HOST_BITS_PER_LONG;
      bit = bit % HOST_BITS_PER_LONG;
      next = f->next;

      /* Make certain it isn't visible from any root.  Notice that we
         do this check before sweep_pages merges save_in_use_p.  */
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));

      /* If the object comes from an outer context, then retain the
         free_object entry, so that we can verify that the address
         isn't live on the stack in some outer context.  */
      if (pe->context_depth != G.context_depth)
        {
          f->next = still_free;
          still_free = f;
        }
      else
        free (f);
    }

  G.free_object_list = still_free;
}
#else
#define validate_free_objects()
#endif

/* Top level mark-and-sweep routine.  */

void
ggc_collect (void)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

  if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
    return;

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "BEGIN COLLECTING\n");

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;

  clear_marks ();
  ggc_mark_roots ();
#ifdef GATHER_STATISTICS
  ggc_prune_overhead_list ();
#endif
  poison_pages ();
  validate_free_objects ();
  sweep_pages ();

  G.allocated_last_gc = G.allocated;

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "END COLLECTING\n");
}
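
/* Worked example of the trigger above (parameter values are purely
   illustrative, not the defaults): with GGC_MIN_HEAPSIZE = 4096 (KB)
   and GGC_MIN_EXPAND = 30, allocated_last_gc is at least 4 MB and
   min_expand is 30% of that, so a collection only happens once
   G.allocated exceeds roughly 1.3 times the post-GC footprint, or when
   ggc_force_collect is set.  */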

/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
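
/* For example, SCALE (2048) is 2048 with STAT_LABEL ' ' (plain bytes),
   SCALE (50000) is 48 with label 'k', and SCALE (20971520) is 20 with
   label 'M'; a value is only scaled once it exceeds ten units of the
   next magnitude, which keeps small numbers exact.  */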

void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-5s %10s %10s %10s\n",
           "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
        continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
         this size, and how many of them are actually in use.  Also figure
         out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
        {
          allocated += p->bytes;
          in_use +=
            (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

          overhead += (sizeof (page_entry) - sizeof (long)
                       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
        }
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
               (unsigned long) OBJECT_SIZE (i),
               SCALE (allocated), STAT_LABEL (allocated),
               SCALE (in_use), STAT_LABEL (in_use),
               SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
           SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
           SCALE (G.allocated), STAT_LABEL (G.allocated),
           SCALE (total_overhead), STAT_LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    fprintf (stderr, "Total Overhead: %10lld\n",
             G.stats.total_overhead);
    fprintf (stderr, "Total Allocated: %10lld\n",
             G.stats.total_allocated);

    fprintf (stderr, "Total Overhead under 32B: %10lld\n",
             G.stats.total_overhead_under32);
    fprintf (stderr, "Total Allocated under 32B: %10lld\n",
             G.stats.total_allocated_under32);
    fprintf (stderr, "Total Overhead under 64B: %10lld\n",
             G.stats.total_overhead_under64);
    fprintf (stderr, "Total Allocated under 64B: %10lld\n",
             G.stats.total_allocated_under64);
    fprintf (stderr, "Total Overhead under 128B: %10lld\n",
             G.stats.total_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B: %10lld\n",
             G.stats.total_allocated_under128);

    for (i = 0; i < NUM_ORDERS; i++)
      if (G.stats.total_allocated_per_order[i])
        {
          fprintf (stderr, "Total Overhead page size %7lu: %10lld\n",
                   (unsigned long) OBJECT_SIZE (i),
                   G.stats.total_overhead_per_order[i]);
          fprintf (stderr, "Total Allocated page size %7lu: %10lld\n",
                   (unsigned long) OBJECT_SIZE (i),
                   G.stats.total_allocated_per_order[i]);
        }
  }
#endif
}

struct ggc_pch_data
{
  struct ggc_pch_ondisk
  {
    unsigned totals[NUM_ORDERS];
  } d;
  size_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED,
                      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  d->d.totals[order]++;
}
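
/* Example of the order computation above: a 100-byte object is below
   NUM_SIZE_LOOKUP, so its order comes straight from size_lookup[100];
   a 5000-byte object falls back to the loop, which starts at order 10
   (1024 bytes, since the small orders are powers of two) and walks up
   to the first order whose OBJECT_SIZE is at least 5000.  */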

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
  return a;
}
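
/* For instance (illustrative sizes), with a 4096-byte page size, 1000
   counted objects of a 24-byte order need 24000 bytes, which rounds up
   to 24576 bytes (six pages); each order's region is padded to a page
   boundary in the same way before the regions are laid out back to
   back.  */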

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  size_t a = (size_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
    }
}
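
/* So base[0] is the mapping address itself, base[1] starts at the next
   page boundary after order 0's objects, and so on; ggc_pch_alloc_object
   below then hands out addresses by bumping base[order] one OBJECT_SIZE
   at a time.  */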

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED,
                      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256];

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
     object out to OBJECT_SIZE(order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
         than most padding requests as the source for our null bytes.  This
         permits us to do the padding with fwrite() rather than fseek(), and
         limits the chance the OS may try to flush any outstanding writes.  */
      if (padding <= sizeof (emptyBytes))
        {
          if (fwrite (emptyBytes, 1, padding, f) != padding)
            fatal_error ("can't write PCH file");
        }
      else
        {
          /* Larger than our buffer?  Just default to fseek.  */
          if (fseek (f, padding, SEEK_CUR) != 0)
            fatal_error ("can't write PCH file");
        }
    }

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
                                   G.pagesize),
                SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}
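
/* Padding example (illustrative sizes): a 19-byte string assigned to a
   24-byte order writes the 19 data bytes followed by 5 zero bytes taken
   from emptyBytes, so the on-disk layout matches the OBJECT_SIZE slots
   that ggc_pch_alloc_object handed out; only paddings larger than 256
   bytes fall back to fseek.  */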

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}

/* Move the PCH PTE entries just added to the end of by_depth to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  memcpy (&new_by_depth[0],
          &G.by_depth[count_old_page_tables],
          count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
          &G.by_depth[0],
          count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
          &G.save_in_use[count_old_page_tables],
          count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
          &G.save_in_use[0],
          count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}
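
/* A small example: with three pre-existing PTEs O0 O1 O2 and two PCH
   PTEs N0 N1 appended by ggc_pch_read, by_depth goes from
   O0 O1 O2 N0 N1 to N0 N1 O0 O1 O2, every index_by_depth is rewritten
   to match, and push_depth (2) records that the depth-1 (non-PCH)
   entries now begin at index 2.  */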

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = (char *) addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif
  /* Since we free all the allocated objects, the free list becomes
     useless.  Validate it now, which will also clear it.  */
  validate_free_objects ();

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
        p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
        continue;

      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
      num_objs = bytes / OBJECT_SIZE (i);
      entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
                                            - sizeof (long)
                                            + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      for (j = 0;
           j + HOST_BITS_PER_LONG <= num_objs + 1;
           j += HOST_BITS_PER_LONG)
        entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
        entry->in_use_p[j / HOST_BITS_PER_LONG]
          |= 1L << (j % HOST_BITS_PER_LONG);
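
      /* For a page holding, say, 70 objects on a 64-bit host, the two
         loops above set the first in_use_p word to all ones and bits
         0..6 of the second word, bit 6 being the one-past-the-end
         sentinel; every PCH object is thus permanently marked in use,
         matching the num_free_objects of 0 set above.  */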

      for (pte = entry->page;
           pte < entry->page + entry->bytes;
           pte += G.pagesize)
        set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
        G.page_tails[i]->next = entry;
      else
        G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
         end of the varrays; later, we will move it to the front of
         the varrays, as the PCH page tables are at context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}