comparison gcc/ggc-common.c @ 55:77e2b8dfacca gcc-4.4.5

update it from 4.4.3 to 4.5.0
author ryoma <e075725@ie.u-ryukyu.ac.jp>
date Fri, 12 Feb 2010 23:39:51 +0900
parents a06113de4d67
children b7f97abdc517
comparing 52:c156f1bd5cd9 with 55:77e2b8dfacca
@@ -28,20 +28,22 @@
 #include "ggc.h"
 #include "toplev.h"
 #include "params.h"
 #include "hosthooks.h"
 #include "hosthooks-def.h"
+#include "plugin.h"
+#include "vec.h"
 
 #ifdef HAVE_SYS_RESOURCE_H
 # include <sys/resource.h>
 #endif
 
 #ifdef HAVE_MMAP_FILE
 # include <sys/mman.h>
 # ifdef HAVE_MINCORE
 /* This is on Solaris.  */
 #  include <sys/types.h>
 # endif
 #endif
 
 #ifndef MAP_FAILED
 # define MAP_FAILED ((void *)-1)
@@ -84,47 +86,111 @@
   (*r->cb) (*slot);
 
   return 1;
 }
 
+
+/* This extra vector of dynamically registered root_tab-s is used by
+   ggc_mark_roots and gives the ability to dynamically add new GGC root
+   tables, for instance from some plugins; this vector is on the heap
+   since it is used by GGC internally.  */
+typedef const struct ggc_root_tab *const_ggc_root_tab_t;
+DEF_VEC_P(const_ggc_root_tab_t);
+DEF_VEC_ALLOC_P(const_ggc_root_tab_t, heap);
+static VEC(const_ggc_root_tab_t, heap) *extra_root_vec;
+
+/* Dynamically register a new GGC root table RT.  This is useful for
+   plugins.  */
+
+void
+ggc_register_root_tab (const struct ggc_root_tab* rt)
+{
+  if (rt)
+    VEC_safe_push (const_ggc_root_tab_t, heap, extra_root_vec, rt);
+}
+
+/* This extra vector of dynamically registered cache_tab-s is used by
+   ggc_mark_roots and gives the ability to dynamically add new GGC cache
+   tables, for instance from some plugins; this vector is on the heap
+   since it is used by GGC internally.  */
+typedef const struct ggc_cache_tab *const_ggc_cache_tab_t;
+DEF_VEC_P(const_ggc_cache_tab_t);
+DEF_VEC_ALLOC_P(const_ggc_cache_tab_t, heap);
+static VEC(const_ggc_cache_tab_t, heap) *extra_cache_vec;
+
+/* Dynamically register a new GGC cache table CT.  This is useful for
+   plugins.  */
+
+void
+ggc_register_cache_tab (const struct ggc_cache_tab* ct)
+{
+  if (ct)
+    VEC_safe_push (const_ggc_cache_tab_t, heap, extra_cache_vec, ct);
+}
+
+/* Scan a hash table that has objects which are to be deleted if they are not
+   already marked.  */
+
+static void
+ggc_scan_cache_tab (const_ggc_cache_tab_t ctp)
+{
+  const struct ggc_cache_tab *cti;
+
+  for (cti = ctp; cti->base != NULL; cti++)
+    if (*cti->base)
+      {
+        ggc_set_mark (*cti->base);
+        htab_traverse_noresize (*cti->base, ggc_htab_delete,
+                                CONST_CAST (void *, (const void *)cti));
+        ggc_set_mark ((*cti->base)->entries);
+      }
+}
+
 /* Iterate through all registered roots and mark each element.  */
 
 void
 ggc_mark_roots (void)
 {
   const struct ggc_root_tab *const *rt;
   const struct ggc_root_tab *rti;
+  const_ggc_root_tab_t rtp;
   const struct ggc_cache_tab *const *ct;
-  const struct ggc_cache_tab *cti;
+  const_ggc_cache_tab_t ctp;
   size_t i;
 
   for (rt = gt_ggc_deletable_rtab; *rt; rt++)
     for (rti = *rt; rti->base != NULL; rti++)
       memset (rti->base, 0, rti->stride);
 
   for (rt = gt_ggc_rtab; *rt; rt++)
     for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
-        (*rti->cb)(*(void **)((char *)rti->base + rti->stride * i));
+        (*rti->cb) (*(void **)((char *)rti->base + rti->stride * i));
+
+  for (i = 0; VEC_iterate (const_ggc_root_tab_t, extra_root_vec, i, rtp); i++)
+    {
+      for (rti = rtp; rti->base != NULL; rti++)
+        for (i = 0; i < rti->nelt; i++)
+          (*rti->cb) (*(void **) ((char *)rti->base + rti->stride * i));
+    }
 
   if (ggc_protect_identifiers)
     ggc_mark_stringpool ();
 
   /* Now scan all hash tables that have objects which are to be deleted if
      they are not already marked.  */
   for (ct = gt_ggc_cache_rtab; *ct; ct++)
-    for (cti = *ct; cti->base != NULL; cti++)
-      if (*cti->base)
-        {
-          ggc_set_mark (*cti->base);
-          htab_traverse_noresize (*cti->base, ggc_htab_delete,
-                                  CONST_CAST (void *, (const void *)cti));
-          ggc_set_mark ((*cti->base)->entries);
-        }
+    ggc_scan_cache_tab (*ct);
+
+  for (i = 0; VEC_iterate (const_ggc_cache_tab_t, extra_cache_vec, i, ctp); i++)
+    ggc_scan_cache_tab (ctp);
 
   if (! ggc_protect_identifiers)
     ggc_purge_stringpool ();
+
+  /* Some plugins may call ggc_set_mark from here.  */
+  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
 }
 
 /* Allocate a block of memory, then clear it.  */
 void *
 ggc_alloc_cleared_stat (size_t size MEM_STAT_DECL)
@@ -455,15 +521,15 @@
 
   mmi.size = ggc_pch_total_size (state.d);
 
   /* Try to arrange things so that no relocation is necessary, but
      don't try very hard.  On most platforms, this will always work,
      and on the rest it's a lot of work to do better.
      (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
      HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
   mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));
 
   ggc_pch_this_base (state.d, mmi.preferred_base);
 
   state.ptrs = XNEWVEC (struct ptr_data *, state.count);
   state.ptrs_i = 0;
   htab_traverse (saving_htab, call_alloc, &state);
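
The comment in this hunk refers to HOST_HOOKS_GT_PCH_GET_ADDRESS; the mmap-based default for that hook (mmap_gt_pch_get_address, later in this same file) is untouched by the commit and so does not appear in the comparison. As a rough sketch of the idea, under a placeholder name and assuming <sys/mman.h> as included above: map the PCH file once wherever the kernel likes, remember that address, and unmap it again.

/* Sketch, not quoted from the diff: let the kernel choose an address for
   SIZE bytes of FD, release the mapping, and report that address as the
   preferred base for loading the PCH later.  */
static void *
sketch_gt_pch_get_address (size_t size, int fd)
{
  void *ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);

  if (ret == (void *) MAP_FAILED)
    return NULL;

  munmap (ret, size);
  return ret;
}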
@@ -642,11 +708,11 @@
 
   return ret;
 }
 
 /* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
    Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
    mapping the data at BASE, -1 if we couldn't.
 
    This version assumes that the kernel honors the START operand of mmap
    even without MAP_FIXED if START through START+SIZE are not currently
    mapped with something.  */
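
The function body that follows this comment is unchanged by the commit and therefore not shown. A sketch of the contract the comment describes, with a placeholder name (the real hook is mmap_gt_pch_use_address): try the preferred address without MAP_FIXED and report through the return value whether the kernel honored it.

/* Sketch: 1 means the PCH data is already usable at BASE,
   -1 means the caller must read the file and relocate.  */
static int
sketch_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  if (size == 0)   /* No PCH to load at all.  */
    return -1;

  addr = mmap (base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
               fd, offset);

  return addr == base ? 1 : -1;
}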
@@ -734,11 +800,11 @@
   /* The heuristic is RAM/8, with a lower bound of 4M and an upper
      bound of 128M (when RAM >= 1GB).  */
   phys_kbytes /= 8;
 
 #if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
   /* Try not to overrun the RSS limit while doing garbage collection.
      The RSS limit is only advisory, so no margin is subtracted.  */
   {
     struct rlimit rlim;
     if (getrlimit (RLIMIT_RSS, &rlim) == 0
         && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
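
The excerpt is cut off mid-condition; the comment already states the whole heuristic (RAM/8, respecting an advisory RSS limit, bounded to 4 MB..128 MB). A sketch of that arithmetic under a placeholder name, assuming the MIN/MAX macros and <sys/resource.h> as included above:

/* Sketch of the heuristic the comment describes, in kilobytes.  */
static double
sketch_min_heapsize_kbytes (double phys_kbytes)
{
  phys_kbytes /= 8;                       /* RAM/8 */

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
        && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024.0);
  }
#endif

  phys_kbytes = MAX (phys_kbytes, 4 * 1024);     /* lower bound: 4 MB   */
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);   /* upper bound: 128 MB */
  return phys_kbytes;
}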
@@ -843,11 +909,11 @@
   loc.line = line;
   loc.function = function;
   if (!loc_hash)
     loc_hash = htab_create (10, hash_descriptor, eq_descriptor, NULL);
 
-  slot = (struct loc_descriptor **) htab_find_slot (loc_hash, &loc, 1);
+  slot = (struct loc_descriptor **) htab_find_slot (loc_hash, &loc, INSERT);
   if (*slot)
     return *slot;
   *slot = XCNEW (struct loc_descriptor);
   (*slot)->file = name;
   (*slot)->line = line;
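
The only change in this last hunk replaces the bare literal 1 with the named enumerator for htab_find_slot's third argument, so the lookup reads as "insert if missing". The enumerator comes from libiberty's hashtab.h, roughly:

/* libiberty hashtab.h: third argument of htab_find_slot.  */
enum insert_option { NO_INSERT, INSERT };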