Mercurial > hg > CbC > CbC_gcc
annotate gcc/unwind-dw2-fde.c @ 86:12b3180c7d07
modify calls.c.
author | Nobuyasu Oshiro <dimolto@cr.ie.u-ryukyu.ac.jp> |
---|---|
date | Thu, 15 Dec 2011 02:37:13 +0900 |
parents | b7f97abdc517 |
children |
rev | line source |
---|---|
0 | 1 /* Subroutines needed for unwinding stack frames for exception handling. */ |
2 /* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008, | |
63
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
3 2009, 2010 Free Software Foundation, Inc. |
0 | 4 Contributed by Jason Merrill <jason@cygnus.com>. |
5 | |
6 This file is part of GCC. | |
7 | |
8 GCC is free software; you can redistribute it and/or modify it under | |
9 the terms of the GNU General Public License as published by the Free | |
10 Software Foundation; either version 3, or (at your option) any later | |
11 version. | |
12 | |
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
16 for more details. | |
17 | |
18 Under Section 7 of GPL version 3, you are granted additional | |
19 permissions described in the GCC Runtime Library Exception, version | |
20 3.1, as published by the Free Software Foundation. | |
21 | |
22 You should have received a copy of the GNU General Public License and | |
23 a copy of the GCC Runtime Library Exception along with this program; | |
24 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
25 <http://www.gnu.org/licenses/>. */ | |
26 | |
27 #ifndef _Unwind_Find_FDE | |
28 #include "tconfig.h" | |
29 #include "tsystem.h" | |
30 #include "coretypes.h" | |
31 #include "tm.h" | |
32 #include "dwarf2.h" | |
33 #include "unwind.h" | |
34 #define NO_BASE_OF_ENCODED_VALUE | |
35 #include "unwind-pe.h" | |
36 #include "unwind-dw2-fde.h" | |
37 #include "gthr.h" | |
38 #endif | |
39 | |
40 /* The unseen_objects list contains objects that have been registered | |
41 but not yet categorized in any way. The seen_objects list has had | |
42 its pc_begin and count fields initialized at minimum, and is sorted | |
43 by decreasing value of pc_begin. */ | |
44 static struct object *unseen_objects; | |
45 static struct object *seen_objects; | |
46 | |
47 #ifdef __GTHREAD_MUTEX_INIT | |
48 static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT; | |
49 #else | |
50 static __gthread_mutex_t object_mutex; | |
51 #endif | |
52 | |
#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
/* Helper for __gthread_once: invoke the target's runtime mutex
   initializer on object_mutex.  */
static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

/* Ensure object_mutex is initialized exactly once, even if several
   threads race to register frame info simultaneously.  */
static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
/* Static initialization (or no threads) -- nothing to do at runtime.  */
#define init_object_mutex_once()
#endif
69 | |
70 /* Called from crtbegin.o to register the unwind info for an object. */ | |
71 | |
72 void | |
73 __register_frame_info_bases (const void *begin, struct object *ob, | |
74 void *tbase, void *dbase) | |
75 { | |
76 /* If .eh_frame is empty, don't register at all. */ | |
77 if ((const uword *) begin == 0 || *(const uword *) begin == 0) | |
78 return; | |
79 | |
80 ob->pc_begin = (void *)-1; | |
81 ob->tbase = tbase; | |
82 ob->dbase = dbase; | |
83 ob->u.single = begin; | |
84 ob->s.i = 0; | |
85 ob->s.b.encoding = DW_EH_PE_omit; | |
86 #ifdef DWARF2_OBJECT_END_PTR_EXTENSION | |
87 ob->fde_end = NULL; | |
88 #endif | |
89 | |
90 init_object_mutex_once (); | |
91 __gthread_mutex_lock (&object_mutex); | |
92 | |
93 ob->next = unseen_objects; | |
94 unseen_objects = ob; | |
95 | |
96 __gthread_mutex_unlock (&object_mutex); | |
97 } | |
98 | |
/* Convenience wrapper: register unwind info with no text/data bases
   (only absolute and pc-relative encodings will resolve).  */
void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}
104 | |
105 void | |
106 __register_frame (void *begin) | |
107 { | |
108 struct object *ob; | |
109 | |
110 /* If .eh_frame is empty, don't register at all. */ | |
111 if (*(uword *) begin == 0) | |
112 return; | |
113 | |
114 ob = malloc (sizeof (struct object)); | |
115 __register_frame_info (begin, ob); | |
116 } | |
117 | |
118 /* Similar, but BEGIN is actually a pointer to a table of unwind entries | |
119 for different translation units. Called from the file generated by | |
120 collect2. */ | |
121 | |
122 void | |
123 __register_frame_info_table_bases (void *begin, struct object *ob, | |
124 void *tbase, void *dbase) | |
125 { | |
126 ob->pc_begin = (void *)-1; | |
127 ob->tbase = tbase; | |
128 ob->dbase = dbase; | |
129 ob->u.array = begin; | |
130 ob->s.i = 0; | |
131 ob->s.b.from_array = 1; | |
132 ob->s.b.encoding = DW_EH_PE_omit; | |
133 | |
134 init_object_mutex_once (); | |
135 __gthread_mutex_lock (&object_mutex); | |
136 | |
137 ob->next = unseen_objects; | |
138 unseen_objects = ob; | |
139 | |
140 __gthread_mutex_unlock (&object_mutex); | |
141 } | |
142 | |
/* Table-registration wrapper with no text/data bases.  */
void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}
148 | |
149 void | |
150 __register_frame_table (void *begin) | |
151 { | |
152 struct object *ob = malloc (sizeof (struct object)); | |
153 __register_frame_info_table (begin, ob); | |
154 } | |
155 | |
156 /* Called from crtbegin.o to deregister the unwind info for an object. */ | |
157 /* ??? Glibc has for a while now exported __register_frame_info and | |
158 __deregister_frame_info. If we call __register_frame_info_bases | |
159 from crtbegin (wherein it is declared weak), and this object does | |
160 not get pulled from libgcc.a for other reasons, then the | |
161 invocation of __deregister_frame_info will be resolved from glibc. | |
162 Since the registration did not happen there, we'll die. | |
163 | |
164 Therefore, declare a new deregistration entry point that does the | |
165 exact same thing, but will resolve to the same library as | |
166 implements __register_frame_info_bases. */ | |
167 | |
168 void * | |
169 __deregister_frame_info_bases (const void *begin) | |
170 { | |
171 struct object **p; | |
172 struct object *ob = 0; | |
173 | |
174 /* If .eh_frame is empty, we haven't registered. */ | |
175 if ((const uword *) begin == 0 || *(const uword *) begin == 0) | |
176 return ob; | |
177 | |
178 init_object_mutex_once (); | |
179 __gthread_mutex_lock (&object_mutex); | |
180 | |
181 for (p = &unseen_objects; *p ; p = &(*p)->next) | |
182 if ((*p)->u.single == begin) | |
183 { | |
184 ob = *p; | |
185 *p = ob->next; | |
186 goto out; | |
187 } | |
188 | |
189 for (p = &seen_objects; *p ; p = &(*p)->next) | |
190 if ((*p)->s.b.sorted) | |
191 { | |
192 if ((*p)->u.sort->orig_data == begin) | |
193 { | |
194 ob = *p; | |
195 *p = ob->next; | |
196 free (ob->u.sort); | |
197 goto out; | |
198 } | |
199 } | |
200 else | |
201 { | |
202 if ((*p)->u.single == begin) | |
203 { | |
204 ob = *p; | |
205 *p = ob->next; | |
206 goto out; | |
207 } | |
208 } | |
209 | |
210 out: | |
211 __gthread_mutex_unlock (&object_mutex); | |
212 gcc_assert (ob); | |
213 return (void *) ob; | |
214 } | |
215 | |
/* Public deregistration entry point; see comment above
   __deregister_frame_info_bases.  */
void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}
221 | |
222 void | |
223 __deregister_frame (void *begin) | |
224 { | |
225 /* If .eh_frame is empty, we haven't registered. */ | |
226 if (*(uword *) begin != 0) | |
227 free (__deregister_frame_info (begin)); | |
228 } | |
229 | |
230 | |
231 /* Like base_of_encoded_value, but take the base from a struct object | |
232 instead of an _Unwind_Context. */ | |
233 | |
234 static _Unwind_Ptr | |
235 base_from_object (unsigned char encoding, struct object *ob) | |
236 { | |
237 if (encoding == DW_EH_PE_omit) | |
238 return 0; | |
239 | |
240 switch (encoding & 0x70) | |
241 { | |
242 case DW_EH_PE_absptr: | |
243 case DW_EH_PE_pcrel: | |
244 case DW_EH_PE_aligned: | |
245 return 0; | |
246 | |
247 case DW_EH_PE_textrel: | |
248 return (_Unwind_Ptr) ob->tbase; | |
249 case DW_EH_PE_datarel: | |
250 return (_Unwind_Ptr) ob->dbase; | |
251 default: | |
252 gcc_unreachable (); | |
253 } | |
254 } | |
255 | |
256 /* Return the FDE pointer encoding from the CIE. */ | |
257 /* ??? This is a subset of extract_cie_info from unwind-dw2.c. */ | |
258 | |
259 static int | |
260 get_cie_encoding (const struct dwarf_cie *cie) | |
261 { | |
262 const unsigned char *aug, *p; | |
263 _Unwind_Ptr dummy; | |
264 _uleb128_t utmp; | |
265 _sleb128_t stmp; | |
266 | |
267 aug = cie->augmentation; | |
63
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
268 p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string. */ |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
269 if (__builtin_expect (cie->version >= 4, 0)) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
270 { |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
271 if (p[0] != sizeof (void *) || p[1] != 0) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
272 return DW_EH_PE_omit; /* We are not prepared to handle unexpected |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
273 address sizes or segment selectors. */ |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
274 p += 2; /* Skip address size and segment size. */ |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
275 } |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
276 |
0 | 277 if (aug[0] != 'z') |
278 return DW_EH_PE_absptr; | |
279 | |
280 p = read_uleb128 (p, &utmp); /* Skip code alignment. */ | |
281 p = read_sleb128 (p, &stmp); /* Skip data alignment. */ | |
282 if (cie->version == 1) /* Skip return address column. */ | |
283 p++; | |
284 else | |
285 p = read_uleb128 (p, &utmp); | |
286 | |
287 aug++; /* Skip 'z' */ | |
288 p = read_uleb128 (p, &utmp); /* Skip augmentation length. */ | |
289 while (1) | |
290 { | |
291 /* This is what we're looking for. */ | |
292 if (*aug == 'R') | |
293 return *p; | |
294 /* Personality encoding and pointer. */ | |
295 else if (*aug == 'P') | |
296 { | |
297 /* ??? Avoid dereferencing indirect pointers, since we're | |
298 faking the base address. Gotta keep DW_EH_PE_aligned | |
299 intact, however. */ | |
300 p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy); | |
301 } | |
302 /* LSDA encoding. */ | |
303 else if (*aug == 'L') | |
304 p++; | |
305 /* Otherwise end of string, or unknown augmentation. */ | |
306 else | |
307 return DW_EH_PE_absptr; | |
308 aug++; | |
309 } | |
310 } | |
311 | |
/* Return the pc_begin encoding of FDE F, taken from its parent CIE.  */
static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}
317 | |
318 | |
319 /* Sorting an array of FDEs by address. | |
320 (Ideally we would have the linker sort the FDEs so we don't have to do | |
321 it at run time. But the linkers are not yet prepared for this.) */ | |
322 | |
323 /* Comparison routines. Three variants of increasing complexity. */ | |
324 | |
325 static int | |
326 fde_unencoded_compare (struct object *ob __attribute__((unused)), | |
327 const fde *x, const fde *y) | |
328 { | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
36
diff
changeset
|
329 _Unwind_Ptr x_ptr, y_ptr; |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
36
diff
changeset
|
330 memcpy (&x_ptr, x->pc_begin, sizeof (_Unwind_Ptr)); |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
36
diff
changeset
|
331 memcpy (&y_ptr, y->pc_begin, sizeof (_Unwind_Ptr)); |
0 | 332 |
333 if (x_ptr > y_ptr) | |
334 return 1; | |
335 if (x_ptr < y_ptr) | |
336 return -1; | |
337 return 0; | |
338 } | |
339 | |
340 static int | |
341 fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y) | |
342 { | |
343 _Unwind_Ptr base, x_ptr, y_ptr; | |
344 | |
345 base = base_from_object (ob->s.b.encoding, ob); | |
346 read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr); | |
347 read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr); | |
348 | |
349 if (x_ptr > y_ptr) | |
350 return 1; | |
351 if (x_ptr < y_ptr) | |
352 return -1; | |
353 return 0; | |
354 } | |
355 | |
356 static int | |
357 fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y) | |
358 { | |
359 int x_encoding, y_encoding; | |
360 _Unwind_Ptr x_ptr, y_ptr; | |
361 | |
362 x_encoding = get_fde_encoding (x); | |
363 read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob), | |
364 x->pc_begin, &x_ptr); | |
365 | |
366 y_encoding = get_fde_encoding (y); | |
367 read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob), | |
368 y->pc_begin, &y_ptr); | |
369 | |
370 if (x_ptr > y_ptr) | |
371 return 1; | |
372 if (x_ptr < y_ptr) | |
373 return -1; | |
374 return 0; | |
375 } | |
376 | |
377 typedef int (*fde_compare_t) (struct object *, const fde *, const fde *); | |
378 | |
379 | |
380 /* This is a special mix of insertion sort and heap sort, optimized for | |
381 the data sets that actually occur. They look like | |
382 101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130. | |
383 I.e. a linearly increasing sequence (coming from functions in the text | |
384 section), with additionally a few unordered elements (coming from functions | |
385 in gnu_linkonce sections) whose values are higher than the values in the | |
386 surrounding linear sequence (but not necessarily higher than the values | |
387 at the end of the linear sequence!). | |
388 The worst-case total run time is O(N) + O(n log (n)), where N is the | |
389 total number of FDEs and n is the number of erratic ones. */ | |
390 | |
391 struct fde_accumulator | |
392 { | |
393 struct fde_vector *linear; | |
394 struct fde_vector *erratic; | |
395 }; | |
396 | |
397 static inline int | |
398 start_fde_sort (struct fde_accumulator *accu, size_t count) | |
399 { | |
400 size_t size; | |
401 if (! count) | |
402 return 0; | |
403 | |
404 size = sizeof (struct fde_vector) + sizeof (const fde *) * count; | |
405 if ((accu->linear = malloc (size))) | |
406 { | |
407 accu->linear->count = 0; | |
408 if ((accu->erratic = malloc (size))) | |
409 accu->erratic->count = 0; | |
410 return 1; | |
411 } | |
412 else | |
413 return 0; | |
414 } | |
415 | |
416 static inline void | |
417 fde_insert (struct fde_accumulator *accu, const fde *this_fde) | |
418 { | |
419 if (accu->linear) | |
420 accu->linear->array[accu->linear->count++] = this_fde; | |
421 } | |
422 | |
423 /* Split LINEAR into a linear sequence with low values and an erratic | |
424 sequence with high values, put the linear one (of longest possible | |
425 length) into LINEAR and the erratic one into ERRATIC. This is O(N). | |
426 | |
427 Because the longest linear sequence we are trying to locate within the | |
428 incoming LINEAR array can be interspersed with (high valued) erratic | |
429 entries. We construct a chain indicating the sequenced entries. | |
430 To avoid having to allocate this chain, we overlay it onto the space of | |
431 the ERRATIC array during construction. A final pass iterates over the | |
432 chain to determine what should be placed in the ERRATIC array, and | |
433 what is the linear sequence. This overlay is safe from aliasing. */ | |
434 | |
435 static inline void | |
436 fde_split (struct object *ob, fde_compare_t fde_compare, | |
437 struct fde_vector *linear, struct fde_vector *erratic) | |
438 { | |
439 static const fde *marker; | |
440 size_t count = linear->count; | |
441 const fde *const *chain_end = ▮ | |
442 size_t i, j, k; | |
443 | |
444 /* This should optimize out, but it is wise to make sure this assumption | |
445 is correct. Should these have different sizes, we cannot cast between | |
446 them and the overlaying onto ERRATIC will not work. */ | |
447 gcc_assert (sizeof (const fde *) == sizeof (const fde **)); | |
448 | |
449 for (i = 0; i < count; i++) | |
450 { | |
451 const fde *const *probe; | |
452 | |
453 for (probe = chain_end; | |
454 probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0; | |
455 probe = chain_end) | |
456 { | |
457 chain_end = (const fde *const*) erratic->array[probe - linear->array]; | |
458 erratic->array[probe - linear->array] = NULL; | |
459 } | |
460 erratic->array[i] = (const fde *) chain_end; | |
461 chain_end = &linear->array[i]; | |
462 } | |
463 | |
464 /* Each entry in LINEAR which is part of the linear sequence we have | |
465 discovered will correspond to a non-NULL entry in the chain we built in | |
466 the ERRATIC array. */ | |
467 for (i = j = k = 0; i < count; i++) | |
468 if (erratic->array[i]) | |
469 linear->array[j++] = linear->array[i]; | |
470 else | |
471 erratic->array[k++] = linear->array[i]; | |
472 linear->count = j; | |
473 erratic->count = k; | |
474 } | |
475 | |
476 #define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0) | |
477 | |
478 /* Convert a semi-heap to a heap. A semi-heap is a heap except possibly | |
479 for the first (root) node; push it down to its rightful place. */ | |
480 | |
481 static void | |
482 frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a, | |
483 int lo, int hi) | |
484 { | |
485 int i, j; | |
486 | |
487 for (i = lo, j = 2*i+1; | |
488 j < hi; | |
489 j = 2*i+1) | |
490 { | |
491 if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0) | |
492 ++j; | |
493 | |
494 if (fde_compare (ob, a[i], a[j]) < 0) | |
495 { | |
496 SWAP (a[i], a[j]); | |
497 i = j; | |
498 } | |
499 else | |
500 break; | |
501 } | |
502 } | |
503 | |
504 /* This is O(n log(n)). BSD/OS defines heapsort in stdlib.h, so we must | |
505 use a name that does not conflict. */ | |
506 | |
507 static void | |
508 frame_heapsort (struct object *ob, fde_compare_t fde_compare, | |
509 struct fde_vector *erratic) | |
510 { | |
511 /* For a description of this algorithm, see: | |
512 Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed., | |
513 p. 60-61. */ | |
514 const fde ** a = erratic->array; | |
515 /* A portion of the array is called a "heap" if for all i>=0: | |
516 If i and 2i+1 are valid indices, then a[i] >= a[2i+1]. | |
517 If i and 2i+2 are valid indices, then a[i] >= a[2i+2]. */ | |
518 size_t n = erratic->count; | |
519 int m; | |
520 | |
521 /* Expand our heap incrementally from the end of the array, heapifying | |
522 each resulting semi-heap as we go. After each step, a[m] is the top | |
523 of a heap. */ | |
524 for (m = n/2-1; m >= 0; --m) | |
525 frame_downheap (ob, fde_compare, a, m, n); | |
526 | |
527 /* Shrink our heap incrementally from the end of the array, first | |
528 swapping out the largest element a[0] and then re-heapifying the | |
529 resulting semi-heap. After each step, a[0..m) is a heap. */ | |
530 for (m = n-1; m >= 1; --m) | |
531 { | |
532 SWAP (a[0], a[m]); | |
533 frame_downheap (ob, fde_compare, a, 0, m); | |
534 } | |
535 #undef SWAP | |
536 } | |
537 | |
538 /* Merge V1 and V2, both sorted, and put the result into V1. */ | |
539 static inline void | |
540 fde_merge (struct object *ob, fde_compare_t fde_compare, | |
541 struct fde_vector *v1, struct fde_vector *v2) | |
542 { | |
543 size_t i1, i2; | |
544 const fde * fde2; | |
545 | |
546 i2 = v2->count; | |
547 if (i2 > 0) | |
548 { | |
549 i1 = v1->count; | |
550 do | |
551 { | |
552 i2--; | |
553 fde2 = v2->array[i2]; | |
554 while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0) | |
555 { | |
556 v1->array[i1+i2] = v1->array[i1-1]; | |
557 i1--; | |
558 } | |
559 v1->array[i1+i2] = fde2; | |
560 } | |
561 while (i2 > 0); | |
562 v1->count += v2->count; | |
563 } | |
564 } | |
565 | |
566 static inline void | |
567 end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count) | |
568 { | |
569 fde_compare_t fde_compare; | |
570 | |
571 gcc_assert (!accu->linear || accu->linear->count == count); | |
572 | |
573 if (ob->s.b.mixed_encoding) | |
574 fde_compare = fde_mixed_encoding_compare; | |
575 else if (ob->s.b.encoding == DW_EH_PE_absptr) | |
576 fde_compare = fde_unencoded_compare; | |
577 else | |
578 fde_compare = fde_single_encoding_compare; | |
579 | |
580 if (accu->erratic) | |
581 { | |
582 fde_split (ob, fde_compare, accu->linear, accu->erratic); | |
583 gcc_assert (accu->linear->count + accu->erratic->count == count); | |
584 frame_heapsort (ob, fde_compare, accu->erratic); | |
585 fde_merge (ob, fde_compare, accu->linear, accu->erratic); | |
586 free (accu->erratic); | |
587 } | |
588 else | |
589 { | |
590 /* We've not managed to malloc an erratic array, | |
591 so heap sort in the linear one. */ | |
592 frame_heapsort (ob, fde_compare, accu->linear); | |
593 } | |
594 } | |
595 | |
596 | |
597 /* Update encoding, mixed_encoding, and pc_begin for OB for the | |
598 fde array beginning at THIS_FDE. Return the number of fdes | |
599 encountered along the way. */ | |
600 | |
601 static size_t | |
602 classify_object_over_fdes (struct object *ob, const fde *this_fde) | |
603 { | |
604 const struct dwarf_cie *last_cie = 0; | |
605 size_t count = 0; | |
606 int encoding = DW_EH_PE_absptr; | |
607 _Unwind_Ptr base = 0; | |
608 | |
609 for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde)) | |
610 { | |
611 const struct dwarf_cie *this_cie; | |
612 _Unwind_Ptr mask, pc_begin; | |
613 | |
614 /* Skip CIEs. */ | |
615 if (this_fde->CIE_delta == 0) | |
616 continue; | |
617 | |
618 /* Determine the encoding for this FDE. Note mixed encoded | |
619 objects for later. */ | |
620 this_cie = get_cie (this_fde); | |
621 if (this_cie != last_cie) | |
622 { | |
623 last_cie = this_cie; | |
624 encoding = get_cie_encoding (this_cie); | |
63
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
625 if (encoding == DW_EH_PE_omit) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
626 return -1; |
0 | 627 base = base_from_object (encoding, ob); |
628 if (ob->s.b.encoding == DW_EH_PE_omit) | |
629 ob->s.b.encoding = encoding; | |
630 else if (ob->s.b.encoding != encoding) | |
631 ob->s.b.mixed_encoding = 1; | |
632 } | |
633 | |
634 read_encoded_value_with_base (encoding, base, this_fde->pc_begin, | |
635 &pc_begin); | |
636 | |
637 /* Take care to ignore link-once functions that were removed. | |
638 In these cases, the function address will be NULL, but if | |
639 the encoding is smaller than a pointer a true NULL may not | |
640 be representable. Assume 0 in the representable bits is NULL. */ | |
641 mask = size_of_encoded_value (encoding); | |
642 if (mask < sizeof (void *)) | |
36 | 643 mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1; |
0 | 644 else |
645 mask = -1; | |
646 | |
647 if ((pc_begin & mask) == 0) | |
648 continue; | |
649 | |
650 count += 1; | |
651 if ((void *) pc_begin < ob->pc_begin) | |
652 ob->pc_begin = (void *) pc_begin; | |
653 } | |
654 | |
655 return count; | |
656 } | |
657 | |
658 static void | |
659 add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde) | |
660 { | |
661 const struct dwarf_cie *last_cie = 0; | |
662 int encoding = ob->s.b.encoding; | |
663 _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob); | |
664 | |
665 for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde)) | |
666 { | |
667 const struct dwarf_cie *this_cie; | |
668 | |
669 /* Skip CIEs. */ | |
670 if (this_fde->CIE_delta == 0) | |
671 continue; | |
672 | |
673 if (ob->s.b.mixed_encoding) | |
674 { | |
675 /* Determine the encoding for this FDE. Note mixed encoded | |
676 objects for later. */ | |
677 this_cie = get_cie (this_fde); | |
678 if (this_cie != last_cie) | |
679 { | |
680 last_cie = this_cie; | |
681 encoding = get_cie_encoding (this_cie); | |
682 base = base_from_object (encoding, ob); | |
683 } | |
684 } | |
685 | |
686 if (encoding == DW_EH_PE_absptr) | |
687 { | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
36
diff
changeset
|
688 _Unwind_Ptr ptr; |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
36
diff
changeset
|
689 memcpy (&ptr, this_fde->pc_begin, sizeof (_Unwind_Ptr)); |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
36
diff
changeset
|
690 if (ptr == 0) |
0 | 691 continue; |
692 } | |
693 else | |
694 { | |
695 _Unwind_Ptr pc_begin, mask; | |
696 | |
697 read_encoded_value_with_base (encoding, base, this_fde->pc_begin, | |
698 &pc_begin); | |
699 | |
700 /* Take care to ignore link-once functions that were removed. | |
701 In these cases, the function address will be NULL, but if | |
702 the encoding is smaller than a pointer a true NULL may not | |
703 be representable. Assume 0 in the representable bits is NULL. */ | |
704 mask = size_of_encoded_value (encoding); | |
705 if (mask < sizeof (void *)) | |
36 | 706 mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1; |
0 | 707 else |
708 mask = -1; | |
709 | |
710 if ((pc_begin & mask) == 0) | |
711 continue; | |
712 } | |
713 | |
714 fde_insert (accu, this_fde); | |
715 } | |
716 } | |
717 | |
718 /* Set up a sorted array of pointers to FDEs for a loaded object. We | |
719 count up the entries before allocating the array because it's likely to | |
720 be faster. We can be called multiple times, should we have failed to | |
721 allocate a sorted fde array on a previous occasion. */ | |
722 | |
723 static inline void | |
724 init_object (struct object* ob) | |
725 { | |
726 struct fde_accumulator accu; | |
727 size_t count; | |
728 | |
729 count = ob->s.b.count; | |
730 if (count == 0) | |
731 { | |
732 if (ob->s.b.from_array) | |
733 { | |
734 fde **p = ob->u.array; | |
735 for (count = 0; *p; ++p) | |
63
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
736 { |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
737 size_t cur_count = classify_object_over_fdes (ob, *p); |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
738 if (cur_count == (size_t) -1) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
739 goto unhandled_fdes; |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
740 count += cur_count; |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
741 } |
0 | 742 } |
743 else | |
63
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
744 { |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
745 count = classify_object_over_fdes (ob, ob->u.single); |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
746 if (count == (size_t) -1) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
747 { |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
748 static const fde terminator; |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
749 unhandled_fdes: |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
750 ob->s.i = 0; |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
751 ob->s.b.encoding = DW_EH_PE_omit; |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
752 ob->u.single = &terminator; |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
753 return; |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
754 } |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
755 } |
0 | 756 |
757 /* The count field we have in the main struct object is somewhat | |
758 limited, but should suffice for virtually all cases. If the | |
759 counted value doesn't fit, re-write a zero. The worst that | |
760 happens is that we re-count next time -- admittedly non-trivial | |
761 in that this implies some 2M fdes, but at least we function. */ | |
762 ob->s.b.count = count; | |
763 if (ob->s.b.count != count) | |
764 ob->s.b.count = 0; | |
765 } | |
766 | |
767 if (!start_fde_sort (&accu, count)) | |
768 return; | |
769 | |
770 if (ob->s.b.from_array) | |
771 { | |
772 fde **p; | |
773 for (p = ob->u.array; *p; ++p) | |
774 add_fdes (ob, &accu, *p); | |
775 } | |
776 else | |
777 add_fdes (ob, &accu, ob->u.single); | |
778 | |
779 end_fde_sort (ob, &accu, count); | |
780 | |
781 /* Save the original fde pointer, since this is the key by which the | |
782 DSO will deregister the object. */ | |
783 accu.linear->orig_data = ob->u.single; | |
784 ob->u.sort = accu.linear; | |
785 | |
786 ob->s.b.sorted = 1; | |
787 } | |
788 | |
/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  Returns the FDE whose [pc_begin, pc_begin + pc_range)
   interval contains PC, or NULL if none in this chain does.  */

static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  /* Cache the last CIE seen so mixed-encoding objects do not re-decode
     the encoding for every FDE sharing the same CIE.  */
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  Note mixed encoded
	     objects for later.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

      if (encoding == DW_EH_PE_absptr)
	{
	  /* Absolute pointers: read the begin/range pair directly.  */
	  const _Unwind_Ptr *pc_array = (const _Unwind_Ptr *) this_fde->pc_begin;
	  pc_begin = pc_array[0];
	  pc_range = pc_array[1];
	  /* A zero pc_begin marks a deleted (e.g. link-once) function.  */
	  if (pc_begin == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr mask;
	  const unsigned char *p;

	  /* Decode pc_begin with the full encoding; pc_range is always
	     an offset, so strip the base-selection bits (high nibble).  */
	  p = read_encoded_value_with_base (encoding, base,
					    this_fde->pc_begin, &pc_begin);
	  read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

      /* Unsigned subtraction turns the interval test into one compare:
	 pc is inside iff pc_begin <= pc < pc_begin + pc_range.  */
      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
	return this_fde;
    }

  return NULL;
}
859 | |
860 /* Binary search for an FDE containing the given PC. Here are three | |
861 implementations of increasing complexity. */ | |
862 | |
863 static inline const fde * | |
864 binary_search_unencoded_fdes (struct object *ob, void *pc) | |
865 { | |
866 struct fde_vector *vec = ob->u.sort; | |
867 size_t lo, hi; | |
868 | |
869 for (lo = 0, hi = vec->count; lo < hi; ) | |
870 { | |
871 size_t i = (lo + hi) / 2; | |
872 const fde *const f = vec->array[i]; | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
36
diff
changeset
|
873 void *pc_begin; |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
36
diff
changeset
|
874 uaddr pc_range; |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
36
diff
changeset
|
875 memcpy (&pc_begin, (const void * const *) f->pc_begin, sizeof (void *)); |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
36
diff
changeset
|
876 memcpy (&pc_range, (const uaddr *) f->pc_begin + 1, sizeof (uaddr)); |
0 | 877 |
878 if (pc < pc_begin) | |
879 hi = i; | |
880 else if (pc >= pc_begin + pc_range) | |
881 lo = i + 1; | |
882 else | |
883 return f; | |
884 } | |
885 | |
886 return NULL; | |
887 } | |
888 | |
889 static inline const fde * | |
890 binary_search_single_encoding_fdes (struct object *ob, void *pc) | |
891 { | |
892 struct fde_vector *vec = ob->u.sort; | |
893 int encoding = ob->s.b.encoding; | |
894 _Unwind_Ptr base = base_from_object (encoding, ob); | |
895 size_t lo, hi; | |
896 | |
897 for (lo = 0, hi = vec->count; lo < hi; ) | |
898 { | |
899 size_t i = (lo + hi) / 2; | |
900 const fde *f = vec->array[i]; | |
901 _Unwind_Ptr pc_begin, pc_range; | |
902 const unsigned char *p; | |
903 | |
904 p = read_encoded_value_with_base (encoding, base, f->pc_begin, | |
905 &pc_begin); | |
906 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range); | |
907 | |
908 if ((_Unwind_Ptr) pc < pc_begin) | |
909 hi = i; | |
910 else if ((_Unwind_Ptr) pc >= pc_begin + pc_range) | |
911 lo = i + 1; | |
912 else | |
913 return f; | |
914 } | |
915 | |
916 return NULL; | |
917 } | |
918 | |
919 static inline const fde * | |
920 binary_search_mixed_encoding_fdes (struct object *ob, void *pc) | |
921 { | |
922 struct fde_vector *vec = ob->u.sort; | |
923 size_t lo, hi; | |
924 | |
925 for (lo = 0, hi = vec->count; lo < hi; ) | |
926 { | |
927 size_t i = (lo + hi) / 2; | |
928 const fde *f = vec->array[i]; | |
929 _Unwind_Ptr pc_begin, pc_range; | |
930 const unsigned char *p; | |
931 int encoding; | |
932 | |
933 encoding = get_fde_encoding (f); | |
934 p = read_encoded_value_with_base (encoding, | |
935 base_from_object (encoding, ob), | |
936 f->pc_begin, &pc_begin); | |
937 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range); | |
938 | |
939 if ((_Unwind_Ptr) pc < pc_begin) | |
940 hi = i; | |
941 else if ((_Unwind_Ptr) pc >= pc_begin + pc_range) | |
942 lo = i + 1; | |
943 else | |
944 return f; | |
945 } | |
946 | |
947 return NULL; | |
948 } | |
949 | |
950 static const fde * | |
951 search_object (struct object* ob, void *pc) | |
952 { | |
953 /* If the data hasn't been sorted, try to do this now. We may have | |
954 more memory available than last time we tried. */ | |
955 if (! ob->s.b.sorted) | |
956 { | |
957 init_object (ob); | |
958 | |
959 /* Despite the above comment, the normal reason to get here is | |
960 that we've not processed this object before. A quick range | |
961 check is in order. */ | |
962 if (pc < ob->pc_begin) | |
963 return NULL; | |
964 } | |
965 | |
966 if (ob->s.b.sorted) | |
967 { | |
968 if (ob->s.b.mixed_encoding) | |
969 return binary_search_mixed_encoding_fdes (ob, pc); | |
970 else if (ob->s.b.encoding == DW_EH_PE_absptr) | |
971 return binary_search_unencoded_fdes (ob, pc); | |
972 else | |
973 return binary_search_single_encoding_fdes (ob, pc); | |
974 } | |
975 else | |
976 { | |
977 /* Long slow laborious linear search, cos we've no memory. */ | |
978 if (ob->s.b.from_array) | |
979 { | |
980 fde **p; | |
981 for (p = ob->u.array; *p ; p++) | |
982 { | |
983 const fde *f = linear_search_fdes (ob, *p, pc); | |
984 if (f) | |
985 return f; | |
986 } | |
987 return NULL; | |
988 } | |
989 else | |
990 return linear_search_fdes (ob, ob->u.single, pc); | |
991 } | |
992 } | |
993 | |
/* Public entry point: find the FDE covering PC, filling in *BASES
   (text/data bases and the function start address) on success.
   Returns NULL if no registered object covers PC.  */

const fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  const fde *f = NULL;

  /* The object lists are global state shared with the registration
     routines; all traversal happens under object_mutex.  */
  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
	f = search_object (ob, pc);
	if (f)
	  goto fini;
	/* Descending sort + non-overlap: no later object can match.  */
	break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
	if ((*p)->pc_begin < ob->pc_begin)
	  break;
      ob->next = *p;
      *p = ob;

      if (f)
	goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      /* NB: ob still points at the object in which F was found; it is
	 read here after the mutex is released.  */
      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
	encoding = get_fde_encoding (f);
      /* Re-decode pc_begin to recover the function's start address.  */
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
				    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}