Mercurial > hg > CbC > CbC_gcc
comparison gcc/config/arm/unwind-arm.c @ 0:a06113de4d67
first commit
author | kent <kent@cr.ie.u-ryukyu.ac.jp> |
---|---|
date | Fri, 17 Jul 2009 14:47:48 +0900 |
parents | |
children | 77e2b8dfacca |
comparison
equal
deleted
inserted
replaced
-1:000000000000 | 0:a06113de4d67 |
---|---|
1 /* ARM EABI compliant unwinding routines. | |
2 Copyright (C) 2004, 2005, 2009 Free Software Foundation, Inc. | |
3 Contributed by Paul Brook | |
4 | |
5 This file is free software; you can redistribute it and/or modify it | |
6 under the terms of the GNU General Public License as published by the | |
7 Free Software Foundation; either version 3, or (at your option) any | |
8 later version. | |
9 | |
10 This file is distributed in the hope that it will be useful, but | |
11 WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
13 General Public License for more details. | |
14 | |
15 Under Section 7 of GPL version 3, you are granted additional | |
16 permissions described in the GCC Runtime Library Exception, version | |
17 3.1, as published by the Free Software Foundation. | |
18 | |
19 You should have received a copy of the GNU General Public License and | |
20 a copy of the GCC Runtime Library Exception along with this program; | |
21 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
22 <http://www.gnu.org/licenses/>. */ | |
23 | |
24 #include "unwind.h" | |
25 | |
26 /* We add a prototype for abort here to avoid creating a dependency on | |
27 target headers. */ | |
28 extern void abort (void); | |
29 | |
30 /* Definitions for C++ runtime support routines. We make these weak | |
31 declarations to avoid pulling in libsupc++ unnecessarily. */ | |
32 typedef unsigned char bool; | |
33 | |
34 typedef struct _ZSt9type_info type_info; /* This names C++ type_info type */ | |
35 | |
36 void __attribute__((weak)) __cxa_call_unexpected(_Unwind_Control_Block *ucbp); | |
37 bool __attribute__((weak)) __cxa_begin_cleanup(_Unwind_Control_Block *ucbp); | |
38 bool __attribute__((weak)) __cxa_type_match(_Unwind_Control_Block *ucbp, | |
39 const type_info *rttip, | |
40 bool is_reference, | |
41 void **matched_object); | |
42 | |
43 _Unwind_Ptr __attribute__((weak)) | |
44 __gnu_Unwind_Find_exidx (_Unwind_Ptr, int *); | |
45 | |
46 /* Misc constants. */ | |
47 #define R_IP 12 | |
48 #define R_SP 13 | |
49 #define R_LR 14 | |
50 #define R_PC 15 | |
51 | |
52 #define EXIDX_CANTUNWIND 1 | |
53 #define uint32_highbit (((_uw) 1) << 31) | |
54 | |
55 #define UCB_FORCED_STOP_FN(ucbp) ((ucbp)->unwinder_cache.reserved1) | |
56 #define UCB_PR_ADDR(ucbp) ((ucbp)->unwinder_cache.reserved2) | |
57 #define UCB_SAVED_CALLSITE_ADDR(ucbp) ((ucbp)->unwinder_cache.reserved3) | |
58 #define UCB_FORCED_STOP_ARG(ucbp) ((ucbp)->unwinder_cache.reserved4) | |
59 | |
/* The sixteen ARM core registers r0-r15, indexed by the R_* constants
   defined above.  */
struct core_regs
{
  _uw r[16];
};

/* We use normal integer types here to avoid the compiler generating
   coprocessor instructions.  */

/* VFP registers d0-d15.  "pad" holds the extra format word written by
   FSTMX-format saves (see __gnu_Unwind_Save_VFP).  */
struct vfp_regs
{
  _uw64 d[16];
  _uw pad;
};

/* VFPv3 registers d16-d31.  */
struct vfpv3_regs
{
  /* Always populated via VSTM, so no need for the "pad" field from
     vfp_regs (which is used to store the format word for FSTMX).  */
  _uw64 d[16];
};

/* One FPA register: three 32-bit words (96 bits).  */
struct fpa_reg
{
  _uw w[3];
};

/* The eight FPA registers.  */
struct fpa_regs
{
  struct fpa_reg f[8];
};

/* The sixteen 64-bit iWMMXt data registers.  */
struct wmmxd_regs
{
  _uw64 wd[16];
};

/* The four 32-bit iWMMXt control registers.  */
struct wmmxc_regs
{
  _uw wc[4];
};

/* Unwind descriptors.  */

/* Exception handling table descriptor with 16-bit fields.  */
typedef struct
{
  _uw16 length;
  _uw16 offset;
} EHT16;

/* Exception handling table descriptor with 32-bit fields.  */
typedef struct
{
  _uw length;
  _uw offset;
} EHT32;

/* The ABI specifies that the unwind routines may only use core registers,
   except when actually manipulating coprocessor state.  This allows
   us to write one implementation that works on all platforms by
   demand-saving coprocessor registers.

   During unwinding we hold the coprocessor state in the actual hardware
   registers and allocate demand-save areas for use during phase1
   unwinding.  */

/* Virtual register state used during phase 1 (virtual) unwinding.  */
typedef struct
{
  /* The first fields must be the same as a phase2_vrs.  */
  _uw demand_save_flags;
  struct core_regs core;
  _uw prev_sp; /* Only valid during forced unwinding.  */
  struct vfp_regs vfp;
  struct vfpv3_regs vfp_regs_16_to_31;
  struct fpa_regs fpa;
  struct wmmxd_regs wmmxd;
  struct wmmxc_regs wmmxc;
} phase1_vrs;

/* Bits of demand_save_flags.  A CLEAR bit means the corresponding bank
   has already been demand-saved into the phase1_vrs area.  */
#define DEMAND_SAVE_VFP 1	/* VFP state has been saved if not set */
#define DEMAND_SAVE_VFP_D 2	/* VFP state is for FLDMD/FSTMD if set */
#define DEMAND_SAVE_VFP_V3 4    /* VFPv3 state for regs 16 .. 31 has
                                   been saved if not set */
#define DEMAND_SAVE_WMMXD 8	/* iWMMXt data registers have been
				   saved if not set.  */
#define DEMAND_SAVE_WMMXC 16	/* iWMMXt control registers have been
				   saved if not set.  */

/* This must match the structure created by the assembly wrappers.  */
typedef struct
{
  _uw demand_save_flags;
  struct core_regs core;
} phase2_vrs;


/* An exception index table entry.  */

typedef struct __EIT_entry
{
  _uw fnoffset;
  _uw content;
} __EIT_entry;
160 | |
161 /* Assembly helper functions. */ | |
162 | |
163 /* Restore core register state. Never returns. */ | |
164 void __attribute__((noreturn)) restore_core_regs (struct core_regs *); | |
165 | |
166 | |
167 /* Coprocessor register state manipulation functions. */ | |
168 | |
169 /* Routines for FLDMX/FSTMX format... */ | |
170 void __gnu_Unwind_Save_VFP (struct vfp_regs * p); | |
171 void __gnu_Unwind_Restore_VFP (struct vfp_regs * p); | |
172 void __gnu_Unwind_Save_WMMXD (struct wmmxd_regs * p); | |
173 void __gnu_Unwind_Restore_WMMXD (struct wmmxd_regs * p); | |
174 void __gnu_Unwind_Save_WMMXC (struct wmmxc_regs * p); | |
175 void __gnu_Unwind_Restore_WMMXC (struct wmmxc_regs * p); | |
176 | |
177 /* ...and those for FLDMD/FSTMD format... */ | |
178 void __gnu_Unwind_Save_VFP_D (struct vfp_regs * p); | |
179 void __gnu_Unwind_Restore_VFP_D (struct vfp_regs * p); | |
180 | |
181 /* ...and those for VLDM/VSTM format, saving/restoring only registers | |
182 16 through 31. */ | |
183 void __gnu_Unwind_Save_VFP_D_16_to_31 (struct vfpv3_regs * p); | |
184 void __gnu_Unwind_Restore_VFP_D_16_to_31 (struct vfpv3_regs * p); | |
185 | |
186 /* Restore coprocessor state after phase1 unwinding. */ | |
187 static void | |
188 restore_non_core_regs (phase1_vrs * vrs) | |
189 { | |
190 if ((vrs->demand_save_flags & DEMAND_SAVE_VFP) == 0) | |
191 { | |
192 if (vrs->demand_save_flags & DEMAND_SAVE_VFP_D) | |
193 __gnu_Unwind_Restore_VFP_D (&vrs->vfp); | |
194 else | |
195 __gnu_Unwind_Restore_VFP (&vrs->vfp); | |
196 } | |
197 | |
198 if ((vrs->demand_save_flags & DEMAND_SAVE_VFP_V3) == 0) | |
199 __gnu_Unwind_Restore_VFP_D_16_to_31 (&vrs->vfp_regs_16_to_31); | |
200 | |
201 if ((vrs->demand_save_flags & DEMAND_SAVE_WMMXD) == 0) | |
202 __gnu_Unwind_Restore_WMMXD (&vrs->wmmxd); | |
203 if ((vrs->demand_save_flags & DEMAND_SAVE_WMMXC) == 0) | |
204 __gnu_Unwind_Restore_WMMXC (&vrs->wmmxc); | |
205 } | |
206 | |
207 /* A better way to do this would probably be to compare the absolute address | |
208 with a segment relative relocation of the same symbol. */ | |
209 | |
210 extern int __text_start; | |
211 extern int __data_start; | |
212 | |
213 /* The exception index table location. */ | |
214 extern __EIT_entry __exidx_start; | |
215 extern __EIT_entry __exidx_end; | |
216 | |
217 /* ABI defined personality routines. */ | |
218 extern _Unwind_Reason_Code __aeabi_unwind_cpp_pr0 (_Unwind_State, | |
219 _Unwind_Control_Block *, _Unwind_Context *);// __attribute__((weak)); | |
220 extern _Unwind_Reason_Code __aeabi_unwind_cpp_pr1 (_Unwind_State, | |
221 _Unwind_Control_Block *, _Unwind_Context *) __attribute__((weak)); | |
222 extern _Unwind_Reason_Code __aeabi_unwind_cpp_pr2 (_Unwind_State, | |
223 _Unwind_Control_Block *, _Unwind_Context *) __attribute__((weak)); | |
224 | |
225 /* ABI defined routine to store a virtual register to memory. */ | |
226 | |
227 _Unwind_VRS_Result _Unwind_VRS_Get (_Unwind_Context *context, | |
228 _Unwind_VRS_RegClass regclass, | |
229 _uw regno, | |
230 _Unwind_VRS_DataRepresentation representation, | |
231 void *valuep) | |
232 { | |
233 phase1_vrs *vrs = (phase1_vrs *) context; | |
234 | |
235 switch (regclass) | |
236 { | |
237 case _UVRSC_CORE: | |
238 if (representation != _UVRSD_UINT32 | |
239 || regno > 15) | |
240 return _UVRSR_FAILED; | |
241 *(_uw *) valuep = vrs->core.r[regno]; | |
242 return _UVRSR_OK; | |
243 | |
244 case _UVRSC_VFP: | |
245 case _UVRSC_FPA: | |
246 case _UVRSC_WMMXD: | |
247 case _UVRSC_WMMXC: | |
248 return _UVRSR_NOT_IMPLEMENTED; | |
249 | |
250 default: | |
251 return _UVRSR_FAILED; | |
252 } | |
253 } | |
254 | |
255 | |
256 /* ABI defined function to load a virtual register from memory. */ | |
257 | |
258 _Unwind_VRS_Result _Unwind_VRS_Set (_Unwind_Context *context, | |
259 _Unwind_VRS_RegClass regclass, | |
260 _uw regno, | |
261 _Unwind_VRS_DataRepresentation representation, | |
262 void *valuep) | |
263 { | |
264 phase1_vrs *vrs = (phase1_vrs *) context; | |
265 | |
266 switch (regclass) | |
267 { | |
268 case _UVRSC_CORE: | |
269 if (representation != _UVRSD_UINT32 | |
270 || regno > 15) | |
271 return _UVRSR_FAILED; | |
272 | |
273 vrs->core.r[regno] = *(_uw *) valuep; | |
274 return _UVRSR_OK; | |
275 | |
276 case _UVRSC_VFP: | |
277 case _UVRSC_FPA: | |
278 case _UVRSC_WMMXD: | |
279 case _UVRSC_WMMXC: | |
280 return _UVRSR_NOT_IMPLEMENTED; | |
281 | |
282 default: | |
283 return _UVRSR_FAILED; | |
284 } | |
285 } | |
286 | |
287 | |
/* ABI defined function to pop registers off the stack.

   DISCRIMINATOR encodes which registers to pop: for _UVRSC_CORE it is a
   16-bit register mask; for _UVRSC_VFP and _UVRSC_WMMXD it is
   (first_register << 16) | register_count; for _UVRSC_WMMXC it is a
   4-bit mask of control registers.  On success the stack pointer held
   in CONTEXT is advanced past the popped data.  */

_Unwind_VRS_Result _Unwind_VRS_Pop (_Unwind_Context *context,
                                    _Unwind_VRS_RegClass regclass,
                                    _uw discriminator,
                                    _Unwind_VRS_DataRepresentation representation)
{
  phase1_vrs *vrs = (phase1_vrs *) context;

  switch (regclass)
    {
    case _UVRSC_CORE:
      {
	_uw *ptr;
	_uw mask;
	int i;

	if (representation != _UVRSD_UINT32)
	  return _UVRSR_FAILED;

	mask = discriminator & 0xffff;
	ptr = (_uw *) vrs->core.r[R_SP];
	/* Pop the requested registers.  */
	for (i = 0; i < 16; i++)
	  {
	    if (mask & (1 << i))
	      vrs->core.r[i] = *(ptr++);
	  }
	/* Writeback the stack pointer value if it wasn't restored.  */
	if ((mask & (1 << R_SP)) == 0)
	  vrs->core.r[R_SP] = (_uw) ptr;
      }
      return _UVRSR_OK;

    case _UVRSC_VFP:
      {
	_uw start = discriminator >> 16;
	_uw count = discriminator & 0xffff;
	struct vfp_regs tmp;
	struct vfpv3_regs tmp_16_to_31;
	int tmp_count;
	_uw *sp;
	_uw *dest;
	int num_vfpv3_regs = 0;

	/* We use an approximation here by bounding _UVRSD_DOUBLE
	   register numbers at 32 always, since we can't detect if
	   VFPv3 isn't present (in such a case the upper limit is 16).  */
	if ((representation != _UVRSD_VFPX && representation != _UVRSD_DOUBLE)
	    || start + count > (representation == _UVRSD_VFPX ? 16 : 32)
	    || (representation == _UVRSD_VFPX && start >= 16))
	  return _UVRSR_FAILED;

	/* Check if we're being asked to pop VFPv3-only registers
	   (numbers 16 through 31).  */
	if (start >= 16)
	  num_vfpv3_regs = count;
	else if (start + count > 16)
	  num_vfpv3_regs = start + count - 16;

	/* Registers 16-31 only exist in _UVRSD_DOUBLE representation.  */
	if (num_vfpv3_regs && representation != _UVRSD_DOUBLE)
	  return _UVRSR_FAILED;

	/* Demand-save coprocessor registers for stage1.  */
	if (start < 16 && (vrs->demand_save_flags & DEMAND_SAVE_VFP))
	  {
	    /* Clearing the flag records that d0-d15 are now saved.  */
	    vrs->demand_save_flags &= ~DEMAND_SAVE_VFP;

	    if (representation == _UVRSD_DOUBLE)
	      {
		/* Save in FLDMD/FSTMD format.  */
		vrs->demand_save_flags |= DEMAND_SAVE_VFP_D;
		__gnu_Unwind_Save_VFP_D (&vrs->vfp);
	      }
	    else
	      {
		/* Save in FLDMX/FSTMX format.  */
		vrs->demand_save_flags &= ~DEMAND_SAVE_VFP_D;
		__gnu_Unwind_Save_VFP (&vrs->vfp);
	      }
	  }

	/* Likewise demand-save d16-d31 if they are about to be popped.  */
	if (num_vfpv3_regs > 0
	    && (vrs->demand_save_flags & DEMAND_SAVE_VFP_V3))
	  {
	    vrs->demand_save_flags &= ~DEMAND_SAVE_VFP_V3;
	    __gnu_Unwind_Save_VFP_D_16_to_31 (&vrs->vfp_regs_16_to_31);
	  }

	/* Restore the registers from the stack.  Do this by saving the
	   current VFP registers to a memory area, moving the in-memory
	   values into that area, and restoring from the whole area.
	   For _UVRSD_VFPX we assume FSTMX standard format 1.  */
	if (representation == _UVRSD_VFPX)
	  __gnu_Unwind_Save_VFP (&tmp);
	else
	  {
	    /* Save registers 0 .. 15 if required.  */
	    if (start < 16)
	      __gnu_Unwind_Save_VFP_D (&tmp);

	    /* Save VFPv3 registers 16 .. 31 if required.  */
	    if (num_vfpv3_regs)
	      __gnu_Unwind_Save_VFP_D_16_to_31 (&tmp_16_to_31);
	  }

	/* Work out how many registers below register 16 need popping.  */
	tmp_count = num_vfpv3_regs > 0 ? 16 - start : count;

	/* Copy registers below 16, if needed.
	   The stack address is only guaranteed to be word aligned, so
	   we can't use doubleword copies.  */
	sp = (_uw *) vrs->core.r[R_SP];
	if (tmp_count > 0)
	  {
	    /* Two words per double-precision register.  */
	    tmp_count *= 2;
	    dest = (_uw *) &tmp.d[start];
	    while (tmp_count--)
	      *(dest++) = *(sp++);
	  }

	/* Copy VFPv3 registers numbered >= 16, if needed.  */
	if (num_vfpv3_regs > 0)
	  {
	    /* num_vfpv3_regs is needed below, so copy it.  */
	    int tmp_count_2 = num_vfpv3_regs * 2;
	    int vfpv3_start = start < 16 ? 16 : start;

	    dest = (_uw *) &tmp_16_to_31.d[vfpv3_start - 16];
	    while (tmp_count_2--)
	      *(dest++) = *(sp++);
	  }

	/* Skip the format word space if using FLDMX/FSTMX format.  */
	if (representation == _UVRSD_VFPX)
	  sp++;

	/* Set the new stack pointer.  */
	vrs->core.r[R_SP] = (_uw) sp;

	/* Reload the registers.  */
	if (representation == _UVRSD_VFPX)
	  __gnu_Unwind_Restore_VFP (&tmp);
	else
	  {
	    /* Restore registers 0 .. 15 if required.  */
	    if (start < 16)
	      __gnu_Unwind_Restore_VFP_D (&tmp);

	    /* Restore VFPv3 registers 16 .. 31 if required.  */
	    if (num_vfpv3_regs > 0)
	      __gnu_Unwind_Restore_VFP_D_16_to_31 (&tmp_16_to_31);
	  }
      }
      return _UVRSR_OK;

    case _UVRSC_FPA:
      return _UVRSR_NOT_IMPLEMENTED;

    case _UVRSC_WMMXD:
      {
	_uw start = discriminator >> 16;
	_uw count = discriminator & 0xffff;
	struct wmmxd_regs tmp;
	_uw *sp;
	_uw *dest;

	if ((representation != _UVRSD_UINT64) || start + count > 16)
	  return _UVRSR_FAILED;

	if (vrs->demand_save_flags & DEMAND_SAVE_WMMXD)
	  {
	    /* Demand-save resisters for stage1.  */
	    vrs->demand_save_flags &= ~DEMAND_SAVE_WMMXD;
	    __gnu_Unwind_Save_WMMXD (&vrs->wmmxd);
	  }

	/* Restore the registers from the stack.  Do this by saving the
	   current WMMXD registers to a memory area, moving the in-memory
	   values into that area, and restoring from the whole area.  */
	__gnu_Unwind_Save_WMMXD (&tmp);

	/* The stack address is only guaranteed to be word aligned, so
	   we can't use doubleword copies.  */
	sp = (_uw *) vrs->core.r[R_SP];
	dest = (_uw *) &tmp.wd[start];
	count *= 2;
	while (count--)
	  *(dest++) = *(sp++);

	/* Set the new stack pointer.  */
	vrs->core.r[R_SP] = (_uw) sp;

	/* Reload the registers.  */
	__gnu_Unwind_Restore_WMMXD (&tmp);
      }
      return _UVRSR_OK;

    case _UVRSC_WMMXC:
      {
	int i;
	struct wmmxc_regs tmp;
	_uw *sp;

	if ((representation != _UVRSD_UINT32) || discriminator > 16)
	  return _UVRSR_FAILED;

	if (vrs->demand_save_flags & DEMAND_SAVE_WMMXC)
	  {
	    /* Demand-save resisters for stage1.  */
	    vrs->demand_save_flags &= ~DEMAND_SAVE_WMMXC;
	    __gnu_Unwind_Save_WMMXC (&vrs->wmmxc);
	  }

	/* Restore the registers from the stack.  Do this by saving the
	   current WMMXC registers to a memory area, moving the in-memory
	   values into that area, and restoring from the whole area.  */
	__gnu_Unwind_Save_WMMXC (&tmp);

	sp = (_uw *) vrs->core.r[R_SP];
	for (i = 0; i < 4; i++)
	  if (discriminator & (1 << i))
	    tmp.wc[i] = *(sp++);

	/* Set the new stack pointer.  */
	vrs->core.r[R_SP] = (_uw) sp;

	/* Reload the registers.  */
	__gnu_Unwind_Restore_WMMXC (&tmp);
      }
      return _UVRSR_OK;

    default:
      return _UVRSR_FAILED;
    }
}
524 | |
525 | |
526 /* Core unwinding functions. */ | |
527 | |
528 /* Calculate the address encoded by a 31-bit self-relative offset at address | |
529 P. */ | |
530 static inline _uw | |
531 selfrel_offset31 (const _uw *p) | |
532 { | |
533 _uw offset; | |
534 | |
535 offset = *p; | |
536 /* Sign extend to 32 bits. */ | |
537 if (offset & (1 << 30)) | |
538 offset |= 1u << 31; | |
539 else | |
540 offset &= ~(1u << 31); | |
541 | |
542 return offset + (_uw) p; | |
543 } | |
544 | |
545 | |
/* Perform a binary search for RETURN_ADDRESS in TABLE.  The table
   contains NREC entries, sorted by ascending function start address.
   Returns the entry whose range covers RETURN_ADDRESS, or NULL when the
   address lies below the first entry (or the table is empty).  */

static const __EIT_entry *
search_EIT_table (const __EIT_entry * table, int nrec, _uw return_address)
{
  _uw next_fn;
  _uw this_fn;
  int n, left, right;

  if (nrec == 0)
    return (__EIT_entry *) 0;

  left = 0;
  right = nrec - 1;

  while (1)
    {
      n = (left + right) / 2;
      /* Start address of the function described by entry N.  */
      this_fn = selfrel_offset31 (&table[n].fnoffset);
      /* Last address notionally covered by entry N: one below the next
	 entry's start, or the top of the address space for the final
	 entry.  */
      if (n != nrec - 1)
	next_fn = selfrel_offset31 (&table[n + 1].fnoffset) - 1;
      else
	next_fn = (_uw)0 - 1;

      if (return_address < this_fn)
	{
	  /* Can't move left any further: the address precedes every
	     entry in the table.  */
	  if (n == left)
	    return (__EIT_entry *) 0;
	  right = n - 1;
	}
      else if (return_address <= next_fn)
	return &table[n];
      else
	left = n + 1;
    }
}
583 | |
/* Find the exception index table entry for the given address.
   Fill in the relevant fields of the UCB.
   Returns _URC_FAILURE if an error occurred, _URC_OK on success.  */

static _Unwind_Reason_Code
get_eit_entry (_Unwind_Control_Block *ucbp, _uw return_address)
{
  const __EIT_entry * eitp;
  int nrec;

  /* The return address is the address of the instruction following the
     call instruction (plus one in thumb mode).  If this was the last
     instruction in the function the address will lie in the following
     function.  Subtract 2 from the address so that it points within the call
     instruction itself.  */
  return_address -= 2;

  /* __gnu_Unwind_Find_exidx is a weak reference; when a dynamic loader
     provides it, use it to locate the index table for this address.  */
  if (__gnu_Unwind_Find_exidx)
    {
      eitp = (const __EIT_entry *) __gnu_Unwind_Find_exidx (return_address,
							    &nrec);
      if (!eitp)
	{
	  UCB_PR_ADDR (ucbp) = 0;
	  return _URC_FAILURE;
	}
    }
  else
    {
      /* Otherwise use the single table bounded by the linker-defined
	 __exidx_start/__exidx_end symbols.  */
      eitp = &__exidx_start;
      nrec = &__exidx_end - &__exidx_start;
    }

  eitp = search_EIT_table (eitp, nrec, return_address);

  if (!eitp)
    {
      UCB_PR_ADDR (ucbp) = 0;
      return _URC_FAILURE;
    }
  ucbp->pr_cache.fnstart = selfrel_offset31 (&eitp->fnoffset);

  /* Can this frame be unwound at all?  */
  if (eitp->content == EXIDX_CANTUNWIND)
    {
      UCB_PR_ADDR (ucbp) = 0;
      return _URC_END_OF_STACK;
    }

  /* Obtain the address of the "real" __EHT_Header word.  */

  if (eitp->content & uint32_highbit)
    {
      /* It is immediate data.  */
      ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)&eitp->content;
      ucbp->pr_cache.additional = 1;
    }
  else
    {
      /* The low 31 bits of the content field are a self-relative
	 offset to an _Unwind_EHT_Entry structure.  */
      ucbp->pr_cache.ehtp =
	(_Unwind_EHT_Header *) selfrel_offset31 (&eitp->content);
      ucbp->pr_cache.additional = 0;
    }

  /* Discover the personality routine address.  */
  if (*ucbp->pr_cache.ehtp & (1u << 31))
    {
      /* One of the predefined standard routines, selected by bits
	 27:24 of the header word.  */
      _uw idx = (*(_uw *) ucbp->pr_cache.ehtp >> 24) & 0xf;
      if (idx == 0)
	UCB_PR_ADDR (ucbp) = (_uw) &__aeabi_unwind_cpp_pr0;
      else if (idx == 1)
	UCB_PR_ADDR (ucbp) = (_uw) &__aeabi_unwind_cpp_pr1;
      else if (idx == 2)
	UCB_PR_ADDR (ucbp) = (_uw) &__aeabi_unwind_cpp_pr2;
      else
	{ /* Failed */
	  UCB_PR_ADDR (ucbp) = 0;
	  return _URC_FAILURE;
	}
    }
  else
    {
      /* Execute region offset to PR */
      UCB_PR_ADDR (ucbp) = selfrel_offset31 (ucbp->pr_cache.ehtp);
    }
  return _URC_OK;
}
674 | |
675 | |
676 /* Perform phase2 unwinding. VRS is the initial virtual register state. */ | |
677 | |
678 static void __attribute__((noreturn)) | |
679 unwind_phase2 (_Unwind_Control_Block * ucbp, phase2_vrs * vrs) | |
680 { | |
681 _Unwind_Reason_Code pr_result; | |
682 | |
683 do | |
684 { | |
685 /* Find the entry for this routine. */ | |
686 if (get_eit_entry (ucbp, vrs->core.r[R_PC]) != _URC_OK) | |
687 abort (); | |
688 | |
689 UCB_SAVED_CALLSITE_ADDR (ucbp) = vrs->core.r[R_PC]; | |
690 | |
691 /* Call the pr to decide what to do. */ | |
692 pr_result = ((personality_routine) UCB_PR_ADDR (ucbp)) | |
693 (_US_UNWIND_FRAME_STARTING, ucbp, (_Unwind_Context *) vrs); | |
694 } | |
695 while (pr_result == _URC_CONTINUE_UNWIND); | |
696 | |
697 if (pr_result != _URC_INSTALL_CONTEXT) | |
698 abort(); | |
699 | |
700 restore_core_regs (&vrs->core); | |
701 } | |
702 | |
/* Perform phase2 forced unwinding.  Single-pass: each frame's stop
   function (stashed in the UCB by __gnu_Unwind_ForcedUnwind) is
   consulted before its personality routine result takes effect.
   RESUMING is non-zero when restarting after a cleanup ran.  */

static _Unwind_Reason_Code
unwind_phase2_forced (_Unwind_Control_Block *ucbp, phase2_vrs *entry_vrs,
		      int resuming)
{
  _Unwind_Stop_Fn stop_fn = (_Unwind_Stop_Fn) UCB_FORCED_STOP_FN (ucbp);
  void *stop_arg = (void *)UCB_FORCED_STOP_ARG (ucbp);
  _Unwind_Reason_Code pr_result = 0;
  /* We use phase1_vrs here even though we do not demand save, for the
     prev_sp field.  */
  phase1_vrs saved_vrs, next_vrs;

  /* Save the core registers.  */
  saved_vrs.core = entry_vrs->core;
  /* We don't need to demand-save the non-core registers, because we
     unwind in a single pass.  */
  saved_vrs.demand_save_flags = 0;

  /* Unwind until we reach a propagation barrier.  */
  do
    {
      _Unwind_State action;
      _Unwind_Reason_Code entry_code;
      _Unwind_Reason_Code stop_code;

      /* Find the entry for this routine.  */
      entry_code = get_eit_entry (ucbp, saved_vrs.core.r[R_PC]);

      /* Only the first frame after a resume gets _US_UNWIND_FRAME_RESUME.  */
      if (resuming)
	{
	  action = _US_UNWIND_FRAME_RESUME | _US_FORCE_UNWIND;
	  resuming = 0;
	}
      else
	action = _US_UNWIND_FRAME_STARTING | _US_FORCE_UNWIND;

      if (entry_code == _URC_OK)
	{
	  UCB_SAVED_CALLSITE_ADDR (ucbp) = saved_vrs.core.r[R_PC];

	  /* Run the personality routine on a copy so saved_vrs still
	     describes the frame when the stop function is called.  */
	  next_vrs = saved_vrs;

	  /* Call the pr to decide what to do.  */
	  pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
	    (action, ucbp, (void *) &next_vrs);

	  /* The unwound SP becomes this frame's CFA (see _Unwind_GetCFA).  */
	  saved_vrs.prev_sp = next_vrs.core.r[R_SP];
	}
      else
	{
	  /* Treat any failure as the end of unwinding, to cope more
	     gracefully with missing EH information.  Mixed EH and
	     non-EH within one object will usually result in failure,
	     because the .ARM.exidx tables do not indicate the end
	     of the code to which they apply; but mixed EH and non-EH
	     shared objects should return an unwind failure at the
	     entry of a non-EH shared object.  */
	  action |= _US_END_OF_STACK;

	  saved_vrs.prev_sp = saved_vrs.core.r[R_SP];
	}

      /* Give the stop function the chance to end unwinding here.  */
      stop_code = stop_fn (1, action, ucbp->exception_class, ucbp,
			   (void *)&saved_vrs, stop_arg);
      if (stop_code != _URC_NO_REASON)
	return _URC_FAILURE;

      if (entry_code != _URC_OK)
	return entry_code;

      /* Commit the unwind of this frame.  */
      saved_vrs = next_vrs;
    }
  while (pr_result == _URC_CONTINUE_UNWIND);

  if (pr_result != _URC_INSTALL_CONTEXT)
    {
      /* Some sort of failure has occurred in the pr and probably the
	 pr returned _URC_FAILURE.  */
      return _URC_FAILURE;
    }

  /* Transfer control to the landing pad; does not return.  */
  restore_core_regs (&saved_vrs.core);
}
787 | |
788 /* This is a very limited implementation of _Unwind_GetCFA. It returns | |
789 the stack pointer as it is about to be unwound, and is only valid | |
790 while calling the stop function during forced unwinding. If the | |
791 current personality routine result is going to run a cleanup, this | |
792 will not be the CFA; but when the frame is really unwound, it will | |
793 be. */ | |
794 | |
795 _Unwind_Word | |
796 _Unwind_GetCFA (_Unwind_Context *context) | |
797 { | |
798 return ((phase1_vrs *) context)->prev_sp; | |
799 } | |
800 | |
801 /* Perform phase1 unwinding. UCBP is the exception being thrown, and | |
802 entry_VRS is the register state on entry to _Unwind_RaiseException. */ | |
803 | |
804 _Unwind_Reason_Code | |
805 __gnu_Unwind_RaiseException (_Unwind_Control_Block *, phase2_vrs *); | |
806 | |
807 _Unwind_Reason_Code | |
808 __gnu_Unwind_RaiseException (_Unwind_Control_Block * ucbp, | |
809 phase2_vrs * entry_vrs) | |
810 { | |
811 phase1_vrs saved_vrs; | |
812 _Unwind_Reason_Code pr_result; | |
813 | |
814 /* Set the pc to the call site. */ | |
815 entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR]; | |
816 | |
817 /* Save the core registers. */ | |
818 saved_vrs.core = entry_vrs->core; | |
819 /* Set demand-save flags. */ | |
820 saved_vrs.demand_save_flags = ~(_uw) 0; | |
821 | |
822 /* Unwind until we reach a propagation barrier. */ | |
823 do | |
824 { | |
825 /* Find the entry for this routine. */ | |
826 if (get_eit_entry (ucbp, saved_vrs.core.r[R_PC]) != _URC_OK) | |
827 return _URC_FAILURE; | |
828 | |
829 /* Call the pr to decide what to do. */ | |
830 pr_result = ((personality_routine) UCB_PR_ADDR (ucbp)) | |
831 (_US_VIRTUAL_UNWIND_FRAME, ucbp, (void *) &saved_vrs); | |
832 } | |
833 while (pr_result == _URC_CONTINUE_UNWIND); | |
834 | |
835 /* We've unwound as far as we want to go, so restore the original | |
836 register state. */ | |
837 restore_non_core_regs (&saved_vrs); | |
838 if (pr_result != _URC_HANDLER_FOUND) | |
839 { | |
840 /* Some sort of failure has occurred in the pr and probably the | |
841 pr returned _URC_FAILURE. */ | |
842 return _URC_FAILURE; | |
843 } | |
844 | |
845 unwind_phase2 (ucbp, entry_vrs); | |
846 } | |
847 | |
848 /* Resume unwinding after a cleanup has been run. UCBP is the exception | |
849 being thrown and ENTRY_VRS is the register state on entry to | |
850 _Unwind_Resume. */ | |
851 _Unwind_Reason_Code | |
852 __gnu_Unwind_ForcedUnwind (_Unwind_Control_Block *, | |
853 _Unwind_Stop_Fn, void *, phase2_vrs *); | |
854 | |
855 _Unwind_Reason_Code | |
856 __gnu_Unwind_ForcedUnwind (_Unwind_Control_Block *ucbp, | |
857 _Unwind_Stop_Fn stop_fn, void *stop_arg, | |
858 phase2_vrs *entry_vrs) | |
859 { | |
860 UCB_FORCED_STOP_FN (ucbp) = (_uw) stop_fn; | |
861 UCB_FORCED_STOP_ARG (ucbp) = (_uw) stop_arg; | |
862 | |
863 /* Set the pc to the call site. */ | |
864 entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR]; | |
865 | |
866 return unwind_phase2_forced (ucbp, entry_vrs, 0); | |
867 } | |
868 | |
869 _Unwind_Reason_Code | |
870 __gnu_Unwind_Resume (_Unwind_Control_Block *, phase2_vrs *); | |
871 | |
/* Resume unwinding after a cleanup has run.  UCBP is the in-flight
   exception and ENTRY_VRS the register state on entry to
   _Unwind_Resume.  Never returns normally.  */
_Unwind_Reason_Code
__gnu_Unwind_Resume (_Unwind_Control_Block * ucbp, phase2_vrs * entry_vrs)
{
  _Unwind_Reason_Code pr_result;

  /* Recover the saved address.  */
  entry_vrs->core.r[R_PC] = UCB_SAVED_CALLSITE_ADDR (ucbp);

  /* A non-zero stop function marks this as a forced unwind.  */
  if (UCB_FORCED_STOP_FN (ucbp))
    {
      unwind_phase2_forced (ucbp, entry_vrs, 1);

      /* We can't return failure at this point.  */
      abort ();
    }

  /* Call the cached PR.  */
  pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
    (_US_UNWIND_FRAME_RESUME, ucbp, (_Unwind_Context *) entry_vrs);

  switch (pr_result)
    {
    case _URC_INSTALL_CONTEXT:
      /* Upload the registers to enter the landing pad.  */
      restore_core_regs (&entry_vrs->core);
      /* restore_core_regs never returns; the fall-through below is
	 unreachable.  */

    case _URC_CONTINUE_UNWIND:
      /* Continue unwinding the next frame.  */
      unwind_phase2 (ucbp, entry_vrs);
      /* unwind_phase2 never returns; the fall-through below is
	 unreachable.  */

    default:
      abort ();
    }
}
906 | |
907 _Unwind_Reason_Code | |
908 __gnu_Unwind_Resume_or_Rethrow (_Unwind_Control_Block *, phase2_vrs *); | |
909 | |
910 _Unwind_Reason_Code | |
911 __gnu_Unwind_Resume_or_Rethrow (_Unwind_Control_Block * ucbp, | |
912 phase2_vrs * entry_vrs) | |
913 { | |
914 if (!UCB_FORCED_STOP_FN (ucbp)) | |
915 return __gnu_Unwind_RaiseException (ucbp, entry_vrs); | |
916 | |
917 /* Set the pc to the call site. */ | |
918 entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR]; | |
919 /* Continue unwinding the next frame. */ | |
920 return unwind_phase2_forced (ucbp, entry_vrs, 0); | |
921 } | |
922 | |
/* Clean up an exception object when unwinding is complete.  This
   implementation keeps no per-unwind state in the UCB, so there is
   nothing to do.  */
void
_Unwind_Complete (_Unwind_Control_Block * ucbp __attribute__((unused)))
{
}
928 | |
929 | |
930 /* Get the _Unwind_Control_Block from an _Unwind_Context. */ | |
931 | |
932 static inline _Unwind_Control_Block * | |
933 unwind_UCB_from_context (_Unwind_Context * context) | |
934 { | |
935 return (_Unwind_Control_Block *) _Unwind_GetGR (context, R_IP); | |
936 } | |
937 | |
938 | |
939 /* Free an exception. */ | |
940 | |
941 void | |
942 _Unwind_DeleteException (_Unwind_Exception * exc) | |
943 { | |
944 if (exc->exception_cleanup) | |
945 (*exc->exception_cleanup) (_URC_FOREIGN_EXCEPTION_CAUGHT, exc); | |
946 } | |
947 | |
948 | |
949 /* Perform stack backtrace through unwind data. */ | |
950 _Unwind_Reason_Code | |
951 __gnu_Unwind_Backtrace(_Unwind_Trace_Fn trace, void * trace_argument, | |
952 phase2_vrs * entry_vrs); | |
953 _Unwind_Reason_Code | |
954 __gnu_Unwind_Backtrace(_Unwind_Trace_Fn trace, void * trace_argument, | |
955 phase2_vrs * entry_vrs) | |
956 { | |
957 phase1_vrs saved_vrs; | |
958 _Unwind_Reason_Code code; | |
959 | |
960 _Unwind_Control_Block ucb; | |
961 _Unwind_Control_Block *ucbp = &ucb; | |
962 | |
963 /* Set the pc to the call site. */ | |
964 entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR]; | |
965 | |
966 /* Save the core registers. */ | |
967 saved_vrs.core = entry_vrs->core; | |
968 /* Set demand-save flags. */ | |
969 saved_vrs.demand_save_flags = ~(_uw) 0; | |
970 | |
971 do | |
972 { | |
973 /* Find the entry for this routine. */ | |
974 if (get_eit_entry (ucbp, saved_vrs.core.r[R_PC]) != _URC_OK) | |
975 { | |
976 code = _URC_FAILURE; | |
977 break; | |
978 } | |
979 | |
980 /* The dwarf unwinder assumes the context structure holds things | |
981 like the function and LSDA pointers. The ARM implementation | |
982 caches these in the exception header (UCB). To avoid | |
983 rewriting everything we make the virtual IP register point at | |
984 the UCB. */ | |
985 _Unwind_SetGR((_Unwind_Context *)&saved_vrs, 12, (_Unwind_Ptr) ucbp); | |
986 | |
987 /* Call trace function. */ | |
988 if ((*trace) ((_Unwind_Context *) &saved_vrs, trace_argument) | |
989 != _URC_NO_REASON) | |
990 { | |
991 code = _URC_FAILURE; | |
992 break; | |
993 } | |
994 | |
995 /* Call the pr to decide what to do. */ | |
996 code = ((personality_routine) UCB_PR_ADDR (ucbp)) | |
997 (_US_VIRTUAL_UNWIND_FRAME | _US_FORCE_UNWIND, | |
998 ucbp, (void *) &saved_vrs); | |
999 } | |
1000 while (code != _URC_END_OF_STACK | |
1001 && code != _URC_FAILURE); | |
1002 | |
1003 finish: | |
1004 restore_non_core_regs (&saved_vrs); | |
1005 return code; | |
1006 } | |
1007 | |
1008 | |
/* Common implementation for ARM ABI defined personality routines.
   ID is the index of the personality routine, other arguments are as defined
   by __aeabi_unwind_cpp_pr{0,1,2}.

   Decodes the EH table entry cached in UCBP->pr_cache, scans any
   descriptor list attached to it (cleanups, catch handlers, exception
   specifications) for the current virtual pc, and finally executes the
   frame's unwind instructions.  Returns _URC_HANDLER_FOUND /
   _URC_INSTALL_CONTEXT / _URC_CONTINUE_UNWIND / _URC_FAILURE as expected
   by the unwinder core.  */

static _Unwind_Reason_Code
__gnu_unwind_pr_common (_Unwind_State state,
			_Unwind_Control_Block *ucbp,
			_Unwind_Context *context,
			int id)
{
  __gnu_unwind_state uws;
  _uw *data;
  _uw offset;
  _uw len;
  _uw rtti_count;
  int phase2_call_unexpected_after_unwind = 0;
  int in_range = 0;
  int forced_unwind = state & _US_FORCE_UNWIND;

  /* Strip the force-unwind bit; only the action remains.  */
  state &= _US_ACTION_MASK;

  /* Prime the opcode decoder from the EH table entry.  */
  data = (_uw *) ucbp->pr_cache.ehtp;
  uws.data = *(data++);
  uws.next = data;
  if (id == 0)
    {
      /* pr0: the first word packs three unwind bytes after the index
	 byte, and there are no additional opcode words.  */
      uws.data <<= 8;
      uws.words_left = 0;
      uws.bytes_left = 3;
    }
  else
    {
      /* pr1/pr2: byte 2 of the first word counts additional opcode
	 words; two opcode bytes remain in the first word.  Step DATA
	 past the opcode words to the descriptor area.  */
      uws.words_left = (uws.data >> 16) & 0xff;
      uws.data <<= 16;
      uws.bytes_left = 2;
      data += uws.words_left;
    }

  /* Restore the saved pointer.  */
  if (state == _US_UNWIND_FRAME_RESUME)
    data = (_uw *) ucbp->cleanup_cache.bitpattern[0];

  if ((ucbp->pr_cache.additional & 1) == 0)
    {
      /* Process descriptors.  The list is terminated by a zero word.  */
      while (*data)
	{
	  _uw addr;
	  _uw fnstart;

	  /* pr2 descriptors have 32-bit length/offset fields; pr0/pr1
	     use the packed 16-bit form.  */
	  if (id == 2)
	    {
	      len = ((EHT32 *) data)->length;
	      offset = ((EHT32 *) data)->offset;
	      data += 2;
	    }
	  else
	    {
	      len = ((EHT16 *) data)->length;
	      offset = ((EHT16 *) data)->offset;
	      data++;
	    }

	  /* Is the current pc inside the region this descriptor covers?
	     The low bits of OFFSET and LEN are flag bits, masked off.  */
	  fnstart = ucbp->pr_cache.fnstart + (offset & ~1);
	  addr = _Unwind_GetGR (context, R_PC);
	  in_range = (fnstart <= addr && addr < fnstart + (len & ~1));

	  /* The two low flag bits select the descriptor kind:
	     0 = cleanup, 1 = catch handler, 2 = exception spec.  */
	  switch (((offset & 1) << 1) | (len & 1))
	    {
	    case 0:
	      /* Cleanup.  */
	      if (state != _US_VIRTUAL_UNWIND_FRAME
		  && in_range)
		{
		  /* Cleanup in range, and we are running cleanups.  */
		  _uw lp;

		  /* Landing pad address is 31-bit pc-relative offset.  */
		  lp = selfrel_offset31 (data);
		  data++;
		  /* Save the exception data pointer so a later
		     _US_UNWIND_FRAME_RESUME can pick up where we left
		     off.  */
		  ucbp->cleanup_cache.bitpattern[0] = (_uw) data;
		  if (!__cxa_begin_cleanup (ucbp))
		    return _URC_FAILURE;
		  /* Setup the VRS to enter the landing pad.  */
		  _Unwind_SetGR (context, R_PC, lp);
		  return _URC_INSTALL_CONTEXT;
		}
	      /* Cleanup not in range, or we are in stage 1.  */
	      data++;
	      break;

	    case 1:
	      /* Catch handler.  */
	      if (state == _US_VIRTUAL_UNWIND_FRAME)
		{
		  if (in_range)
		    {
		      /* Check for a barrier.  */
		      _uw rtti;
		      bool is_reference = (data[0] & uint32_highbit) != 0;
		      void *matched;

		      /* Check for no-throw areas.  */
		      if (data[1] == (_uw) -2)
			return _URC_FAILURE;

		      /* The thrown object immediately follows the ECB.  */
		      matched = (void *)(ucbp + 1);
		      if (data[1] != (_uw) -1)
			{
			  /* Match a catch specification.  */
			  rtti = _Unwind_decode_target2 ((_uw) &data[1]);
			  if (!__cxa_type_match (ucbp, (type_info *) rtti,
						 is_reference,
						 &matched))
			    matched = (void *)0;
			}

		      if (matched)
			{
			  /* Record the barrier so phase 2 can recognise
			     this frame and enter the handler.  */
			  ucbp->barrier_cache.sp =
			    _Unwind_GetGR (context, R_SP);
			  ucbp->barrier_cache.bitpattern[0] = (_uw) matched;
			  ucbp->barrier_cache.bitpattern[1] = (_uw) data;
			  return _URC_HANDLER_FOUND;
			}
		    }
		  /* Handler out of range, or not matched.  */
		}
	      else if (ucbp->barrier_cache.sp == _Unwind_GetGR (context, R_SP)
		       && ucbp->barrier_cache.bitpattern[1] == (_uw) data)
		{
		  /* Matched a previous propagation barrier.  */
		  _uw lp;

		  /* Setup for entry to the handler.  */
		  lp = selfrel_offset31 (data);
		  _Unwind_SetGR (context, R_PC, lp);
		  _Unwind_SetGR (context, 0, (_uw) ucbp);
		  return _URC_INSTALL_CONTEXT;
		}
	      /* Catch handler not matched.  Advance to the next descriptor.  */
	      data += 2;
	      break;

	    case 2:
	      /* High bit of data[0] flags an attached landing pad word;
	         the rest is the number of rtti entries.  */
	      rtti_count = data[0] & 0x7fffffff;
	      /* Exception specification.  */
	      if (state == _US_VIRTUAL_UNWIND_FRAME)
		{
		  /* A forced unwind only stops at an empty spec
		     (rtti_count == 0), which rejects everything.  */
		  if (in_range && (!forced_unwind || !rtti_count))
		    {
		      /* Match against the exception specification.  */
		      _uw i;
		      _uw rtti;
		      void *matched;

		      /* NOTE(review): when rtti_count == 0 the loop body
			 never runs and MATCHED is stored below while
			 uninitialized — confirm whether any consumer
			 reads bitpattern[0] in that case.  */
		      for (i = 0; i < rtti_count; i++)
			{
			  matched = (void *)(ucbp + 1);
			  rtti = _Unwind_decode_target2 ((_uw) &data[i + 1]);
			  if (__cxa_type_match (ucbp, (type_info *) rtti, 0,
						&matched))
			    break;
			}

		      if (i == rtti_count)
			{
			  /* Exception does not match the spec.  */
			  ucbp->barrier_cache.sp =
			    _Unwind_GetGR (context, R_SP);
			  ucbp->barrier_cache.bitpattern[0] = (_uw) matched;
			  ucbp->barrier_cache.bitpattern[1] = (_uw) data;
			  return _URC_HANDLER_FOUND;
			}
		    }
		  /* Handler out of range, or exception is permitted.  */
		}
	      else if (ucbp->barrier_cache.sp == _Unwind_GetGR (context, R_SP)
		       && ucbp->barrier_cache.bitpattern[1] == (_uw) data)
		{
		  /* Matched a previous propagation barrier.  */
		  _uw lp;
		  /* Record the RTTI list for __cxa_call_unexpected:
		     count, base, stride, then the list address
		     (presumably the layout __cxa_call_unexpected
		     expects — see its implementation).  */
		  ucbp->barrier_cache.bitpattern[1] = rtti_count;
		  ucbp->barrier_cache.bitpattern[2] = 0;
		  ucbp->barrier_cache.bitpattern[3] = 4;
		  ucbp->barrier_cache.bitpattern[4] = (_uw) &data[1];

		  if (data[0] & uint32_highbit)
		    /* No landing pad here: unwind this frame first, then
		       enter __cxa_call_unexpected (see end of function).  */
		    phase2_call_unexpected_after_unwind = 1;
		  else
		    {
		      data += rtti_count + 1;
		      /* Setup for entry to the handler.  */
		      lp = selfrel_offset31 (data);
		      data++;
		      _Unwind_SetGR (context, R_PC, lp);
		      _Unwind_SetGR (context, 0, (_uw) ucbp);
		      return _URC_INSTALL_CONTEXT;
		    }
		}
	      /* Skip the optional landing pad word and the rtti list.  */
	      if (data[0] & uint32_highbit)
		data++;
	      data += rtti_count + 1;
	      break;

	    default:
	      /* Should never happen.  */
	      return _URC_FAILURE;
	    }
	  /* Finished processing this descriptor.  */
	}
    }

  /* Execute the frame's unwind opcodes to pop to the caller.  */
  if (__gnu_unwind_execute (context, &uws) != _URC_OK)
    return _URC_FAILURE;

  if (phase2_call_unexpected_after_unwind)
    {
      /* Enter __cxa_unexpected as if called from the call site.  */
      _Unwind_SetGR (context, R_LR, _Unwind_GetGR (context, R_PC));
      _Unwind_SetGR (context, R_PC, (_uw) &__cxa_call_unexpected);
      return _URC_INSTALL_CONTEXT;
    }

  return _URC_CONTINUE_UNWIND;
}
1238 | |
1239 | |
1240 /* ABI defined personality routine entry points. */ | |
1241 | |
1242 _Unwind_Reason_Code | |
1243 __aeabi_unwind_cpp_pr0 (_Unwind_State state, | |
1244 _Unwind_Control_Block *ucbp, | |
1245 _Unwind_Context *context) | |
1246 { | |
1247 return __gnu_unwind_pr_common (state, ucbp, context, 0); | |
1248 } | |
1249 | |
1250 _Unwind_Reason_Code | |
1251 __aeabi_unwind_cpp_pr1 (_Unwind_State state, | |
1252 _Unwind_Control_Block *ucbp, | |
1253 _Unwind_Context *context) | |
1254 { | |
1255 return __gnu_unwind_pr_common (state, ucbp, context, 1); | |
1256 } | |
1257 | |
1258 _Unwind_Reason_Code | |
1259 __aeabi_unwind_cpp_pr2 (_Unwind_State state, | |
1260 _Unwind_Control_Block *ucbp, | |
1261 _Unwind_Context *context) | |
1262 { | |
1263 return __gnu_unwind_pr_common (state, ucbp, context, 2); | |
1264 } |