Mercurial > hg > CbC > CbC_gcc
comparison gcc/dwarf2cfi.c @ 111:04ced10e8804
gcc 7
author | kono |
---|---|
date | Fri, 27 Oct 2017 22:46:09 +0900 |
parents | |
children | 84e7813d76e9 |
comparison
equal
deleted
inserted
replaced
68:561a7518be6b | 111:04ced10e8804 |
---|---|
1 /* Dwarf2 Call Frame Information helper routines. | |
2 Copyright (C) 1992-2017 Free Software Foundation, Inc. | |
3 | |
4 This file is part of GCC. | |
5 | |
6 GCC is free software; you can redistribute it and/or modify it under | |
7 the terms of the GNU General Public License as published by the Free | |
8 Software Foundation; either version 3, or (at your option) any later | |
9 version. | |
10 | |
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
14 for more details. | |
15 | |
16 You should have received a copy of the GNU General Public License | |
17 along with GCC; see the file COPYING3. If not see | |
18 <http://www.gnu.org/licenses/>. */ | |
19 | |
20 #include "config.h" | |
21 #include "system.h" | |
22 #include "coretypes.h" | |
23 #include "target.h" | |
24 #include "function.h" | |
25 #include "rtl.h" | |
26 #include "tree.h" | |
27 #include "tree-pass.h" | |
28 #include "memmodel.h" | |
29 #include "tm_p.h" | |
30 #include "emit-rtl.h" | |
31 #include "stor-layout.h" | |
32 #include "cfgbuild.h" | |
33 #include "dwarf2out.h" | |
34 #include "dwarf2asm.h" | |
35 #include "common/common-target.h" | |
36 | |
37 #include "except.h" /* expand_builtin_dwarf_sp_column */ | |
38 #include "profile-count.h" /* For expr.h */ | |
39 #include "expr.h" /* init_return_column_size */ | |
40 #include "output.h" /* asm_out_file */ | |
41 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */ | |
42 | |
43 | |
44 /* ??? Poison these here until it can be done generically. They've been | |
45 totally replaced in this file; make sure it stays that way. */ | |
46 #undef DWARF2_UNWIND_INFO | |
47 #undef DWARF2_FRAME_INFO | |
48 #if (GCC_VERSION >= 3000) | |
49 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO | |
50 #endif | |
51 | |
52 #ifndef INCOMING_RETURN_ADDR_RTX | |
53 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX) | |
54 #endif | |
55 | |
/* A collected description of an entire row of the abstract CFI table.  */
struct GTY(()) dw_cfi_row
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  Indexed by
     DWARF register column; a null element means that column is not
     currently saved (see update_row_reg_save).  */
  cfi_vec reg_save;
};
68 | |
/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
struct GTY(()) reg_saved_in_data {
  /* The register as the caller knows it.  */
  rtx orig_reg;
  /* The register currently holding ORIG_REG's entry value.  */
  rtx saved_in_reg;
};
74 | |
75 | |
76 /* Since we no longer have a proper CFG, we're going to create a facsimile | |
77 of one on the fly while processing the frame-related insns. | |
78 | |
79 We create dw_trace_info structures for each extended basic block beginning | |
80 and ending at a "save point". Save points are labels, barriers, certain | |
81 notes, and of course the beginning and end of the function. | |
82 | |
83 As we encounter control transfer insns, we propagate the "current" | |
84 row state across the edges to the starts of traces. When checking is | |
85 enabled, we validate that we propagate the same data from all sources. | |
86 | |
87 All traces are members of the TRACE_INFO array, in the order in which | |
88 they appear in the instruction stream. | |
89 | |
90 All save points are present in the TRACE_INDEX hash, mapping the insn | |
91 starting a trace to the dw_trace_info describing the trace. */ | |
92 | |
struct dw_trace_info
{
  /* The insn that begins the trace.  */
  rtx_insn *head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes are the values that must actually be emitted for this trace.  */
  HOST_WIDE_INT beg_true_args_size, end_true_args_size;
  HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx_insn *eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;
};
147 | |
148 | |
/* Hashtable helpers.  A trace is identified by the insn that heads it,
   so hashing and equality are defined in terms of HEAD.  nofree_ptr_hash
   because the table does not own the dw_trace_info storage.  */

struct trace_info_hasher : nofree_ptr_hash <dw_trace_info>
{
  static inline hashval_t hash (const dw_trace_info *);
  static inline bool equal (const dw_trace_info *, const dw_trace_info *);
};
156 | |
/* Hash a trace by the INSN_UID of its head insn.  Must stay in sync with
   get_trace_info, which looks up with INSN_UID directly.  */

inline hashval_t
trace_info_hasher::hash (const dw_trace_info *ti)
{
  return INSN_UID (ti->head);
}
162 | |
/* Two traces are the same trace iff they start at the same insn.  */

inline bool
trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
{
  return a->head == b->head;
}
168 | |
169 | |
/* The variables making up the pseudo-cfg, as described above.  */
static vec<dw_trace_info> trace_info;
static vec<dw_trace_info *> trace_work_list;
static hash_table<trace_info_hasher> *trace_index;

/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

/* NOTE(review): presumably records how the CIE saves the return address
   register, for reuse across FDEs -- confirm against later uses.  */
static GTY(()) reg_saved_in_data *cie_return_save;

/* Counter used by dwarf2out_cfi_label to generate unique LCFI labels.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx_insn *add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;

/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

struct queued_reg_save {
  /* The register being saved.  */
  rtx reg;
  /* The register it is saved in, or NULL if saved at CFA_OFFSET.  */
  rtx saved_reg;
  /* Offset from the CFA, used when SAVED_REG is NULL.  */
  HOST_WIDE_INT cfa_offset;
};


static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
221 | |
/* Hook used by __throw.  */

rtx
expand_builtin_dwarf_sp_column (void)
{
  /* Map the internal stack pointer register to its DWARF column, then
     apply the output mapping.  The second argument selects the EH
     variant of the mapping -- NOTE(review): confirm against the
     DWARF2_FRAME_REG_OUT definition for the target.  */
  unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
  return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
}
230 | |
231 /* MEM is a memory reference for the register size table, each element of | |
232 which has mode MODE. Initialize column C as a return address column. */ | |
233 | |
234 static void | |
235 init_return_column_size (scalar_int_mode mode, rtx mem, unsigned int c) | |
236 { | |
237 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode); | |
238 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode); | |
239 emit_move_insn (adjust_address (mem, mode, offset), | |
240 gen_int_mode (size, mode)); | |
241 } | |
242 | |
/* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
   init_one_dwarf_reg_size to communicate on what has been done by the
   latter.  */

struct init_one_dwarf_reg_state
{
  /* Whether the dwarf return column was initialized.  */
  bool wrote_return_column;

  /* For each hard register REGNO, whether init_one_dwarf_reg_size
     was given REGNO to process already.  Prevents double-processing of
     registers that appear both standalone and inside a register span.  */
  bool processed_regno [FIRST_PSEUDO_REGISTER];

};
257 | |
/* Helper for expand_builtin_init_dwarf_reg_sizes.  Generate code to
   initialize the dwarf register size table entry corresponding to register
   REGNO in REGMODE.  TABLE is the table base address, SLOTMODE is the mode to
   use for the size entry to initialize, and INIT_STATE is the communication
   datastructure conveying what we're doing to our caller.  */

static
void init_one_dwarf_reg_size (int regno, machine_mode regmode,
			      rtx table, machine_mode slotmode,
			      init_one_dwarf_reg_state *init_state)
{
  const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
  const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
  const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);

  const HOST_WIDE_INT slotoffset = dcol * GET_MODE_SIZE (slotmode);
  const HOST_WIDE_INT regsize = GET_MODE_SIZE (regmode);

  /* Mark REGNO processed even if we bail out below, so the caller does
     not reconsider it as part of a register span.  */
  init_state->processed_regno[regno] = true;

  /* Registers mapped beyond the table are not described.  */
  if (rnum >= DWARF_FRAME_REGISTERS)
    return;

  if (dnum == DWARF_FRAME_RETURN_COLUMN)
    {
      if (regmode == VOIDmode)
	return;
      init_state->wrote_return_column = true;
    }

  /* NOTE(review): a negative slot offset presumably denotes a column we
     do not describe -- confirm against DWARF_REG_TO_UNWIND_COLUMN.  */
  if (slotoffset < 0)
    return;

  emit_move_insn (adjust_address (table, slotmode, slotoffset),
		  gen_int_mode (regsize, slotmode));
}
294 | |
/* Generate code to initialize the dwarf register size table located
   at the provided ADDRESS.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  /* Table slots have the width of a char.  */
  scalar_int_mode mode = SCALAR_INT_TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);

  init_one_dwarf_reg_state init_state;

  memset ((char *)&init_state, 0, sizeof (init_state));

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      machine_mode save_mode;
      rtx span;

      /* No point in processing a register multiple times.  This could happen
	 with register spans, e.g. when a reg is first processed as a piece of
	 a span, then as a register on its own later on.  */

      if (init_state.processed_regno[i])
	continue;

      save_mode = targetm.dwarf_frame_reg_mode (i);
      span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));

      /* A null span means the register maps to a single DWARF column;
	 otherwise process each piece of the span separately.  */
      if (!span)
	init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
      else
	{
	  for (int si = 0; si < XVECLEN (span, 0); si++)
	    {
	      rtx reg = XVECEXP (span, 0, si);

	      init_one_dwarf_reg_size
		(REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
	    }
	}
    }

  /* Make sure the return address column is always initialized.  */
  if (!init_state.wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  targetm.init_dwarf_reg_sizes_extra (address);
}
348 | |
349 | |
350 static dw_trace_info * | |
351 get_trace_info (rtx_insn *insn) | |
352 { | |
353 dw_trace_info dummy; | |
354 dummy.head = insn; | |
355 return trace_index->find_with_hash (&dummy, INSN_UID (insn)); | |
356 } | |
357 | |
358 static bool | |
359 save_point_p (rtx_insn *insn) | |
360 { | |
361 /* Labels, except those that are really jump tables. */ | |
362 if (LABEL_P (insn)) | |
363 return inside_basic_block_p (insn); | |
364 | |
365 /* We split traces at the prologue/epilogue notes because those | |
366 are points at which the unwind info is usually stable. This | |
367 makes it easier to find spots with identical unwind info so | |
368 that we can use remember/restore_state opcodes. */ | |
369 if (NOTE_P (insn)) | |
370 switch (NOTE_KIND (insn)) | |
371 { | |
372 case NOTE_INSN_PROLOGUE_END: | |
373 case NOTE_INSN_EPILOGUE_BEG: | |
374 return true; | |
375 } | |
376 | |
377 return false; | |
378 } | |
379 | |
380 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */ | |
381 | |
382 static inline HOST_WIDE_INT | |
383 div_data_align (HOST_WIDE_INT off) | |
384 { | |
385 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT; | |
386 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off); | |
387 return r; | |
388 } | |
389 | |
390 /* Return true if we need a signed version of a given opcode | |
391 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */ | |
392 | |
393 static inline bool | |
394 need_data_align_sf_opcode (HOST_WIDE_INT off) | |
395 { | |
396 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0; | |
397 } | |
398 | |
/* Return a pointer to a newly allocated Call Frame Instruction.  */

static inline dw_cfi_ref
new_cfi (void)
{
  /* GC-allocated.  Both operands start as register number 0; callers
     overwrite whichever fields the chosen opcode uses.  */
  dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
  cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;

  return cfi;
}
411 | |
/* Return a newly allocated CFI row, with no defined data.  */

static dw_cfi_row *
new_cfi_row (void)
{
  dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();

  /* INVALID_REGNUM marks the CFA as not yet defined.  */
  row->cfa.reg = INVALID_REGNUM;

  return row;
}
423 | |
/* Return a copy of an existing CFI row.  */

static dw_cfi_row *
copy_cfi_row (dw_cfi_row *src)
{
  dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();

  /* Shallow-copy the scalars, then deep-copy the reg_save vector so
     the two rows can be updated independently.  */
  *dst = *src;
  dst->reg_save = vec_safe_copy (src->reg_save);

  return dst;
}
436 | |
/* Generate a new label for the CFI info to refer to.  The caller owns
   (and must free) the returned string.  */

static char *
dwarf2out_cfi_label (void)
{
  int num = dwarf2out_cfi_label_num++;
  /* "LCFI" plus a decimal counter; 20 bytes is ample.  */
  char label[20];

  ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);

  return xstrdup (label);
}
449 | |
/* Add CFI either to the current insn stream or to a vector, or both.  */

static void
add_cfi (dw_cfi_ref cfi)
{
  any_cfis_emitted = true;

  if (add_cfi_insn != NULL)
    {
      /* Attach the CFI to a note emitted right after ADD_CFI_INSN, and
	 advance the insertion point past the new note.  */
      add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
      NOTE_CFI (add_cfi_insn) = cfi;
    }

  if (add_cfi_vec != NULL)
    vec_safe_push (*add_cfi_vec, cfi);
}
466 | |
/* Emit a DW_CFA_GNU_args_size opcode recording SIZE as the current
   outgoing-argument area size.  */

static void
add_cfi_args_size (HOST_WIDE_INT size)
{
  dw_cfi_ref cfi = new_cfi ();

  /* While we can occasionally have args_size < 0 internally, this state
     should not persist at a point we actually need an opcode.  */
  gcc_assert (size >= 0);

  cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
  cfi->dw_cfi_oprnd1.dw_cfi_offset = size;

  add_cfi (cfi);
}
481 | |
482 static void | |
483 add_cfi_restore (unsigned reg) | |
484 { | |
485 dw_cfi_ref cfi = new_cfi (); | |
486 | |
487 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore); | |
488 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg; | |
489 | |
490 add_cfi (cfi); | |
491 } | |
492 | |
/* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
   that the register column is no longer saved.  */

static void
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
  /* Grow the vector on demand; new tail slots are cleared to null.  */
  if (vec_safe_length (row->reg_save) <= column)
    vec_safe_grow_cleared (row->reg_save, column + 1);
  (*row->reg_save)[column] = cfi;
}
503 | |
/* This function fills in a dw_cfa_location structure from a dwarf location
   descriptor sequence.  Only the restricted expression forms this file
   itself generates are supported; any other opcode aborts.  */

static void
get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
{
  struct dw_loc_descr_node *ptr;
  /* Start from an empty location.  NOTE(review): the reg field is set
     to -1 here but to INVALID_REGNUM in new_cfi_row -- presumably the
     same value after conversion; confirm.  */
  cfa->offset = 0;
  cfa->base_offset = 0;
  cfa->indirect = 0;
  cfa->reg = -1;

  for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
    {
      enum dwarf_location_atom op = ptr->dw_loc_opc;

      switch (op)
	{
	case DW_OP_reg0:
	case DW_OP_reg1:
	case DW_OP_reg2:
	case DW_OP_reg3:
	case DW_OP_reg4:
	case DW_OP_reg5:
	case DW_OP_reg6:
	case DW_OP_reg7:
	case DW_OP_reg8:
	case DW_OP_reg9:
	case DW_OP_reg10:
	case DW_OP_reg11:
	case DW_OP_reg12:
	case DW_OP_reg13:
	case DW_OP_reg14:
	case DW_OP_reg15:
	case DW_OP_reg16:
	case DW_OP_reg17:
	case DW_OP_reg18:
	case DW_OP_reg19:
	case DW_OP_reg20:
	case DW_OP_reg21:
	case DW_OP_reg22:
	case DW_OP_reg23:
	case DW_OP_reg24:
	case DW_OP_reg25:
	case DW_OP_reg26:
	case DW_OP_reg27:
	case DW_OP_reg28:
	case DW_OP_reg29:
	case DW_OP_reg30:
	case DW_OP_reg31:
	  /* DW_OP_regN encodes the register number in the opcode.  */
	  cfa->reg = op - DW_OP_reg0;
	  break;
	case DW_OP_regx:
	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
	  break;
	case DW_OP_breg0:
	case DW_OP_breg1:
	case DW_OP_breg2:
	case DW_OP_breg3:
	case DW_OP_breg4:
	case DW_OP_breg5:
	case DW_OP_breg6:
	case DW_OP_breg7:
	case DW_OP_breg8:
	case DW_OP_breg9:
	case DW_OP_breg10:
	case DW_OP_breg11:
	case DW_OP_breg12:
	case DW_OP_breg13:
	case DW_OP_breg14:
	case DW_OP_breg15:
	case DW_OP_breg16:
	case DW_OP_breg17:
	case DW_OP_breg18:
	case DW_OP_breg19:
	case DW_OP_breg20:
	case DW_OP_breg21:
	case DW_OP_breg22:
	case DW_OP_breg23:
	case DW_OP_breg24:
	case DW_OP_breg25:
	case DW_OP_breg26:
	case DW_OP_breg27:
	case DW_OP_breg28:
	case DW_OP_breg29:
	case DW_OP_breg30:
	case DW_OP_breg31:
	  /* DW_OP_bregN: register plus the signed offset operand.  */
	  cfa->reg = op - DW_OP_breg0;
	  cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
	  break;
	case DW_OP_bregx:
	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
	  cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
	  break;
	case DW_OP_deref:
	  cfa->indirect = 1;
	  break;
	case DW_OP_plus_uconst:
	  cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
	  break;
	default:
	  /* Any opcode not produced by build_cfa_loc is unsupported.  */
	  gcc_unreachable ();
	}
    }
}
609 | |
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      /* Only one level of remembered state is supported.  */
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      /* Opcodes that do not affect the CFA (register saves, advances)
	 are deliberately ignored.  */
      break;
    }
}
650 | |
651 /* Determine if two dw_cfa_location structures define the same data. */ | |
652 | |
653 bool | |
654 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2) | |
655 { | |
656 return (loc1->reg == loc2->reg | |
657 && loc1->offset == loc2->offset | |
658 && loc1->indirect == loc2->indirect | |
659 && (loc1->indirect == 0 | |
660 || loc1->base_offset == loc2->base_offset)); | |
661 } | |
662 | |
/* Determine if two CFI operands are identical.  T selects which member
   of the dw_cfi_oprnd union is meaningful.  */

static bool
cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
{
  switch (t)
    {
    case dw_cfi_oprnd_unused:
      return true;
    case dw_cfi_oprnd_reg_num:
      return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
    case dw_cfi_oprnd_offset:
      return a->dw_cfi_offset == b->dw_cfi_offset;
    case dw_cfi_oprnd_addr:
      /* Try pointer identity first; otherwise compare the label text.  */
      return (a->dw_cfi_addr == b->dw_cfi_addr
	      || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
    case dw_cfi_oprnd_loc:
      return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
    }
  /* Every dw_cfi_oprnd_type value is handled above.  */
  gcc_unreachable ();
}
684 | |
685 /* Determine if two CFI entries are identical. */ | |
686 | |
687 static bool | |
688 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b) | |
689 { | |
690 enum dwarf_call_frame_info opc; | |
691 | |
692 /* Make things easier for our callers, including missing operands. */ | |
693 if (a == b) | |
694 return true; | |
695 if (a == NULL || b == NULL) | |
696 return false; | |
697 | |
698 /* Obviously, the opcodes must match. */ | |
699 opc = a->dw_cfi_opc; | |
700 if (opc != b->dw_cfi_opc) | |
701 return false; | |
702 | |
703 /* Compare the two operands, re-using the type of the operands as | |
704 already exposed elsewhere. */ | |
705 return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc), | |
706 &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1) | |
707 && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc), | |
708 &a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2)); | |
709 } | |
710 | |
/* Determine if two CFI_ROW structures are identical.  */

static bool
cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
{
  size_t i, n_a, n_b, n_max;

  /* Compare the CFA.  NOTE(review): when only B carries a cfa_cfi
     expression we fall back to comparing the simple CFA locations, so
     the comparison is not perfectly symmetric -- confirm callers never
     rely on symmetry here.  */
  if (a->cfa_cfi)
    {
      if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
	return false;
    }
  else if (!cfa_equal_p (&a->cfa, &b->cfa))
    return false;

  /* Compare reg_save entries pairwise; missing tail entries count as
     null, i.e. "not saved".  */
  n_a = vec_safe_length (a->reg_save);
  n_b = vec_safe_length (b->reg_save);
  n_max = MAX (n_a, n_b);

  for (i = 0; i < n_max; ++i)
    {
      dw_cfi_ref r_a = NULL, r_b = NULL;

      if (i < n_a)
	r_a = (*a->reg_save)[i];
      if (i < n_b)
	r_b = (*b->reg_save)[i];

      if (!cfi_equal_p (r_a, r_b))
	return false;
    }

  return true;
}
745 | |
/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
    }
  else if (new_cfa->offset == old_cfa->offset
	   && old_cfa->reg != INVALID_REGNUM
	   && !new_cfa->indirect
	   && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  The INVALID_REGNUM check guards against
	 an old CFA that was never defined (see new_cfi_row).  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
  else if (new_cfa->indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_node *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  return cfi;
}
812 | |
/* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.  */

static void
def_cfa_1 (dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* Keep the trace's cfa_store offset tracking in sync when the CFA is
     based on the same register.  */
  if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
    cur_trace->cfa_store.offset = new_cfa->offset;

  cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
  if (cfi)
    {
      cur_row->cfa = *new_cfa;
      /* Record the full expression CFI only when one was required.  */
      cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
			  ? cfi : NULL);

      add_cfi (cfi);
    }
}
833 | |
/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG.  */

static void
reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
	= build_cfa_aligned_loc (&cur_row->cfa, offset,
				 fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      /* Saved at an offset from the CFA.  Pick the most compact opcode
	 that can represent the (possibly sign-flipped) factored offset.  */
      if (need_data_align_sf_opcode (offset))
	cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
	cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
	cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      /* Saved in another register.  */
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}
884 | |
/* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
   and adjust data structures to match.  Updates the trace's running
   end_true_args_size and, when the CFA is SP-based, the CFA offset.  */

static void
notice_args_size (rtx_insn *insn)
{
  HOST_WIDE_INT args_size, delta;
  rtx note;

  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
  if (note == NULL)
    return;

  args_size = INTVAL (XEXP (note, 0));
  delta = args_size - cur_trace->end_true_args_size;
  if (delta == 0)
    return;

  cur_trace->end_true_args_size = args_size;

  /* If the CFA is computed off the stack pointer, then we must adjust
     the computation of the CFA as well.  */
  if (cur_cfa->reg == dw_stack_pointer_regnum)
    {
      gcc_assert (!cur_cfa->indirect);

      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_cfa->offset += delta;
    }
}
919 | |
/* A subroutine of scan_trace.  INSN is can_throw_internal.  Update the
   data within the trace related to EH insns and args_size.  */

static void
notice_eh_throw (rtx_insn *insn)
{
  HOST_WIDE_INT args_size;

  args_size = cur_trace->end_true_args_size;
  if (cur_trace->eh_head == NULL)
    {
      /* First EH insn of the trace: record it and seed the delayed
	 args_size bookkeeping; nothing is emitted yet.  */
      cur_trace->eh_head = insn;
      cur_trace->beg_delay_args_size = args_size;
      cur_trace->end_delay_args_size = args_size;
    }
  else if (cur_trace->end_delay_args_size != args_size)
    {
      cur_trace->end_delay_args_size = args_size;

      /* ??? If the CFA is the stack pointer, search backward for the last
	 CFI note and insert there.  Given that the stack changed for the
	 args_size change, there *must* be such a note in between here and
	 the last eh insn.  */
      add_cfi_args_size (args_size);
    }
}
946 | |
947 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */ | |
948 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is | |
949 used in places where rtl is prohibited. */ | |
950 | |
951 static inline unsigned | |
952 dwf_regno (const_rtx reg) | |
953 { | |
954 gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER); | |
955 return DWARF_FRAME_REGNUM (REGNO (reg)); | |
956 } | |
957 | |
958 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */ | |
959 | |
960 static bool | |
961 compare_reg_or_pc (rtx x, rtx y) | |
962 { | |
963 if (REG_P (x) && REG_P (y)) | |
964 return REGNO (x) == REGNO (y); | |
965 return x == y; | |
966 } | |
967 | |
968 /* Record SRC as being saved in DEST. DEST may be null to delete an | |
969 existing entry. SRC may be a register or PC_RTX. */ | |
970 | |
971 static void | |
972 record_reg_saved_in_reg (rtx dest, rtx src) | |
973 { | |
974 reg_saved_in_data *elt; | |
975 size_t i; | |
976 | |
977 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt) | |
978 if (compare_reg_or_pc (elt->orig_reg, src)) | |
979 { | |
980 if (dest == NULL) | |
981 cur_trace->regs_saved_in_regs.unordered_remove (i); | |
982 else | |
983 elt->saved_in_reg = dest; | |
984 return; | |
985 } | |
986 | |
987 if (dest == NULL) | |
988 return; | |
989 | |
990 reg_saved_in_data e = {src, dest}; | |
991 cur_trace->regs_saved_in_regs.safe_push (e); | |
992 } | |
993 | |
994 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at | |
995 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */ | |
996 | |
997 static void | |
998 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset) | |
999 { | |
1000 queued_reg_save *q; | |
1001 queued_reg_save e = {reg, sreg, offset}; | |
1002 size_t i; | |
1003 | |
1004 /* Duplicates waste space, but it's also necessary to remove them | |
1005 for correctness, since the queue gets output in reverse order. */ | |
1006 FOR_EACH_VEC_ELT (queued_reg_saves, i, q) | |
1007 if (compare_reg_or_pc (q->reg, reg)) | |
1008 { | |
1009 *q = e; | |
1010 return; | |
1011 } | |
1012 | |
1013 queued_reg_saves.safe_push (e); | |
1014 } | |
1015 | |
1016 /* Output all the entries in QUEUED_REG_SAVES. */ | |
1017 | |
1018 static void | |
1019 dwarf2out_flush_queued_reg_saves (void) | |
1020 { | |
1021 queued_reg_save *q; | |
1022 size_t i; | |
1023 | |
1024 FOR_EACH_VEC_ELT (queued_reg_saves, i, q) | |
1025 { | |
1026 unsigned int reg, sreg; | |
1027 | |
1028 record_reg_saved_in_reg (q->saved_reg, q->reg); | |
1029 | |
1030 if (q->reg == pc_rtx) | |
1031 reg = DWARF_FRAME_RETURN_COLUMN; | |
1032 else | |
1033 reg = dwf_regno (q->reg); | |
1034 if (q->saved_reg) | |
1035 sreg = dwf_regno (q->saved_reg); | |
1036 else | |
1037 sreg = INVALID_REGNUM; | |
1038 reg_save (reg, sreg, q->cfa_offset); | |
1039 } | |
1040 | |
1041 queued_reg_saves.truncate (0); | |
1042 } | |
1043 | |
1044 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved | |
1045 location for? Or, does it clobber a register which we've previously | |
1046 said that some other register is saved in, and for which we now | |
1047 have a new location for? */ | |
1048 | |
1049 static bool | |
1050 clobbers_queued_reg_save (const_rtx insn) | |
1051 { | |
1052 queued_reg_save *q; | |
1053 size_t iq; | |
1054 | |
1055 FOR_EACH_VEC_ELT (queued_reg_saves, iq, q) | |
1056 { | |
1057 size_t ir; | |
1058 reg_saved_in_data *rir; | |
1059 | |
1060 if (modified_in_p (q->reg, insn)) | |
1061 return true; | |
1062 | |
1063 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir) | |
1064 if (compare_reg_or_pc (q->reg, rir->orig_reg) | |
1065 && modified_in_p (rir->saved_in_reg, insn)) | |
1066 return true; | |
1067 } | |
1068 | |
1069 return false; | |
1070 } | |
1071 | |
1072 /* What register, if any, is currently saved in REG? */ | |
1073 | |
1074 static rtx | |
1075 reg_saved_in (rtx reg) | |
1076 { | |
1077 unsigned int regn = REGNO (reg); | |
1078 queued_reg_save *q; | |
1079 reg_saved_in_data *rir; | |
1080 size_t i; | |
1081 | |
1082 FOR_EACH_VEC_ELT (queued_reg_saves, i, q) | |
1083 if (q->saved_reg && regn == REGNO (q->saved_reg)) | |
1084 return q->reg; | |
1085 | |
1086 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir) | |
1087 if (regn == REGNO (rir->saved_in_reg)) | |
1088 return rir->orig_reg; | |
1089 | |
1090 return NULL_RTX; | |
1091 } | |
1092 | |
/* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note.
   PAT describes the new CFA as a register, register + offset, or a
   memory reference thereof; decode it into *cur_cfa.  */

static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  /* Start from a blank CFA description; every field is filled in
     below from the shape of PAT.  */
  memset (cur_cfa, 0, sizeof (*cur_cfa));

  /* (plus <reg-or-mem> <const_int>): CFA is base + offset.  */
  if (GET_CODE (pat) == PLUS)
    {
      cur_cfa->offset = INTVAL (XEXP (pat, 1));
      pat = XEXP (pat, 0);
    }
  /* (mem ...): the CFA value is loaded from memory; record the
     address as a base register plus optional base_offset.  */
  if (MEM_P (pat))
    {
      cur_cfa->indirect = 1;
      pat = XEXP (pat, 0);
      if (GET_CODE (pat) == PLUS)
	{
	  cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
	  pat = XEXP (pat, 0);
	}
    }
  /* ??? If this fails, we could be calling into the _loc functions to
     define a full expression.  So far no port does that.  */
  gcc_assert (REG_P (pat));
  cur_cfa->reg = dwf_regno (pat);
}
1120 | |
/* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note.
   PAT is a SET moving the CFA into a (possibly different) register,
   optionally adding a constant.  */

static void
dwarf2out_frame_debug_adjust_cfa (rtx pat)
{
  rtx src, dest;

  gcc_assert (GET_CODE (pat) == SET);
  dest = XEXP (pat, 0);
  src = XEXP (pat, 1);

  switch (GET_CODE (src))
    {
    case PLUS:
      /* (set DEST (plus CFA-REG N)): moving the CFA register up by N
	 means the CFA is N closer to DEST, hence offset decreases.  */
      gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
      cur_cfa->offset -= INTVAL (XEXP (src, 1));
      break;

    case REG:
      /* Plain register copy: only the CFA register changes.  */
      break;

    default:
      gcc_unreachable ();
    }

  cur_cfa->reg = dwf_regno (dest);
  gcc_assert (cur_cfa->indirect == 0);
}
1149 | |
1150 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */ | |
1151 | |
1152 static void | |
1153 dwarf2out_frame_debug_cfa_offset (rtx set) | |
1154 { | |
1155 HOST_WIDE_INT offset; | |
1156 rtx src, addr, span; | |
1157 unsigned int sregno; | |
1158 | |
1159 src = XEXP (set, 1); | |
1160 addr = XEXP (set, 0); | |
1161 gcc_assert (MEM_P (addr)); | |
1162 addr = XEXP (addr, 0); | |
1163 | |
1164 /* As documented, only consider extremely simple addresses. */ | |
1165 switch (GET_CODE (addr)) | |
1166 { | |
1167 case REG: | |
1168 gcc_assert (dwf_regno (addr) == cur_cfa->reg); | |
1169 offset = -cur_cfa->offset; | |
1170 break; | |
1171 case PLUS: | |
1172 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg); | |
1173 offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset; | |
1174 break; | |
1175 default: | |
1176 gcc_unreachable (); | |
1177 } | |
1178 | |
1179 if (src == pc_rtx) | |
1180 { | |
1181 span = NULL; | |
1182 sregno = DWARF_FRAME_RETURN_COLUMN; | |
1183 } | |
1184 else | |
1185 { | |
1186 span = targetm.dwarf_register_span (src); | |
1187 sregno = dwf_regno (src); | |
1188 } | |
1189 | |
1190 /* ??? We'd like to use queue_reg_save, but we need to come up with | |
1191 a different flushing heuristic for epilogues. */ | |
1192 if (!span) | |
1193 reg_save (sregno, INVALID_REGNUM, offset); | |
1194 else | |
1195 { | |
1196 /* We have a PARALLEL describing where the contents of SRC live. | |
1197 Adjust the offset for each piece of the PARALLEL. */ | |
1198 HOST_WIDE_INT span_offset = offset; | |
1199 | |
1200 gcc_assert (GET_CODE (span) == PARALLEL); | |
1201 | |
1202 const int par_len = XVECLEN (span, 0); | |
1203 for (int par_index = 0; par_index < par_len; par_index++) | |
1204 { | |
1205 rtx elem = XVECEXP (span, 0, par_index); | |
1206 sregno = dwf_regno (src); | |
1207 reg_save (sregno, INVALID_REGNUM, span_offset); | |
1208 span_offset += GET_MODE_SIZE (GET_MODE (elem)); | |
1209 } | |
1210 } | |
1211 } | |
1212 | |
1213 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */ | |
1214 | |
1215 static void | |
1216 dwarf2out_frame_debug_cfa_register (rtx set) | |
1217 { | |
1218 rtx src, dest; | |
1219 unsigned sregno, dregno; | |
1220 | |
1221 src = XEXP (set, 1); | |
1222 dest = XEXP (set, 0); | |
1223 | |
1224 record_reg_saved_in_reg (dest, src); | |
1225 if (src == pc_rtx) | |
1226 sregno = DWARF_FRAME_RETURN_COLUMN; | |
1227 else | |
1228 sregno = dwf_regno (src); | |
1229 | |
1230 dregno = dwf_regno (dest); | |
1231 | |
1232 /* ??? We'd like to use queue_reg_save, but we need to come up with | |
1233 a different flushing heuristic for epilogues. */ | |
1234 reg_save (sregno, dregno, 0); | |
1235 } | |
1236 | |
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note.
   SET stores register SRC into memory DEST; record the save location as
   a DW_CFA_expression computed from DEST's address.  */

static void
dwarf2out_frame_debug_cfa_expression (rtx set)
{
  rtx src, dest, span;
  dw_cfi_ref cfi = new_cfi ();
  unsigned regno;

  dest = SET_DEST (set);
  src = SET_SRC (set);

  gcc_assert (REG_P (src));
  gcc_assert (MEM_P (dest));

  /* Multi-register spans are not supported for this note.  */
  span = targetm.dwarf_register_span (src);
  gcc_assert (!span);

  regno = dwf_regno (src);

  /* Describe the slot's address as a DWARF location expression.  */
  cfi->dw_cfi_opc = DW_CFA_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);

  /* ??? We'd like to use queue_reg_save, were the interface different,
     and, as above, we could manage flushing for epilogues.  */
  add_cfi (cfi);
  update_row_reg_save (cur_row, regno, cfi);
}
1268 | |
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_VAL_EXPRESSION
   note.  SET gives register DEST the value computed by expression SRC;
   record it as a DW_CFA_val_expression rule.  */

static void
dwarf2out_frame_debug_cfa_val_expression (rtx set)
{
  rtx dest = SET_DEST (set);
  gcc_assert (REG_P (dest));

  /* Multi-register spans are not supported for this note.  */
  rtx span = targetm.dwarf_register_span (dest);
  gcc_assert (!span);

  rtx src = SET_SRC (set);
  dw_cfi_ref cfi = new_cfi ();
  cfi->dw_cfi_opc = DW_CFA_val_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = dwf_regno (dest);
  /* The expression yields the register's value itself, not an address.  */
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (src, GET_MODE (src),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
  add_cfi (cfi);
  update_row_reg_save (cur_row, dwf_regno (dest), cfi);
}
1291 | |
1292 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */ | |
1293 | |
1294 static void | |
1295 dwarf2out_frame_debug_cfa_restore (rtx reg) | |
1296 { | |
1297 gcc_assert (REG_P (reg)); | |
1298 | |
1299 rtx span = targetm.dwarf_register_span (reg); | |
1300 if (!span) | |
1301 { | |
1302 unsigned int regno = dwf_regno (reg); | |
1303 add_cfi_restore (regno); | |
1304 update_row_reg_save (cur_row, regno, NULL); | |
1305 } | |
1306 else | |
1307 { | |
1308 /* We have a PARALLEL describing where the contents of REG live. | |
1309 Restore the register for each piece of the PARALLEL. */ | |
1310 gcc_assert (GET_CODE (span) == PARALLEL); | |
1311 | |
1312 const int par_len = XVECLEN (span, 0); | |
1313 for (int par_index = 0; par_index < par_len; par_index++) | |
1314 { | |
1315 reg = XVECEXP (span, 0, par_index); | |
1316 gcc_assert (REG_P (reg)); | |
1317 unsigned int regno = dwf_regno (reg); | |
1318 add_cfi_restore (regno); | |
1319 update_row_reg_save (cur_row, regno, NULL); | |
1320 } | |
1321 } | |
1322 } | |
1323 | |
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
   Emits a bare DW_CFA_GNU_window_save opcode (SPARC register windows).
   ??? Perhaps we should note in the CIE where windows are saved (instead of
   assuming 0(cfa)) and what registers are in the window.  */

static void
dwarf2out_frame_debug_cfa_window_save (void)
{
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
  add_cfi (cfi);
}
1336 | |
1337 /* Record call frame debugging information for an expression EXPR, | |
1338 which either sets SP or FP (adjusting how we calculate the frame | |
1339 address) or saves a register to the stack or another register. | |
1340 LABEL indicates the address of EXPR. | |
1341 | |
1342 This function encodes a state machine mapping rtxes to actions on | |
1343 cfa, cfa_store, and cfa_temp.reg. We describe these rules so | |
1344 users need not read the source code. | |
1345 | |
1346 The High-Level Picture | |
1347 | |
1348 Changes in the register we use to calculate the CFA: Currently we | |
1349 assume that if you copy the CFA register into another register, we | |
1350 should take the other one as the new CFA register; this seems to | |
1351 work pretty well. If it's wrong for some target, it's simple | |
1352 enough not to set RTX_FRAME_RELATED_P on the insn in question. | |
1353 | |
1354 Changes in the register we use for saving registers to the stack: | |
1355 This is usually SP, but not always. Again, we deduce that if you | |
1356 copy SP into another register (and SP is not the CFA register), | |
1357 then the new register is the one we will be using for register | |
1358 saves. This also seems to work. | |
1359 | |
1360 Register saves: There's not much guesswork about this one; if | |
1361 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a | |
1362 register save, and the register used to calculate the destination | |
1363 had better be the one we think we're using for this purpose. | |
1364 It's also assumed that a copy from a call-saved register to another | |
1365 register is saving that register if RTX_FRAME_RELATED_P is set on | |
1366 that instruction. If the copy is from a call-saved register to | |
1367 the *same* register, that means that the register is now the same | |
1368 value as in the caller. | |
1369 | |
1370 Except: If the register being saved is the CFA register, and the | |
1371 offset is nonzero, we are saving the CFA, so we assume we have to | |
1372 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that | |
1373 the intent is to save the value of SP from the previous frame. | |
1374 | |
   In addition, if a register has previously been saved to a different
   register, a later copy out of that holding register is treated as a
   save of the original register (see reg_saved_in).
1377 | |
1378 Invariants / Summaries of Rules | |
1379 | |
1380 cfa current rule for calculating the CFA. It usually | |
1381 consists of a register and an offset. This is | |
1382 actually stored in *cur_cfa, but abbreviated | |
1383 for the purposes of this documentation. | |
1384 cfa_store register used by prologue code to save things to the stack | |
1385 cfa_store.offset is the offset from the value of | |
1386 cfa_store.reg to the actual CFA | |
1387 cfa_temp register holding an integral value. cfa_temp.offset | |
1388 stores the value, which will be used to adjust the | |
1389 stack pointer. cfa_temp is also used like cfa_store, | |
1390 to track stores to the stack via fp or a temp reg. | |
1391 | |
1392 Rules 1- 4: Setting a register's value to cfa.reg or an expression | |
1393 with cfa.reg as the first operand changes the cfa.reg and its | |
1394 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and | |
1395 cfa_temp.offset. | |
1396 | |
1397 Rules 6- 9: Set a non-cfa.reg register value to a constant or an | |
1398 expression yielding a constant. This sets cfa_temp.reg | |
1399 and cfa_temp.offset. | |
1400 | |
1401 Rule 5: Create a new register cfa_store used to save items to the | |
1402 stack. | |
1403 | |
1404 Rules 10-14: Save a register to the stack. Define offset as the | |
1405 difference of the original location and cfa_store's | |
1406 location (or cfa_temp's location if cfa_temp is used). | |
1407 | |
1408 Rules 16-20: If AND operation happens on sp in prologue, we assume | |
1409 stack is realigned. We will use a group of DW_OP_XXX | |
1410 expressions to represent the location of the stored | |
1411 register instead of CFA+offset. | |
1412 | |
1413 The Rules | |
1414 | |
1415 "{a,b}" indicates a choice of a xor b. | |
1416 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg. | |
1417 | |
1418 Rule 1: | |
1419 (set <reg1> <reg2>:cfa.reg) | |
1420 effects: cfa.reg = <reg1> | |
1421 cfa.offset unchanged | |
1422 cfa_temp.reg = <reg1> | |
1423 cfa_temp.offset = cfa.offset | |
1424 | |
1425 Rule 2: | |
1426 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg | |
1427 {<const_int>,<reg>:cfa_temp.reg})) | |
1428 effects: cfa.reg = sp if fp used | |
1429 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp | |
1430 cfa_store.offset += {+/- <const_int>, cfa_temp.offset} | |
1431 if cfa_store.reg==sp | |
1432 | |
1433 Rule 3: | |
1434 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>)) | |
1435 effects: cfa.reg = fp | |
1436 cfa_offset += +/- <const_int> | |
1437 | |
1438 Rule 4: | |
1439 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>)) | |
1440 constraints: <reg1> != fp | |
1441 <reg1> != sp | |
1442 effects: cfa.reg = <reg1> | |
1443 cfa_temp.reg = <reg1> | |
1444 cfa_temp.offset = cfa.offset | |
1445 | |
1446 Rule 5: | |
1447 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg)) | |
1448 constraints: <reg1> != fp | |
1449 <reg1> != sp | |
1450 effects: cfa_store.reg = <reg1> | |
1451 cfa_store.offset = cfa.offset - cfa_temp.offset | |
1452 | |
1453 Rule 6: | |
1454 (set <reg> <const_int>) | |
1455 effects: cfa_temp.reg = <reg> | |
1456 cfa_temp.offset = <const_int> | |
1457 | |
1458 Rule 7: | |
1459 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>)) | |
1460 effects: cfa_temp.reg = <reg1> | |
1461 cfa_temp.offset |= <const_int> | |
1462 | |
1463 Rule 8: | |
1464 (set <reg> (high <exp>)) | |
1465 effects: none | |
1466 | |
1467 Rule 9: | |
1468 (set <reg> (lo_sum <exp> <const_int>)) | |
1469 effects: cfa_temp.reg = <reg> | |
1470 cfa_temp.offset = <const_int> | |
1471 | |
1472 Rule 10: | |
1473 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>) | |
1474 effects: cfa_store.offset -= <const_int> | |
1475 cfa.offset = cfa_store.offset if cfa.reg == sp | |
1476 cfa.reg = sp | |
1477 cfa.base_offset = -cfa_store.offset | |
1478 | |
1479 Rule 11: | |
1480 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>) | |
1481 effects: cfa_store.offset += -/+ mode_size(mem) | |
1482 cfa.offset = cfa_store.offset if cfa.reg == sp | |
1483 cfa.reg = sp | |
1484 cfa.base_offset = -cfa_store.offset | |
1485 | |
1486 Rule 12: | |
1487 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>)) | |
1488 | |
1489 <reg2>) | |
1490 effects: cfa.reg = <reg1> | |
1491 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset | |
1492 | |
1493 Rule 13: | |
1494 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>) | |
1495 effects: cfa.reg = <reg1> | |
1496 cfa.base_offset = -{cfa_store,cfa_temp}.offset | |
1497 | |
1498 Rule 14: | |
1499 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>) | |
1500 effects: cfa.reg = <reg1> | |
1501 cfa.base_offset = -cfa_temp.offset | |
1502 cfa_temp.offset -= mode_size(mem) | |
1503 | |
1504 Rule 15: | |
1505 (set <reg> {unspec, unspec_volatile}) | |
1506 effects: target-dependent | |
1507 | |
1508 Rule 16: | |
1509 (set sp (and: sp <const_int>)) | |
1510 constraints: cfa_store.reg == sp | |
1511 effects: cfun->fde.stack_realign = 1 | |
1512 cfa_store.offset = 0 | |
1513 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp | |
1514 | |
1515 Rule 17: | |
1516 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int)))) | |
1517 effects: cfa_store.offset += -/+ mode_size(mem) | |
1518 | |
1519 Rule 18: | |
1520 (set (mem ({pre_inc, pre_dec} sp)) fp) | |
1521 constraints: fde->stack_realign == 1 | |
1522 effects: cfa_store.offset = 0 | |
1523 cfa.reg != HARD_FRAME_POINTER_REGNUM | |
1524 | |
1525 Rule 19: | |
1526 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg) | |
1527 constraints: fde->stack_realign == 1 | |
1528 && cfa.offset == 0 | |
1529 && cfa.indirect == 0 | |
1530 && cfa.reg != HARD_FRAME_POINTER_REGNUM | |
1531 effects: Use DW_CFA_def_cfa_expression to define cfa | |
1532 cfa.reg == fde->drap_reg */ | |
1533 | |
1534 static void | |
1535 dwarf2out_frame_debug_expr (rtx expr) | |
1536 { | |
1537 rtx src, dest, span; | |
1538 HOST_WIDE_INT offset; | |
1539 dw_fde_ref fde; | |
1540 | |
1541 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of | |
1542 the PARALLEL independently. The first element is always processed if | |
1543 it is a SET. This is for backward compatibility. Other elements | |
1544 are processed only if they are SETs and the RTX_FRAME_RELATED_P | |
1545 flag is set in them. */ | |
1546 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE) | |
1547 { | |
1548 int par_index; | |
1549 int limit = XVECLEN (expr, 0); | |
1550 rtx elem; | |
1551 | |
1552 /* PARALLELs have strict read-modify-write semantics, so we | |
1553 ought to evaluate every rvalue before changing any lvalue. | |
1554 It's cumbersome to do that in general, but there's an | |
1555 easy approximation that is enough for all current users: | |
1556 handle register saves before register assignments. */ | |
1557 if (GET_CODE (expr) == PARALLEL) | |
1558 for (par_index = 0; par_index < limit; par_index++) | |
1559 { | |
1560 elem = XVECEXP (expr, 0, par_index); | |
1561 if (GET_CODE (elem) == SET | |
1562 && MEM_P (SET_DEST (elem)) | |
1563 && (RTX_FRAME_RELATED_P (elem) || par_index == 0)) | |
1564 dwarf2out_frame_debug_expr (elem); | |
1565 } | |
1566 | |
1567 for (par_index = 0; par_index < limit; par_index++) | |
1568 { | |
1569 elem = XVECEXP (expr, 0, par_index); | |
1570 if (GET_CODE (elem) == SET | |
1571 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE) | |
1572 && (RTX_FRAME_RELATED_P (elem) || par_index == 0)) | |
1573 dwarf2out_frame_debug_expr (elem); | |
1574 } | |
1575 return; | |
1576 } | |
1577 | |
1578 gcc_assert (GET_CODE (expr) == SET); | |
1579 | |
1580 src = SET_SRC (expr); | |
1581 dest = SET_DEST (expr); | |
1582 | |
1583 if (REG_P (src)) | |
1584 { | |
1585 rtx rsi = reg_saved_in (src); | |
1586 if (rsi) | |
1587 src = rsi; | |
1588 } | |
1589 | |
1590 fde = cfun->fde; | |
1591 | |
1592 switch (GET_CODE (dest)) | |
1593 { | |
1594 case REG: | |
1595 switch (GET_CODE (src)) | |
1596 { | |
1597 /* Setting FP from SP. */ | |
1598 case REG: | |
1599 if (cur_cfa->reg == dwf_regno (src)) | |
1600 { | |
1601 /* Rule 1 */ | |
1602 /* Update the CFA rule wrt SP or FP. Make sure src is | |
1603 relative to the current CFA register. | |
1604 | |
1605 We used to require that dest be either SP or FP, but the | |
1606 ARM copies SP to a temporary register, and from there to | |
1607 FP. So we just rely on the backends to only set | |
1608 RTX_FRAME_RELATED_P on appropriate insns. */ | |
1609 cur_cfa->reg = dwf_regno (dest); | |
1610 cur_trace->cfa_temp.reg = cur_cfa->reg; | |
1611 cur_trace->cfa_temp.offset = cur_cfa->offset; | |
1612 } | |
1613 else | |
1614 { | |
1615 /* Saving a register in a register. */ | |
1616 gcc_assert (!fixed_regs [REGNO (dest)] | |
1617 /* For the SPARC and its register window. */ | |
1618 || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN)); | |
1619 | |
1620 /* After stack is aligned, we can only save SP in FP | |
1621 if drap register is used. In this case, we have | |
1622 to restore stack pointer with the CFA value and we | |
1623 don't generate this DWARF information. */ | |
1624 if (fde | |
1625 && fde->stack_realign | |
1626 && REGNO (src) == STACK_POINTER_REGNUM) | |
1627 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM | |
1628 && fde->drap_reg != INVALID_REGNUM | |
1629 && cur_cfa->reg != dwf_regno (src)); | |
1630 else | |
1631 queue_reg_save (src, dest, 0); | |
1632 } | |
1633 break; | |
1634 | |
1635 case PLUS: | |
1636 case MINUS: | |
1637 case LO_SUM: | |
1638 if (dest == stack_pointer_rtx) | |
1639 { | |
1640 /* Rule 2 */ | |
1641 /* Adjusting SP. */ | |
1642 switch (GET_CODE (XEXP (src, 1))) | |
1643 { | |
1644 case CONST_INT: | |
1645 offset = INTVAL (XEXP (src, 1)); | |
1646 break; | |
1647 case REG: | |
1648 gcc_assert (dwf_regno (XEXP (src, 1)) | |
1649 == cur_trace->cfa_temp.reg); | |
1650 offset = cur_trace->cfa_temp.offset; | |
1651 break; | |
1652 default: | |
1653 gcc_unreachable (); | |
1654 } | |
1655 | |
1656 if (XEXP (src, 0) == hard_frame_pointer_rtx) | |
1657 { | |
1658 /* Restoring SP from FP in the epilogue. */ | |
1659 gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum); | |
1660 cur_cfa->reg = dw_stack_pointer_regnum; | |
1661 } | |
1662 else if (GET_CODE (src) == LO_SUM) | |
1663 /* Assume we've set the source reg of the LO_SUM from sp. */ | |
1664 ; | |
1665 else | |
1666 gcc_assert (XEXP (src, 0) == stack_pointer_rtx); | |
1667 | |
1668 if (GET_CODE (src) != MINUS) | |
1669 offset = -offset; | |
1670 if (cur_cfa->reg == dw_stack_pointer_regnum) | |
1671 cur_cfa->offset += offset; | |
1672 if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum) | |
1673 cur_trace->cfa_store.offset += offset; | |
1674 } | |
1675 else if (dest == hard_frame_pointer_rtx) | |
1676 { | |
1677 /* Rule 3 */ | |
1678 /* Either setting the FP from an offset of the SP, | |
1679 or adjusting the FP */ | |
1680 gcc_assert (frame_pointer_needed); | |
1681 | |
1682 gcc_assert (REG_P (XEXP (src, 0)) | |
1683 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg | |
1684 && CONST_INT_P (XEXP (src, 1))); | |
1685 offset = INTVAL (XEXP (src, 1)); | |
1686 if (GET_CODE (src) != MINUS) | |
1687 offset = -offset; | |
1688 cur_cfa->offset += offset; | |
1689 cur_cfa->reg = dw_frame_pointer_regnum; | |
1690 } | |
1691 else | |
1692 { | |
1693 gcc_assert (GET_CODE (src) != MINUS); | |
1694 | |
1695 /* Rule 4 */ | |
1696 if (REG_P (XEXP (src, 0)) | |
1697 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg | |
1698 && CONST_INT_P (XEXP (src, 1))) | |
1699 { | |
1700 /* Setting a temporary CFA register that will be copied | |
1701 into the FP later on. */ | |
1702 offset = - INTVAL (XEXP (src, 1)); | |
1703 cur_cfa->offset += offset; | |
1704 cur_cfa->reg = dwf_regno (dest); | |
1705 /* Or used to save regs to the stack. */ | |
1706 cur_trace->cfa_temp.reg = cur_cfa->reg; | |
1707 cur_trace->cfa_temp.offset = cur_cfa->offset; | |
1708 } | |
1709 | |
1710 /* Rule 5 */ | |
1711 else if (REG_P (XEXP (src, 0)) | |
1712 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg | |
1713 && XEXP (src, 1) == stack_pointer_rtx) | |
1714 { | |
1715 /* Setting a scratch register that we will use instead | |
1716 of SP for saving registers to the stack. */ | |
1717 gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum); | |
1718 cur_trace->cfa_store.reg = dwf_regno (dest); | |
1719 cur_trace->cfa_store.offset | |
1720 = cur_cfa->offset - cur_trace->cfa_temp.offset; | |
1721 } | |
1722 | |
1723 /* Rule 9 */ | |
1724 else if (GET_CODE (src) == LO_SUM | |
1725 && CONST_INT_P (XEXP (src, 1))) | |
1726 { | |
1727 cur_trace->cfa_temp.reg = dwf_regno (dest); | |
1728 cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1)); | |
1729 } | |
1730 else | |
1731 gcc_unreachable (); | |
1732 } | |
1733 break; | |
1734 | |
1735 /* Rule 6 */ | |
1736 case CONST_INT: | |
1737 cur_trace->cfa_temp.reg = dwf_regno (dest); | |
1738 cur_trace->cfa_temp.offset = INTVAL (src); | |
1739 break; | |
1740 | |
1741 /* Rule 7 */ | |
1742 case IOR: | |
1743 gcc_assert (REG_P (XEXP (src, 0)) | |
1744 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg | |
1745 && CONST_INT_P (XEXP (src, 1))); | |
1746 | |
1747 cur_trace->cfa_temp.reg = dwf_regno (dest); | |
1748 cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1)); | |
1749 break; | |
1750 | |
1751 /* Skip over HIGH, assuming it will be followed by a LO_SUM, | |
1752 which will fill in all of the bits. */ | |
1753 /* Rule 8 */ | |
1754 case HIGH: | |
1755 break; | |
1756 | |
1757 /* Rule 15 */ | |
1758 case UNSPEC: | |
1759 case UNSPEC_VOLATILE: | |
1760 /* All unspecs should be represented by REG_CFA_* notes. */ | |
1761 gcc_unreachable (); | |
1762 return; | |
1763 | |
1764 /* Rule 16 */ | |
1765 case AND: | |
1766 /* If this AND operation happens on stack pointer in prologue, | |
1767 we assume the stack is realigned and we extract the | |
1768 alignment. */ | |
1769 if (fde && XEXP (src, 0) == stack_pointer_rtx) | |
1770 { | |
1771 /* We interpret reg_save differently with stack_realign set. | |
1772 Thus we must flush whatever we have queued first. */ | |
1773 dwarf2out_flush_queued_reg_saves (); | |
1774 | |
1775 gcc_assert (cur_trace->cfa_store.reg | |
1776 == dwf_regno (XEXP (src, 0))); | |
1777 fde->stack_realign = 1; | |
1778 fde->stack_realignment = INTVAL (XEXP (src, 1)); | |
1779 cur_trace->cfa_store.offset = 0; | |
1780 | |
1781 if (cur_cfa->reg != dw_stack_pointer_regnum | |
1782 && cur_cfa->reg != dw_frame_pointer_regnum) | |
1783 fde->drap_reg = cur_cfa->reg; | |
1784 } | |
1785 return; | |
1786 | |
1787 default: | |
1788 gcc_unreachable (); | |
1789 } | |
1790 break; | |
1791 | |
1792 case MEM: | |
1793 | |
1794 /* Saving a register to the stack. Make sure dest is relative to the | |
1795 CFA register. */ | |
1796 switch (GET_CODE (XEXP (dest, 0))) | |
1797 { | |
1798 /* Rule 10 */ | |
1799 /* With a push. */ | |
1800 case PRE_MODIFY: | |
1801 case POST_MODIFY: | |
1802 /* We can't handle variable size modifications. */ | |
1803 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1)) | |
1804 == CONST_INT); | |
1805 offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1)); | |
1806 | |
1807 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM | |
1808 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum); | |
1809 | |
1810 cur_trace->cfa_store.offset += offset; | |
1811 if (cur_cfa->reg == dw_stack_pointer_regnum) | |
1812 cur_cfa->offset = cur_trace->cfa_store.offset; | |
1813 | |
1814 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY) | |
1815 offset -= cur_trace->cfa_store.offset; | |
1816 else | |
1817 offset = -cur_trace->cfa_store.offset; | |
1818 break; | |
1819 | |
1820 /* Rule 11 */ | |
1821 case PRE_INC: | |
1822 case PRE_DEC: | |
1823 case POST_DEC: | |
1824 offset = GET_MODE_SIZE (GET_MODE (dest)); | |
1825 if (GET_CODE (XEXP (dest, 0)) == PRE_INC) | |
1826 offset = -offset; | |
1827 | |
1828 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0)) | |
1829 == STACK_POINTER_REGNUM) | |
1830 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum); | |
1831 | |
1832 cur_trace->cfa_store.offset += offset; | |
1833 | |
1834 /* Rule 18: If stack is aligned, we will use FP as a | |
1835 reference to represent the address of the stored | |
1836 regiser. */ | |
1837 if (fde | |
1838 && fde->stack_realign | |
1839 && REG_P (src) | |
1840 && REGNO (src) == HARD_FRAME_POINTER_REGNUM) | |
1841 { | |
1842 gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum); | |
1843 cur_trace->cfa_store.offset = 0; | |
1844 } | |
1845 | |
1846 if (cur_cfa->reg == dw_stack_pointer_regnum) | |
1847 cur_cfa->offset = cur_trace->cfa_store.offset; | |
1848 | |
1849 if (GET_CODE (XEXP (dest, 0)) == POST_DEC) | |
1850 offset += -cur_trace->cfa_store.offset; | |
1851 else | |
1852 offset = -cur_trace->cfa_store.offset; | |
1853 break; | |
1854 | |
1855 /* Rule 12 */ | |
1856 /* With an offset. */ | |
1857 case PLUS: | |
1858 case MINUS: | |
1859 case LO_SUM: | |
1860 { | |
1861 unsigned int regno; | |
1862 | |
1863 gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1)) | |
1864 && REG_P (XEXP (XEXP (dest, 0), 0))); | |
1865 offset = INTVAL (XEXP (XEXP (dest, 0), 1)); | |
1866 if (GET_CODE (XEXP (dest, 0)) == MINUS) | |
1867 offset = -offset; | |
1868 | |
1869 regno = dwf_regno (XEXP (XEXP (dest, 0), 0)); | |
1870 | |
1871 if (cur_cfa->reg == regno) | |
1872 offset -= cur_cfa->offset; | |
1873 else if (cur_trace->cfa_store.reg == regno) | |
1874 offset -= cur_trace->cfa_store.offset; | |
1875 else | |
1876 { | |
1877 gcc_assert (cur_trace->cfa_temp.reg == regno); | |
1878 offset -= cur_trace->cfa_temp.offset; | |
1879 } | |
1880 } | |
1881 break; | |
1882 | |
1883 /* Rule 13 */ | |
1884 /* Without an offset. */ | |
1885 case REG: | |
1886 { | |
1887 unsigned int regno = dwf_regno (XEXP (dest, 0)); | |
1888 | |
1889 if (cur_cfa->reg == regno) | |
1890 offset = -cur_cfa->offset; | |
1891 else if (cur_trace->cfa_store.reg == regno) | |
1892 offset = -cur_trace->cfa_store.offset; | |
1893 else | |
1894 { | |
1895 gcc_assert (cur_trace->cfa_temp.reg == regno); | |
1896 offset = -cur_trace->cfa_temp.offset; | |
1897 } | |
1898 } | |
1899 break; | |
1900 | |
1901 /* Rule 14 */ | |
1902 case POST_INC: | |
1903 gcc_assert (cur_trace->cfa_temp.reg | |
1904 == dwf_regno (XEXP (XEXP (dest, 0), 0))); | |
1905 offset = -cur_trace->cfa_temp.offset; | |
1906 cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest)); | |
1907 break; | |
1908 | |
1909 default: | |
1910 gcc_unreachable (); | |
1911 } | |
1912 | |
1913 /* Rule 17 */ | |
1914 /* If the source operand of this MEM operation is a memory, | |
1915 we only care how much stack grew. */ | |
1916 if (MEM_P (src)) | |
1917 break; | |
1918 | |
1919 if (REG_P (src) | |
1920 && REGNO (src) != STACK_POINTER_REGNUM | |
1921 && REGNO (src) != HARD_FRAME_POINTER_REGNUM | |
1922 && dwf_regno (src) == cur_cfa->reg) | |
1923 { | |
1924 /* We're storing the current CFA reg into the stack. */ | |
1925 | |
1926 if (cur_cfa->offset == 0) | |
1927 { | |
1928 /* Rule 19 */ | |
1929 /* If stack is aligned, putting CFA reg into stack means | |
1930 we can no longer use reg + offset to represent CFA. | |
1931 Here we use DW_CFA_def_cfa_expression instead. The | |
1932 result of this expression equals to the original CFA | |
1933 value. */ | |
1934 if (fde | |
1935 && fde->stack_realign | |
1936 && cur_cfa->indirect == 0 | |
1937 && cur_cfa->reg != dw_frame_pointer_regnum) | |
1938 { | |
1939 gcc_assert (fde->drap_reg == cur_cfa->reg); | |
1940 | |
1941 cur_cfa->indirect = 1; | |
1942 cur_cfa->reg = dw_frame_pointer_regnum; | |
1943 cur_cfa->base_offset = offset; | |
1944 cur_cfa->offset = 0; | |
1945 | |
1946 fde->drap_reg_saved = 1; | |
1947 break; | |
1948 } | |
1949 | |
1950 /* If the source register is exactly the CFA, assume | |
1951 we're saving SP like any other register; this happens | |
1952 on the ARM. */ | |
1953 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset); | |
1954 break; | |
1955 } | |
1956 else | |
1957 { | |
1958 /* Otherwise, we'll need to look in the stack to | |
1959 calculate the CFA. */ | |
1960 rtx x = XEXP (dest, 0); | |
1961 | |
1962 if (!REG_P (x)) | |
1963 x = XEXP (x, 0); | |
1964 gcc_assert (REG_P (x)); | |
1965 | |
1966 cur_cfa->reg = dwf_regno (x); | |
1967 cur_cfa->base_offset = offset; | |
1968 cur_cfa->indirect = 1; | |
1969 break; | |
1970 } | |
1971 } | |
1972 | |
1973 if (REG_P (src)) | |
1974 span = targetm.dwarf_register_span (src); | |
1975 else | |
1976 span = NULL; | |
1977 | |
1978 if (!span) | |
1979 queue_reg_save (src, NULL_RTX, offset); | |
1980 else | |
1981 { | |
1982 /* We have a PARALLEL describing where the contents of SRC live. | |
1983 Queue register saves for each piece of the PARALLEL. */ | |
1984 HOST_WIDE_INT span_offset = offset; | |
1985 | |
1986 gcc_assert (GET_CODE (span) == PARALLEL); | |
1987 | |
1988 const int par_len = XVECLEN (span, 0); | |
1989 for (int par_index = 0; par_index < par_len; par_index++) | |
1990 { | |
1991 rtx elem = XVECEXP (span, 0, par_index); | |
1992 queue_reg_save (elem, NULL_RTX, span_offset); | |
1993 span_offset += GET_MODE_SIZE (GET_MODE (elem)); | |
1994 } | |
1995 } | |
1996 break; | |
1997 | |
1998 default: | |
1999 gcc_unreachable (); | |
2000 } | |
2001 } | |
2002 | |
2003 /* Record call frame debugging information for INSN, which either sets | |
2004 SP or FP (adjusting how we calculate the frame address) or saves a | |
2005 register to the stack. */ | |
2006 | |
2007 static void | |
2008 dwarf2out_frame_debug (rtx_insn *insn) | |
2009 { | |
2010 rtx note, n, pat; | |
2011 bool handled_one = false; | |
2012 | |
2013 for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) | |
2014 switch (REG_NOTE_KIND (note)) | |
2015 { | |
2016 case REG_FRAME_RELATED_EXPR: | |
2017 pat = XEXP (note, 0); | |
2018 goto do_frame_expr; | |
2019 | |
2020 case REG_CFA_DEF_CFA: | |
2021 dwarf2out_frame_debug_def_cfa (XEXP (note, 0)); | |
2022 handled_one = true; | |
2023 break; | |
2024 | |
2025 case REG_CFA_ADJUST_CFA: | |
2026 n = XEXP (note, 0); | |
2027 if (n == NULL) | |
2028 { | |
2029 n = PATTERN (insn); | |
2030 if (GET_CODE (n) == PARALLEL) | |
2031 n = XVECEXP (n, 0, 0); | |
2032 } | |
2033 dwarf2out_frame_debug_adjust_cfa (n); | |
2034 handled_one = true; | |
2035 break; | |
2036 | |
2037 case REG_CFA_OFFSET: | |
2038 n = XEXP (note, 0); | |
2039 if (n == NULL) | |
2040 n = single_set (insn); | |
2041 dwarf2out_frame_debug_cfa_offset (n); | |
2042 handled_one = true; | |
2043 break; | |
2044 | |
2045 case REG_CFA_REGISTER: | |
2046 n = XEXP (note, 0); | |
2047 if (n == NULL) | |
2048 { | |
2049 n = PATTERN (insn); | |
2050 if (GET_CODE (n) == PARALLEL) | |
2051 n = XVECEXP (n, 0, 0); | |
2052 } | |
2053 dwarf2out_frame_debug_cfa_register (n); | |
2054 handled_one = true; | |
2055 break; | |
2056 | |
2057 case REG_CFA_EXPRESSION: | |
2058 case REG_CFA_VAL_EXPRESSION: | |
2059 n = XEXP (note, 0); | |
2060 if (n == NULL) | |
2061 n = single_set (insn); | |
2062 | |
2063 if (REG_NOTE_KIND (note) == REG_CFA_EXPRESSION) | |
2064 dwarf2out_frame_debug_cfa_expression (n); | |
2065 else | |
2066 dwarf2out_frame_debug_cfa_val_expression (n); | |
2067 | |
2068 handled_one = true; | |
2069 break; | |
2070 | |
2071 case REG_CFA_RESTORE: | |
2072 n = XEXP (note, 0); | |
2073 if (n == NULL) | |
2074 { | |
2075 n = PATTERN (insn); | |
2076 if (GET_CODE (n) == PARALLEL) | |
2077 n = XVECEXP (n, 0, 0); | |
2078 n = XEXP (n, 0); | |
2079 } | |
2080 dwarf2out_frame_debug_cfa_restore (n); | |
2081 handled_one = true; | |
2082 break; | |
2083 | |
2084 case REG_CFA_SET_VDRAP: | |
2085 n = XEXP (note, 0); | |
2086 if (REG_P (n)) | |
2087 { | |
2088 dw_fde_ref fde = cfun->fde; | |
2089 if (fde) | |
2090 { | |
2091 gcc_assert (fde->vdrap_reg == INVALID_REGNUM); | |
2092 if (REG_P (n)) | |
2093 fde->vdrap_reg = dwf_regno (n); | |
2094 } | |
2095 } | |
2096 handled_one = true; | |
2097 break; | |
2098 | |
2099 case REG_CFA_TOGGLE_RA_MANGLE: | |
2100 case REG_CFA_WINDOW_SAVE: | |
2101 /* We overload both of these operations onto the same DWARF opcode. */ | |
2102 dwarf2out_frame_debug_cfa_window_save (); | |
2103 handled_one = true; | |
2104 break; | |
2105 | |
2106 case REG_CFA_FLUSH_QUEUE: | |
2107 /* The actual flush happens elsewhere. */ | |
2108 handled_one = true; | |
2109 break; | |
2110 | |
2111 default: | |
2112 break; | |
2113 } | |
2114 | |
2115 if (!handled_one) | |
2116 { | |
2117 pat = PATTERN (insn); | |
2118 do_frame_expr: | |
2119 dwarf2out_frame_debug_expr (pat); | |
2120 | |
2121 /* Check again. A parallel can save and update the same register. | |
2122 We could probably check just once, here, but this is safer than | |
2123 removing the check at the start of the function. */ | |
2124 if (clobbers_queued_reg_save (pat)) | |
2125 dwarf2out_flush_queued_reg_saves (); | |
2126 } | |
2127 } | |
2128 | |
2129 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */ | |
2130 | |
2131 static void | |
2132 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row) | |
2133 { | |
2134 size_t i, n_old, n_new, n_max; | |
2135 dw_cfi_ref cfi; | |
2136 | |
2137 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi)) | |
2138 add_cfi (new_row->cfa_cfi); | |
2139 else | |
2140 { | |
2141 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa); | |
2142 if (cfi) | |
2143 add_cfi (cfi); | |
2144 } | |
2145 | |
2146 n_old = vec_safe_length (old_row->reg_save); | |
2147 n_new = vec_safe_length (new_row->reg_save); | |
2148 n_max = MAX (n_old, n_new); | |
2149 | |
2150 for (i = 0; i < n_max; ++i) | |
2151 { | |
2152 dw_cfi_ref r_old = NULL, r_new = NULL; | |
2153 | |
2154 if (i < n_old) | |
2155 r_old = (*old_row->reg_save)[i]; | |
2156 if (i < n_new) | |
2157 r_new = (*new_row->reg_save)[i]; | |
2158 | |
2159 if (r_old == r_new) | |
2160 ; | |
2161 else if (r_new == NULL) | |
2162 add_cfi_restore (i); | |
2163 else if (!cfi_equal_p (r_old, r_new)) | |
2164 add_cfi (r_new); | |
2165 } | |
2166 } | |
2167 | |
2168 /* Examine CFI and return true if a cfi label and set_loc is needed | |
2169 beforehand. Even when generating CFI assembler instructions, we | |
2170 still have to add the cfi to the list so that lookup_cfa_1 works | |
2171 later on. When -g2 and above we even need to force emitting of | |
2172 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list | |
2173 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa | |
2174 and so don't use convert_cfa_to_fb_loc_list. */ | |
2175 | |
2176 static bool | |
2177 cfi_label_required_p (dw_cfi_ref cfi) | |
2178 { | |
2179 if (!dwarf2out_do_cfi_asm ()) | |
2180 return true; | |
2181 | |
2182 if (dwarf_version == 2 | |
2183 && debug_info_level > DINFO_LEVEL_TERSE | |
2184 && (write_symbols == DWARF2_DEBUG | |
2185 || write_symbols == VMS_AND_DWARF2_DEBUG)) | |
2186 { | |
2187 switch (cfi->dw_cfi_opc) | |
2188 { | |
2189 case DW_CFA_def_cfa_offset: | |
2190 case DW_CFA_def_cfa_offset_sf: | |
2191 case DW_CFA_def_cfa_register: | |
2192 case DW_CFA_def_cfa: | |
2193 case DW_CFA_def_cfa_sf: | |
2194 case DW_CFA_def_cfa_expression: | |
2195 case DW_CFA_restore_state: | |
2196 return true; | |
2197 default: | |
2198 return false; | |
2199 } | |
2200 } | |
2201 return false; | |
2202 } | |
2203 | |
/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
   necessary.  */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx_insn *insn, *next;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      /* A text-section switch starts the second FDE partition; remember
	 where in the CFI vector it begins.  */
      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  /* Advance NEXT past the whole run of consecutive CFI notes
	     (skipping inactive insns), accumulating whether any of them
	     needs a label; stop at the next active insn or a section
	     switch.  All notes in [insn, next) are handled together.  */
	  while (next)
	    if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	      {
		required |= cfi_label_required_p (NOTE_CFI (next));
		next = NEXT_INSN (next);
	      }
	    else if (active_insn_p (next)
		     || (NOTE_P (next) && (NOTE_KIND (next)
					   == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
	      break;
	    else
	      next = NEXT_INSN (next);
	  if (required)
	    {
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      xcfi->dw_cfi_opc = DW_CFA_advance_loc4;
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      vec_safe_push (fde->dw_fde_cfi, xcfi);

	      /* Emit the matching label note into the insn stream.  */
	      rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Push every CFI note in [insn, next) onto the FDE.  */
	  do
	    {
	      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
		vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	}
    }
}
2261 | |
2262 static void dump_cfi_row (FILE *f, dw_cfi_row *row); | |
2263 | |
/* If LABEL is the start of a trace, then initialize the state of that
   trace from CUR_TRACE and CUR_ROW.  */

static void
maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
{
  dw_trace_info *ti;
  HOST_WIDE_INT args_size;

  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    {
      fprintf (dump_file, "   saw edge from trace %u to %u (via %s %d)\n",
	       cur_trace->id, ti->id,
	       (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	       (origin ? INSN_UID (origin) : 0));
    }

  args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      /* The scratch CFA-tracking state also flows into the new trace.  */
      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {

      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
#if CHECKING_P
      if (!cfi_row_equal_p (cur_row, ti->beg_row))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "Inconsistent CFI state!\n");
	      fprintf (dump_file, "SHOULD have:\n");
	      dump_cfi_row (dump_file, ti->beg_row);
	      fprintf (dump_file, "DO have:\n");
	      dump_cfi_row (dump_file, cur_row);
	    }

	  gcc_unreachable ();
	}
#endif

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (ti->beg_true_args_size != args_size)
	ti->args_size_undefined = true;
    }
}
2328 | |
2329 /* Similarly, but handle the args_size and CFA reset across EH | |
2330 and non-local goto edges. */ | |
2331 | |
2332 static void | |
2333 maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin) | |
2334 { | |
2335 HOST_WIDE_INT save_args_size, delta; | |
2336 dw_cfa_location save_cfa; | |
2337 | |
2338 save_args_size = cur_trace->end_true_args_size; | |
2339 if (save_args_size == 0) | |
2340 { | |
2341 maybe_record_trace_start (start, origin); | |
2342 return; | |
2343 } | |
2344 | |
2345 delta = -save_args_size; | |
2346 cur_trace->end_true_args_size = 0; | |
2347 | |
2348 save_cfa = cur_row->cfa; | |
2349 if (cur_row->cfa.reg == dw_stack_pointer_regnum) | |
2350 { | |
2351 /* Convert a change in args_size (always a positive in the | |
2352 direction of stack growth) to a change in stack pointer. */ | |
2353 if (!STACK_GROWS_DOWNWARD) | |
2354 delta = -delta; | |
2355 | |
2356 cur_row->cfa.offset += delta; | |
2357 } | |
2358 | |
2359 maybe_record_trace_start (start, origin); | |
2360 | |
2361 cur_trace->end_true_args_size = save_args_size; | |
2362 cur_row->cfa = save_cfa; | |
2363 } | |
2364 | |
/* Propagate CUR_TRACE state to the destinations implied by INSN.  */
/* ??? Sadly, this is in large part a duplicate of make_edges.  */

static void
create_trace_edges (rtx_insn *insn)
{
  rtx tmp;
  int i, n;

  if (JUMP_P (insn))
    {
      rtx_jump_table_data *table;

      /* A jump marked REG_NON_LOCAL_GOTO creates no local edges here.  */
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	return;

      if (tablejump_p (insn, NULL, &table))
	{
	  /* One edge per jump-table entry.  */
	  rtvec vec = table->get_labels ();

	  n = GET_NUM_ELEM (vec);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else if (computed_jump_p (insn))
	{
	  /* A computed jump may reach any label whose address was taken.  */
	  rtx_insn *temp;
	  unsigned int i;
	  FOR_EACH_VEC_SAFE_ELT (forced_labels, i, temp)
	    maybe_record_trace_start (temp, insn);
	}
      else if (returnjump_p (insn))
	;
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
	{
	  /* asm goto: one edge per label operand.  */
	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab =
		as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else
	{
	  /* An ordinary jump with a single target label.  */
	  rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
	  gcc_assert (lab != NULL);
	  maybe_record_trace_start (lab, insn);
	}
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
	return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
	for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
	     lab;
	     lab = lab->next ())
	  maybe_record_trace_start_abnormal (lab->insn (), insn);
    }
  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    {
      /* A delay-slot SEQUENCE: recurse into each member.  */
      int i, n = seq->len ();
      for (i = 0; i < n; ++i)
	create_trace_edges (seq->insn (i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
2447 | |
/* A subroutine of scan_trace.  Do what needs to be done "after" INSN.  */

static void
scan_insn_after (rtx_insn *insn)
{
  /* Frame-related insns update the current CFI row state.  */
  if (RTX_FRAME_RELATED_P (insn))
    dwarf2out_frame_debug (insn);
  /* Track any change in the size of pushed arguments.  */
  notice_args_size (insn);
}
2457 | |
/* Scan the trace beginning at INSN and create the CFI notes for the
   instructions therein.  */

static void
scan_trace (dw_trace_info *trace)
{
  rtx_insn *prev, *insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
	     trace->id, rtx_name[(int) GET_CODE (insn)],
	     INSN_UID (insn));

  /* The end state starts as a copy of the trace's incoming state.  */
  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  /* Set up the file-scope working state used by the helpers below.  */
  cur_trace = trace;
  cur_row = trace->end_row;

  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx_insn *control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
	{
	  /* Don't bother saving the unneeded queued registers at all.  */
	  queued_reg_saves.truncate (0);
	  break;
	}
      if (save_point_p (insn))
	{
	  /* Propagate across fallthru edges.  */
	  dwarf2out_flush_queued_reg_saves ();
	  maybe_record_trace_start (insn, NULL);
	  break;
	}

      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
	continue;

      /* Handle all changes to the row state.  Sequences require special
	 handling for the positioning of the notes.  */
      if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
	{
	  rtx_insn *elt;
	  int i, n = pat->len ();

	  control = pat->insn (0);
	  if (can_throw_internal (control))
	    notice_eh_throw (control);
	  dwarf2out_flush_queued_reg_saves ();

	  if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
	    {
	      /* ??? Hopefully multiple delay slots are not annulled.  */
	      gcc_assert (n == 2);
	      gcc_assert (!RTX_FRAME_RELATED_P (control));
	      gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

	      elt = pat->insn (1);

	      if (INSN_FROM_TARGET_P (elt))
		{
		  HOST_WIDE_INT restore_args_size;
		  cfi_vec save_row_reg_save;

		  /* If ELT is an instruction from target of an annulled
		     branch, the effects are for the target only and so
		     the args_size and CFA along the current path
		     shouldn't change.  */
		  add_cfi_insn = NULL;
		  restore_args_size = cur_trace->end_true_args_size;
		  cur_cfa = &cur_row->cfa;
		  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

		  scan_insn_after (elt);

		  /* ??? Should we instead save the entire row state?  */
		  gcc_assert (!queued_reg_saves.length ());

		  create_trace_edges (control);

		  /* Undo ELT's effects for the fallthru path.  */
		  cur_trace->end_true_args_size = restore_args_size;
		  cur_row->cfa = this_cfa;
		  cur_row->reg_save = save_row_reg_save;
		  cur_cfa = &this_cfa;
		}
	      else
		{
		  /* If ELT is a annulled branch-taken instruction (i.e.
		     executed only when branch is not taken), the args_size
		     and CFA should not change through the jump.  */
		  create_trace_edges (control);

		  /* Update and continue with the trace.  */
		  add_cfi_insn = insn;
		  scan_insn_after (elt);
		  def_cfa_1 (&this_cfa);
		}
	      continue;
	    }

	  /* The insns in the delay slot should all be considered to happen
	     "before" a call insn.  Consider a call with a stack pointer
	     adjustment in the delay slot.  The backtrace from the callee
	     should include the sp adjustment.  Unfortunately, that leaves
	     us with an unavoidable unwinding error exactly at the call insn
	     itself.  For jump insns we'd prefer to avoid this error by
	     placing the notes after the sequence.  */
	  if (JUMP_P (control))
	    add_cfi_insn = insn;

	  for (i = 1; i < n; ++i)
	    {
	      elt = pat->insn (i);
	      scan_insn_after (elt);
	    }

	  /* Make sure any register saves are visible at the jump target.  */
	  dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  /* However, if there is some adjustment on the call itself, e.g.
	     a call_pop, that action should be considered to happen after
	     the call returns.  */
	  add_cfi_insn = insn;
	  scan_insn_after (control);
	}
      else
	{
	  /* Flush data before calls and jumps, and of course if necessary.  */
	  if (can_throw_internal (insn))
	    {
	      notice_eh_throw (insn);
	      dwarf2out_flush_queued_reg_saves ();
	    }
	  else if (!NONJUMP_INSN_P (insn)
		   || clobbers_queued_reg_save (insn)
		   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	    dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  add_cfi_insn = insn;
	  scan_insn_after (insn);
	  control = insn;
	}

      /* Between frame-related-p and args_size we might have otherwise
	 emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
	 once anything is emitted.  */
      if (any_cfis_emitted
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
	 same tests as are done to actually create the edges.  So
	 always call the routine and let it not create edges for
	 non-control-flow insns.  */
      create_trace_edges (control);
    }

  /* Clear the file-scope working state before returning.  */
  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}
2637 | |
2638 /* Scan the function and create the initial set of CFI notes. */ | |
2639 | |
2640 static void | |
2641 create_cfi_notes (void) | |
2642 { | |
2643 dw_trace_info *ti; | |
2644 | |
2645 gcc_checking_assert (!queued_reg_saves.exists ()); | |
2646 gcc_checking_assert (!trace_work_list.exists ()); | |
2647 | |
2648 /* Always begin at the entry trace. */ | |
2649 ti = &trace_info[0]; | |
2650 scan_trace (ti); | |
2651 | |
2652 while (!trace_work_list.is_empty ()) | |
2653 { | |
2654 ti = trace_work_list.pop (); | |
2655 scan_trace (ti); | |
2656 } | |
2657 | |
2658 queued_reg_saves.release (); | |
2659 trace_work_list.release (); | |
2660 } | |
2661 | |
2662 /* Return the insn before the first NOTE_INSN_CFI after START. */ | |
2663 | |
2664 static rtx_insn * | |
2665 before_next_cfi_note (rtx_insn *start) | |
2666 { | |
2667 rtx_insn *prev = start; | |
2668 while (start) | |
2669 { | |
2670 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI) | |
2671 return prev; | |
2672 prev = start; | |
2673 start = NEXT_INSN (start); | |
2674 } | |
2675 gcc_unreachable (); | |
2676 } | |
2677 | |
/* Insert CFI notes between traces to properly change state between them.  */

static void
connect_traces (void)
{
  unsigned i, n = trace_info.length ();
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  */
  for (i = n - 1; i > 0; --i)
    {
      ti = &trace_info[i];
      if (ti->beg_row == NULL)
	{
	  trace_info.ordered_remove (i);
	  n -= 1;
	}
      else
	gcc_assert (ti->end_row != NULL);
    }

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      /* Visit each adjacent pair (PREV_TI, TI) of surviving traces.  */
      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
	 for the portion of the function in the alternate text
	 section.  The row state at the very beginning of that
	 new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
	old_row = cie_cfi_row;
      else
	{
	  old_row = prev_ti->end_row;
	  /* If there's no change from the previous end state, fine.  */
	  if (cfi_row_equal_p (old_row, ti->beg_row))
	    ;
	  /* Otherwise check for the common case of sharing state with
	     the beginning of an epilogue, but not the end.  Insert
	     remember/restore opcodes in that case.  */
	  else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
	    {
	      dw_cfi_ref cfi;

	      /* Note that if we blindly insert the remember at the
		 start of the trace, we can wind up increasing the
		 size of the unwind info due to extra advance opcodes.
		 Instead, put the remember immediately before the next
		 state change.  We know there must be one, because the
		 state at the beginning and head of the trace differ.  */
	      add_cfi_insn = before_next_cfi_note (prev_ti->head);
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_remember_state;
	      add_cfi (cfi);

	      add_cfi_insn = ti->head;
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_restore_state;
	      add_cfi (cfi);

	      old_row = prev_ti->beg_row;
	    }
	  /* Otherwise, we'll simply change state from the previous end.  */
	}

      change_cfi_row (old_row, ti->beg_row);

      /* Dump any fixup opcodes that were just inserted at TI's head.  */
      if (dump_file && add_cfi_insn != ti->head)
	{
	  rtx_insn *note;

	  fprintf (dump_file, "Fixup between trace %u and %u:\n",
		   prev_ti->id, ti->id);

	  note = ti->head;
	  do
	    {
	      note = NEXT_INSN (note);
	      gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
	      output_cfi_directive (dump_file, NOTE_CFI (note));
	    }
	  while (note != add_cfi_insn);
	}
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      HOST_WIDE_INT prev_args_size = 0;

      for (i = 0; i < n; ++i)
	{
	  ti = &trace_info[i];

	  if (ti->switch_sections)
	    prev_args_size = 0;
	  if (ti->eh_head == NULL)
	    continue;
	  gcc_assert (!ti->args_size_undefined);

	  if (ti->beg_delay_args_size != prev_args_size)
	    {
	      /* ??? Search back to previous CFI note.  */
	      add_cfi_insn = PREV_INSN (ti->eh_head);
	      add_cfi_args_size (ti->beg_delay_args_size);
	    }

	  prev_args_size = ti->end_delay_args_size;
	}
    }
}
2804 | |
2805 /* Set up the pseudo-cfg of instruction traces, as described at the | |
2806 block comment at the top of the file. */ | |
2807 | |
2808 static void | |
2809 create_pseudo_cfg (void) | |
2810 { | |
2811 bool saw_barrier, switch_sections; | |
2812 dw_trace_info ti; | |
2813 rtx_insn *insn; | |
2814 unsigned i; | |
2815 | |
2816 /* The first trace begins at the start of the function, | |
2817 and begins with the CIE row state. */ | |
2818 trace_info.create (16); | |
2819 memset (&ti, 0, sizeof (ti)); | |
2820 ti.head = get_insns (); | |
2821 ti.beg_row = cie_cfi_row; | |
2822 ti.cfa_store = cie_cfi_row->cfa; | |
2823 ti.cfa_temp.reg = INVALID_REGNUM; | |
2824 trace_info.quick_push (ti); | |
2825 | |
2826 if (cie_return_save) | |
2827 ti.regs_saved_in_regs.safe_push (*cie_return_save); | |
2828 | |
2829 /* Walk all the insns, collecting start of trace locations. */ | |
2830 saw_barrier = false; | |
2831 switch_sections = false; | |
2832 for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) | |
2833 { | |
2834 if (BARRIER_P (insn)) | |
2835 saw_barrier = true; | |
2836 else if (NOTE_P (insn) | |
2837 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS) | |
2838 { | |
2839 /* We should have just seen a barrier. */ | |
2840 gcc_assert (saw_barrier); | |
2841 switch_sections = true; | |
2842 } | |
2843 /* Watch out for save_point notes between basic blocks. | |
2844 In particular, a note after a barrier. Do not record these, | |
2845 delaying trace creation until the label. */ | |
2846 else if (save_point_p (insn) | |
2847 && (LABEL_P (insn) || !saw_barrier)) | |
2848 { | |
2849 memset (&ti, 0, sizeof (ti)); | |
2850 ti.head = insn; | |
2851 ti.switch_sections = switch_sections; | |
2852 ti.id = trace_info.length (); | |
2853 trace_info.safe_push (ti); | |
2854 | |
2855 saw_barrier = false; | |
2856 switch_sections = false; | |
2857 } | |
2858 } | |
2859 | |
2860 /* Create the trace index after we've finished building trace_info, | |
2861 avoiding stale pointer problems due to reallocation. */ | |
2862 trace_index | |
2863 = new hash_table<trace_info_hasher> (trace_info.length ()); | |
2864 dw_trace_info *tp; | |
2865 FOR_EACH_VEC_ELT (trace_info, i, tp) | |
2866 { | |
2867 dw_trace_info **slot; | |
2868 | |
2869 if (dump_file) | |
2870 fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id, | |
2871 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head), | |
2872 tp->switch_sections ? " (section switch)" : ""); | |
2873 | |
2874 slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT); | |
2875 gcc_assert (*slot == NULL); | |
2876 *slot = tp; | |
2877 } | |
2878 } | |
2879 | |
2880 /* Record the initial position of the return address. RTL is | |
2881 INCOMING_RETURN_ADDR_RTX. */ | |
2882 | |
static void
initial_return_save (rtx rtl)
{
  /* Column the RA arrives in; stays INVALID_REGNUM if it arrives in
     memory rather than a register.  */
  unsigned int reg = INVALID_REGNUM;
  /* SP-relative offset of the RA slot when it arrives on the stack.  */
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  Only SP-based addresses (bare SP, or
	 SP +/- constant) are expected here.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = INTVAL (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -INTVAL (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8.  Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      /* The RA arrives somewhere other than the DWARF return column.
	 If it is in a register (RTL is still that REG here), remember
	 the register so later stack saves can be redirected to the
	 return column; then record the save, with the stack offset
	 made CFA-relative.  */
      if (reg != INVALID_REGNUM)
        record_reg_saved_in_reg (rtl, pc_rtx);
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
2941 | |
/* Compute the CFI state at function entry (the CIE state), caching it
   in cie_cfi_row / cie_cfi_vec for reuse across functions, and record
   in cie_return_save where the incoming return address lives.  Runs
   with a dummy trace installed so the row-building helpers have a
   cur_trace / cur_row to operate on; all three globals are cleared
   again before returning.  */

static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);

  memset (&cie_trace, 0, sizeof (cie_trace));
  cur_trace = &cie_trace;

  /* CFIs emitted while building this row accumulate in cie_cfi_vec.  */
  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset (&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  loc.offset = INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
	 register, but choose a different return column.  This will result
	 in a DW_CFA_register for the return, and an entry in
	 regs_saved_in_regs to match.  If the target later stores that
	 return address register to the stack, we want to be able to emit
	 the DW_CFA_offset against the return column, not the intermediate
	 save register.  Save the contents of regs_saved_in_regs so that
	 we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
	{
	case 0:
	  break;
	case 1:
	  cie_return_save = ggc_alloc<reg_saved_in_data> ();
	  *cie_return_save = cie_trace.regs_saved_in_regs[0];
	  cie_trace.regs_saved_in_regs.release ();
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* Tear down the dummy trace state.  */
  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
2993 | |
2994 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI | |
2995 state at each location within the function. These notes will be | |
2996 emitted during pass_final. */ | |
2997 | |
static unsigned int
execute_dwarf2_frame (void)
{
  /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file.  */
  dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);

  /* The first time we're called, compute the incoming frame state.  */
  if (cie_cfi_vec == NULL)
    create_cie_data ();

  dwarf2out_alloc_current_fde ();

  create_pseudo_cfg ();

  /* Do the work: scan each trace for CFI notes, stitch the traces
     together, then attach the resulting CFIs to the FDE.  */
  create_cfi_notes ();
  connect_traces ();
  add_cfis_to_fde ();

  /* Free all the data we allocated.  */
  {
    size_t i;
    dw_trace_info *ti;

    /* Each trace owns its regs_saved_in_regs vector.  */
    FOR_EACH_VEC_ELT (trace_info, i, ti)
      ti->regs_saved_in_regs.release ();
  }
  trace_info.release ();

  delete trace_index;
  trace_index = NULL;

  return 0;
}
3032 | |
3033 /* Convert a DWARF call frame info. operation to its string name */ | |
3034 | |
3035 static const char * | |
3036 dwarf_cfi_name (unsigned int cfi_opc) | |
3037 { | |
3038 const char *name = get_DW_CFA_name (cfi_opc); | |
3039 | |
3040 if (name != NULL) | |
3041 return name; | |
3042 | |
3043 return "DW_CFA_<unknown>"; | |
3044 } | |
3045 | |
3046 /* This routine will generate the correct assembly data for a location | |
3047 description based on a cfi entry with a complex address. */ | |
3048 | |
3049 static void | |
3050 output_cfa_loc (dw_cfi_ref cfi, int for_eh) | |
3051 { | |
3052 dw_loc_descr_ref loc; | |
3053 unsigned long size; | |
3054 | |
3055 if (cfi->dw_cfi_opc == DW_CFA_expression | |
3056 || cfi->dw_cfi_opc == DW_CFA_val_expression) | |
3057 { | |
3058 unsigned r = | |
3059 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh); | |
3060 dw2_asm_output_data (1, r, NULL); | |
3061 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc; | |
3062 } | |
3063 else | |
3064 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc; | |
3065 | |
3066 /* Output the size of the block. */ | |
3067 size = size_of_locs (loc); | |
3068 dw2_asm_output_data_uleb128 (size, NULL); | |
3069 | |
3070 /* Now output the operations themselves. */ | |
3071 output_loc_sequence (loc, for_eh); | |
3072 } | |
3073 | |
3074 /* Similar, but used for .cfi_escape. */ | |
3075 | |
3076 static void | |
3077 output_cfa_loc_raw (dw_cfi_ref cfi) | |
3078 { | |
3079 dw_loc_descr_ref loc; | |
3080 unsigned long size; | |
3081 | |
3082 if (cfi->dw_cfi_opc == DW_CFA_expression | |
3083 || cfi->dw_cfi_opc == DW_CFA_val_expression) | |
3084 { | |
3085 unsigned r = | |
3086 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1); | |
3087 fprintf (asm_out_file, "%#x,", r); | |
3088 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc; | |
3089 } | |
3090 else | |
3091 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc; | |
3092 | |
3093 /* Output the size of the block. */ | |
3094 size = size_of_locs (loc); | |
3095 dw2_asm_output_data_uleb128_raw (size); | |
3096 fputc (',', asm_out_file); | |
3097 | |
3098 /* Now output the operations themselves. */ | |
3099 output_loc_sequence_raw (loc); | |
3100 } | |
3101 | |
3102 /* Output a Call Frame Information opcode and its operand(s). */ | |
3103 | |
void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  /* DW_CFA_advance_loc, DW_CFA_offset and DW_CFA_restore pack their
     first operand into the low 6 bits of the opcode byte itself; every
     other opcode emits one opcode byte followed by explicit operands.  */
  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      /* The data-alignment-factored offset follows as a ULEB128.  */
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
		false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	/* The advance_locN forms emit an N-byte delta from the label
	   last recorded in the FDE, then remember the new label.  */
	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	/* Single ULEB128 register operand.  */
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	/* No operands.  */
	case DW_CFA_GNU_window_save:
	  break;

	/* Location expression operand(s), handled by output_cfa_loc.  */
	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	case DW_CFA_val_expression:
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  break;
	}
    }
}
3243 | |
3244 /* Similar, but do it via assembler directives instead. */ | |
3245 | |
void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  /* Registers are always mapped with the EH (for_eh=1) register map
     here, matching what the assembler's .cfi_* directives expect.  */
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routines is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* No directive exists for this opcode; escape the raw encoding
	 when really emitting assembly, print a readable form in dumps.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
    case DW_CFA_expression:
    case DW_CFA_val_expression:
      if (f != asm_out_file)
	{
	  /* Debug dump: name the directive but elide the expression.  */
	  fprintf (f, "\t.cfi_%scfa_%sexpression ...\n",
		   cfi->dw_cfi_opc == DW_CFA_def_cfa_expression ? "def_" : "",
		   cfi->dw_cfi_opc == DW_CFA_val_expression ? "val_" : "");
	  break;
	}
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
3363 | |
3364 void | |
3365 dwarf2out_emit_cfi (dw_cfi_ref cfi) | |
3366 { | |
3367 if (dwarf2out_do_cfi_asm ()) | |
3368 output_cfi_directive (asm_out_file, cfi); | |
3369 } | |
3370 | |
3371 static void | |
3372 dump_cfi_row (FILE *f, dw_cfi_row *row) | |
3373 { | |
3374 dw_cfi_ref cfi; | |
3375 unsigned i; | |
3376 | |
3377 cfi = row->cfa_cfi; | |
3378 if (!cfi) | |
3379 { | |
3380 dw_cfa_location dummy; | |
3381 memset (&dummy, 0, sizeof (dummy)); | |
3382 dummy.reg = INVALID_REGNUM; | |
3383 cfi = def_cfa_0 (&dummy, &row->cfa); | |
3384 } | |
3385 output_cfi_directive (f, cfi); | |
3386 | |
3387 FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi) | |
3388 if (cfi) | |
3389 output_cfi_directive (f, cfi); | |
3390 } | |
3391 | |
/* Prototype with external linkage for the debugger-callable helper.  */
void debug_cfi_row (dw_cfi_row *row);

/* Dump ROW to stderr; intended to be called from a debugger.  */

void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
3399 | |
3400 | |
3401 /* Save the result of dwarf2out_do_frame across PCH. | |
3402 This variable is tri-state, with 0 unset, >0 true, <0 false. */ | |
3403 static GTY(()) signed char saved_do_cfi_asm = 0; | |
3404 | |
3405 /* Decide whether we want to emit frame unwind information for the current | |
3406 translation unit. */ | |
3407 | |
3408 bool | |
3409 dwarf2out_do_frame (void) | |
3410 { | |
3411 /* We want to emit correct CFA location expressions or lists, so we | |
3412 have to return true if we're going to output debug info, even if | |
3413 we're not going to output frame or unwind info. */ | |
3414 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG) | |
3415 return true; | |
3416 | |
3417 if (saved_do_cfi_asm > 0) | |
3418 return true; | |
3419 | |
3420 if (targetm.debug_unwind_info () == UI_DWARF2) | |
3421 return true; | |
3422 | |
3423 if ((flag_unwind_tables || flag_exceptions) | |
3424 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2) | |
3425 return true; | |
3426 | |
3427 return false; | |
3428 } | |
3429 | |
3430 /* Decide whether to emit frame unwind via assembler directives. */ | |
3431 | |
bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

  /* The answer is computed once and cached in the tri-state
     saved_do_cfi_asm (0 unset, >0 yes, <0 no); a positive cache also
     feeds back into dwarf2out_do_frame.  */
  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
      && !flag_unwind_tables && !flag_exceptions
      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
3468 | |
namespace {

/* Descriptor for the dwarf2 CFI-annotation pass; timed under TV_FINAL,
   with no IL properties required, provided, or destroyed.  */
const pass_data pass_data_dwarf2_frame =
{
  RTL_PASS, /* type */
  "dwarf2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_FINAL, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* RTL pass that annotates each function with NOTE_INSN_CFI notes by
   running execute_dwarf2_frame.  */
class pass_dwarf2_frame : public rtl_opt_pass
{
public:
  pass_dwarf2_frame (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }

}; // class pass_dwarf2_frame

bool
pass_dwarf2_frame::gate (function *)
{
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  if (!targetm.have_prologue ())
    return false;

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}

} // anon namespace
3512 | |
/* Create a new instance of the dwarf2 frame pass for context CTXT.  */

rtl_opt_pass *
make_pass_dwarf2_frame (gcc::context *ctxt)
{
  return new pass_dwarf2_frame (ctxt);
}
3518 | |
3519 #include "gt-dwarf2cfi.h" |