111
|
1 /* Dwarf2 Call Frame Information helper routines.
|
131
|
2 Copyright (C) 1992-2018 Free Software Foundation, Inc.
|
111
|
3
|
|
4 This file is part of GCC.
|
|
5
|
|
6 GCC is free software; you can redistribute it and/or modify it under
|
|
7 the terms of the GNU General Public License as published by the Free
|
|
8 Software Foundation; either version 3, or (at your option) any later
|
|
9 version.
|
|
10
|
|
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
|
|
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
14 for more details.
|
|
15
|
|
16 You should have received a copy of the GNU General Public License
|
|
17 along with GCC; see the file COPYING3. If not see
|
|
18 <http://www.gnu.org/licenses/>. */
|
|
19
|
|
20 #include "config.h"
|
|
21 #include "system.h"
|
|
22 #include "coretypes.h"
|
|
23 #include "target.h"
|
|
24 #include "function.h"
|
|
25 #include "rtl.h"
|
|
26 #include "tree.h"
|
|
27 #include "tree-pass.h"
|
|
28 #include "memmodel.h"
|
|
29 #include "tm_p.h"
|
|
30 #include "emit-rtl.h"
|
|
31 #include "stor-layout.h"
|
|
32 #include "cfgbuild.h"
|
|
33 #include "dwarf2out.h"
|
|
34 #include "dwarf2asm.h"
|
|
35 #include "common/common-target.h"
|
|
36
|
|
37 #include "except.h" /* expand_builtin_dwarf_sp_column */
|
|
38 #include "profile-count.h" /* For expr.h */
|
|
39 #include "expr.h" /* init_return_column_size */
|
|
40 #include "output.h" /* asm_out_file */
|
|
41 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
|
|
42
|
|
43
|
|
44 /* ??? Poison these here until it can be done generically. They've been
|
|
45 totally replaced in this file; make sure it stays that way. */
|
|
46 #undef DWARF2_UNWIND_INFO
|
|
47 #undef DWARF2_FRAME_INFO
|
|
48 #if (GCC_VERSION >= 3000)
|
|
49 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
|
|
50 #endif
|
|
51
|
|
52 #ifndef INCOMING_RETURN_ADDR_RTX
|
|
53 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
|
|
54 #endif
|
131
|
55
|
|
56 #ifndef DEFAULT_INCOMING_FRAME_SP_OFFSET
|
|
57 #define DEFAULT_INCOMING_FRAME_SP_OFFSET INCOMING_FRAME_SP_OFFSET
|
|
58 #endif
|
111
|
59
|
|
60 /* A collected description of an entire row of the abstract CFI table. */
|
|
/* A collected description of an entire row of the abstract CFI table.  */
struct GTY(()) dw_cfi_row
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_def_cfa_expression.  */
  dw_cfa_location cfa;
  /* Non-null only when the CFA required a DW_CFA_def_cfa_expression
     opcode; otherwise the CFA member above is authoritative.  */
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  Indexed by
     DWARF column number; a null slot means the column is not saved.  */
  cfi_vec reg_save;
};
|
|
72
|
|
73 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
|
|
/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
struct GTY(()) reg_saved_in_data {
  /* The register as the caller knows it.  */
  rtx orig_reg;
  /* The register that currently holds the caller's value.  */
  rtx saved_in_reg;
};
|
|
78
|
|
79
|
|
80 /* Since we no longer have a proper CFG, we're going to create a facsimile
|
|
81 of one on the fly while processing the frame-related insns.
|
|
82
|
|
83 We create dw_trace_info structures for each extended basic block beginning
|
|
84 and ending at a "save point". Save points are labels, barriers, certain
|
|
85 notes, and of course the beginning and end of the function.
|
|
86
|
|
87 As we encounter control transfer insns, we propagate the "current"
|
|
88 row state across the edges to the starts of traces. When checking is
|
|
89 enabled, we validate that we propagate the same data from all sources.
|
|
90
|
|
91 All traces are members of the TRACE_INFO array, in the order in which
|
|
92 they appear in the instruction stream.
|
|
93
|
|
94 All save points are present in the TRACE_INDEX hash, mapping the insn
|
|
95 starting a trace to the dw_trace_info describing the trace. */
|
|
96
|
|
struct dw_trace_info
{
  /* The insn that begins the trace.  */
  rtx_insn *head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes are the values that must actually be emitted for this trace.  */
  poly_int64_pod beg_true_args_size, end_true_args_size;
  poly_int64_pod beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx_insn *eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;

  /* True if we've seen an insn with a REG_ARGS_SIZE note before EH_HEAD.  */
  bool args_size_defined_for_eh;
};
|
|
154
|
|
155
|
|
156 /* Hashtable helpers. */
|
|
157
|
|
/* Hash traits for dw_trace_info, keyed by the UID of the trace's head
   insn.  Used by the TRACE_INDEX hash table below.  */
struct trace_info_hasher : nofree_ptr_hash <dw_trace_info>
{
  static inline hashval_t hash (const dw_trace_info *);
  static inline bool equal (const dw_trace_info *, const dw_trace_info *);
};
|
|
163
|
|
/* Hash a trace by the UID of its head insn.  Callers of
   find_with_hash pass INSN_UID directly, so this must stay in sync.  */
inline hashval_t
trace_info_hasher::hash (const dw_trace_info *ti)
{
  return INSN_UID (ti->head);
}
|
|
169
|
|
/* Two traces are the same iff they start at the same insn.  */
inline bool
trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
{
  return a->head == b->head;
}
|
|
175
|
|
176
|
|
/* The variables making up the pseudo-cfg, as described above.  */
static vec<dw_trace_info> trace_info;
static vec<dw_trace_info *> trace_work_list;
static hash_table<trace_info_hasher> *trace_index;

/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

/* The return-address save recorded in the CIE, if any.  */
static GTY(()) reg_saved_in_data *cie_return_save;

/* Counter feeding dwarf2out_cfi_label's LCFI label names.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx_insn *add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;

/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

struct queued_reg_save {
  /* Register being saved (CFA-relative) or copied.  */
  rtx reg;
  /* If non-null, REG is saved in this register instead of memory.  */
  rtx saved_reg;
  /* Offset from the CFA at which REG is saved.  */
  poly_int64_pod cfa_offset;
};


static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
|
|
228
|
|
229 /* Hook used by __throw. */
|
|
230
|
|
231 rtx
|
|
232 expand_builtin_dwarf_sp_column (void)
|
|
233 {
|
|
234 unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
|
|
235 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
|
|
236 }
|
|
237
|
|
238 /* MEM is a memory reference for the register size table, each element of
|
|
239 which has mode MODE. Initialize column C as a return address column. */
|
|
240
|
|
241 static void
|
|
242 init_return_column_size (scalar_int_mode mode, rtx mem, unsigned int c)
|
|
243 {
|
|
244 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
|
|
245 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
|
|
246 emit_move_insn (adjust_address (mem, mode, offset),
|
|
247 gen_int_mode (size, mode));
|
|
248 }
|
|
249
|
|
250 /* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
|
|
251 init_one_dwarf_reg_size to communicate on what has been done by the
|
|
252 latter. */
|
|
253
|
|
/* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
   init_one_dwarf_reg_size to communicate on what has been done by the
   latter.  */

struct init_one_dwarf_reg_state
{
  /* Whether the dwarf return column was initialized.  */
  bool wrote_return_column;

  /* For each hard register REGNO, whether init_one_dwarf_reg_size
     was given REGNO to process already.  */
  bool processed_regno [FIRST_PSEUDO_REGISTER];

};
|
|
264
|
|
265 /* Helper for expand_builtin_init_dwarf_reg_sizes. Generate code to
|
|
266 initialize the dwarf register size table entry corresponding to register
|
|
267 REGNO in REGMODE. TABLE is the table base address, SLOTMODE is the mode to
|
|
268 use for the size entry to initialize, and INIT_STATE is the communication
|
|
269 datastructure conveying what we're doing to our caller. */
|
|
270
|
|
/* Helper for expand_builtin_init_dwarf_reg_sizes.  Generate code to
   initialize the dwarf register size table entry corresponding to register
   REGNO in REGMODE.  TABLE is the table base address, SLOTMODE is the mode
   to use for the size entry to initialize, and INIT_STATE is the
   communication datastructure conveying what we're doing to our caller.  */

static
void init_one_dwarf_reg_size (int regno, machine_mode regmode,
			      rtx table, machine_mode slotmode,
			      init_one_dwarf_reg_state *init_state)
{
  /* Hard regno -> DWARF number -> emitted column -> unwind column.  */
  const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
  const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
  const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);

  poly_int64 slotoffset = dcol * GET_MODE_SIZE (slotmode);
  poly_int64 regsize = GET_MODE_SIZE (regmode);

  /* Mark REGNO processed even if we bail out below, so the caller never
     hands it to us twice (e.g. once via a span and once on its own).  */
  init_state->processed_regno[regno] = true;

  /* Columns beyond the unwinder's table are simply not recorded.  */
  if (rnum >= DWARF_FRAME_REGISTERS)
    return;

  if (dnum == DWARF_FRAME_RETURN_COLUMN)
    {
      /* VOIDmode means there is no real register behind the return
	 column; leave it for the caller's fallback initialization.  */
      if (regmode == VOIDmode)
	return;
      init_state->wrote_return_column = true;
    }

  /* ??? When is this true?  Should it be a test based on DCOL instead?  */
  if (maybe_lt (slotoffset, 0))
    return;

  emit_move_insn (adjust_address (table, slotmode, slotoffset),
		  gen_int_mode (regsize, slotmode));
}
|
|
302
|
|
303 /* Generate code to initialize the dwarf register size table located
|
|
304 at the provided ADDRESS. */
|
|
305
|
|
/* Generate code to initialize the dwarf register size table located
   at the provided ADDRESS.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  /* Each table slot is a char-sized integer.  */
  scalar_int_mode mode = SCALAR_INT_TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);

  init_one_dwarf_reg_state init_state;

  memset ((char *)&init_state, 0, sizeof (init_state));

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      machine_mode save_mode;
      rtx span;

      /* No point in processing a register multiple times.  This could happen
	 with register spans, e.g. when a reg is first processed as a piece of
	 a span, then as a register on its own later on.  */

      if (init_state.processed_regno[i])
	continue;

      save_mode = targetm.dwarf_frame_reg_mode (i);
      span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));

      if (!span)
	init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
      else
	{
	  /* The register maps to several DWARF columns; size each piece
	     individually.  */
	  for (int si = 0; si < XVECLEN (span, 0); si++)
	    {
	      rtx reg = XVECEXP (span, 0, si);

	      init_one_dwarf_reg_size
		(REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
	    }
	}
    }

  /* If no register covered the return column, fall back to a
     pointer-sized entry.  */
  if (!init_state.wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  targetm.init_dwarf_reg_sizes_extra (address);
}
|
|
356
|
|
357
|
|
358 static dw_trace_info *
|
|
359 get_trace_info (rtx_insn *insn)
|
|
360 {
|
|
361 dw_trace_info dummy;
|
|
362 dummy.head = insn;
|
|
363 return trace_index->find_with_hash (&dummy, INSN_UID (insn));
|
|
364 }
|
|
365
|
|
/* Return true if INSN should start a new trace ("save point").  */

static bool
save_point_p (rtx_insn *insn)
{
  /* Labels, except those that are really jump tables.  */
  if (LABEL_P (insn))
    return inside_basic_block_p (insn);

  /* We split traces at the prologue/epilogue notes because those
     are points at which the unwind info is usually stable.  This
     makes it easier to find spots with identical unwind info so
     that we can use remember/restore_state opcodes.  */
  if (NOTE_P (insn))
    switch (NOTE_KIND (insn))
      {
      case NOTE_INSN_PROLOGUE_END:
      case NOTE_INSN_EPILOGUE_BEG:
	return true;
      }
  /* Any other note kind falls through to the common "not a save
     point" answer below.  */

  return false;
}
|
|
387
|
|
388 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
|
|
389
|
|
/* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder.  */

static inline HOST_WIDE_INT
div_data_align (HOST_WIDE_INT off)
{
  HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
  /* Offsets that are not a multiple of the data alignment cannot be
     encoded with data-aligned opcodes; that would be a caller bug.  */
  gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
  return r;
}
|
|
397
|
|
398 /* Return true if we need a signed version of a given opcode
|
|
399 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
|
|
400
|
|
401 static inline bool
|
|
402 need_data_align_sf_opcode (HOST_WIDE_INT off)
|
|
403 {
|
|
404 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
|
|
405 }
|
|
406
|
|
407 /* Return a pointer to a newly allocated Call Frame Instruction. */
|
|
408
|
|
/* Return a pointer to a newly allocated Call Frame Instruction.  */

static inline dw_cfi_ref
new_cfi (void)
{
  dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();

  /* Start both operands at zero; callers overwrite as needed.  */
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
  cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;

  return cfi;
}
|
|
419
|
|
420 /* Return a newly allocated CFI row, with no defined data. */
|
|
421
|
|
/* Return a newly allocated CFI row, with no defined data.  */

static dw_cfi_row *
new_cfi_row (void)
{
  dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();

  /* An undefined CFA is signalled by an invalid register.  */
  row->cfa.reg = INVALID_REGNUM;

  return row;
}
|
|
431
|
|
432 /* Return a copy of an existing CFI row. */
|
|
433
|
|
/* Return a copy of an existing CFI row.  */

static dw_cfi_row *
copy_cfi_row (dw_cfi_row *src)
{
  dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();

  *dst = *src;
  /* The struct copy above aliases the reg_save vector; give the copy
     its own so later updates don't corrupt SRC.  */
  dst->reg_save = vec_safe_copy (src->reg_save);

  return dst;
}
|
|
444
|
131
|
445 /* Return a copy of an existing CFA location. */
|
|
446
|
|
447 static dw_cfa_location *
|
|
448 copy_cfa (dw_cfa_location *src)
|
|
449 {
|
|
450 dw_cfa_location *dst = ggc_alloc<dw_cfa_location> ();
|
|
451 *dst = *src;
|
|
452 return dst;
|
|
453 }
|
|
454
|
111
|
455 /* Generate a new label for the CFI info to refer to. */
|
|
456
|
|
/* Generate a new label for the CFI info to refer to.  The returned
   string is freshly allocated with xstrdup; the caller owns it.  */

static char *
dwarf2out_cfi_label (void)
{
  int num = dwarf2out_cfi_label_num++;
  /* Large enough for "*LCFI" plus a decimal int.  */
  char label[20];

  ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);

  return xstrdup (label);
}
|
|
467
|
|
468 /* Add CFI either to the current insn stream or to a vector, or both. */
|
|
469
|
|
/* Add CFI either to the current insn stream or to a vector, or both.
   Emission targets are selected by the globals ADD_CFI_INSN and
   ADD_CFI_VEC; either may be null.  */

static void
add_cfi (dw_cfi_ref cfi)
{
  any_cfis_emitted = true;

  if (add_cfi_insn != NULL)
    {
      /* Chain the new note after the previous one so successive CFIs
	 stay in order.  */
      add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
      NOTE_CFI (add_cfi_insn) = cfi;
    }

  if (add_cfi_vec != NULL)
    vec_safe_push (*add_cfi_vec, cfi);
}
|
|
484
|
|
/* Emit a DW_CFA_GNU_args_size opcode recording SIZE as the current
   argument area size.  */

static void
add_cfi_args_size (poly_int64 size)
{
  /* We don't yet have a representation for polynomial sizes.  */
  HOST_WIDE_INT const_size = size.to_constant ();

  dw_cfi_ref cfi = new_cfi ();

  /* While we can occasionally have args_size < 0 internally, this state
     should not persist at a point we actually need an opcode.  */
  gcc_assert (const_size >= 0);

  cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
  cfi->dw_cfi_oprnd1.dw_cfi_offset = const_size;

  add_cfi (cfi);
}
|
|
502
|
|
503 static void
|
|
504 add_cfi_restore (unsigned reg)
|
|
505 {
|
|
506 dw_cfi_ref cfi = new_cfi ();
|
|
507
|
|
508 cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
|
|
509 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
|
|
510
|
|
511 add_cfi (cfi);
|
|
512 }
|
|
513
|
|
514 /* Perform ROW->REG_SAVE[COLUMN] = CFI. CFI may be null, indicating
|
|
515 that the register column is no longer saved. */
|
|
516
|
|
517 static void
|
|
518 update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
|
|
519 {
|
|
520 if (vec_safe_length (row->reg_save) <= column)
|
|
521 vec_safe_grow_cleared (row->reg_save, column + 1);
|
|
522 (*row->reg_save)[column] = cfi;
|
|
523 }
|
|
524
|
|
/* This function fills in a dw_cfa_location structure from a dwarf location
   descriptor sequence.  */
|
|
527
|
|
static void
get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
{
  struct dw_loc_descr_node *ptr;
  /* Start from a fully "empty" location; each opcode below fills in
     one aspect of it.  */
  cfa->offset = 0;
  cfa->base_offset = 0;
  cfa->indirect = 0;
  /* -1 in an unsigned reg field, i.e. "no register yet".
     NOTE(review): presumably identical to INVALID_REGNUM (~0U) —
     confirm before relying on the comparison.  */
  cfa->reg = -1;

  for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
    {
      enum dwarf_location_atom op = ptr->dw_loc_opc;

      switch (op)
	{
	/* DW_OP_reg<n>: the location is register <n> itself.  */
	case DW_OP_reg0:
	case DW_OP_reg1:
	case DW_OP_reg2:
	case DW_OP_reg3:
	case DW_OP_reg4:
	case DW_OP_reg5:
	case DW_OP_reg6:
	case DW_OP_reg7:
	case DW_OP_reg8:
	case DW_OP_reg9:
	case DW_OP_reg10:
	case DW_OP_reg11:
	case DW_OP_reg12:
	case DW_OP_reg13:
	case DW_OP_reg14:
	case DW_OP_reg15:
	case DW_OP_reg16:
	case DW_OP_reg17:
	case DW_OP_reg18:
	case DW_OP_reg19:
	case DW_OP_reg20:
	case DW_OP_reg21:
	case DW_OP_reg22:
	case DW_OP_reg23:
	case DW_OP_reg24:
	case DW_OP_reg25:
	case DW_OP_reg26:
	case DW_OP_reg27:
	case DW_OP_reg28:
	case DW_OP_reg29:
	case DW_OP_reg30:
	case DW_OP_reg31:
	  cfa->reg = op - DW_OP_reg0;
	  break;
	case DW_OP_regx:
	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
	  break;
	/* DW_OP_breg<n>: register <n> plus a signed offset.  */
	case DW_OP_breg0:
	case DW_OP_breg1:
	case DW_OP_breg2:
	case DW_OP_breg3:
	case DW_OP_breg4:
	case DW_OP_breg5:
	case DW_OP_breg6:
	case DW_OP_breg7:
	case DW_OP_breg8:
	case DW_OP_breg9:
	case DW_OP_breg10:
	case DW_OP_breg11:
	case DW_OP_breg12:
	case DW_OP_breg13:
	case DW_OP_breg14:
	case DW_OP_breg15:
	case DW_OP_breg16:
	case DW_OP_breg17:
	case DW_OP_breg18:
	case DW_OP_breg19:
	case DW_OP_breg20:
	case DW_OP_breg21:
	case DW_OP_breg22:
	case DW_OP_breg23:
	case DW_OP_breg24:
	case DW_OP_breg25:
	case DW_OP_breg26:
	case DW_OP_breg27:
	case DW_OP_breg28:
	case DW_OP_breg29:
	case DW_OP_breg30:
	case DW_OP_breg31:
	  cfa->reg = op - DW_OP_breg0;
	  cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
	  break;
	case DW_OP_bregx:
	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
	  cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
	  break;
	case DW_OP_deref:
	  cfa->indirect = 1;
	  break;
	case DW_OP_plus_uconst:
	  cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
	  break;
	default:
	  /* Only expressions built by build_cfa_loc are expected here,
	     so any other opcode indicates a bug.  */
	  gcc_unreachable ();
	}
    }
}
|
|
630
|
|
631 /* Find the previous value for the CFA, iteratively. CFI is the opcode
|
|
632 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
|
|
633 one level of remember/restore state processing. */
|
|
634
|
|
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      /* Prefer the cached location (stored by def_cfa_0 for polynomial
	 offsets); otherwise re-derive it from the expression.  */
      if (cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc)
	*loc = *cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc;
      else
	get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      /* Only one level of remembered state is supported.  */
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      /* Opcodes that don't affect the CFA are ignored.  */
      break;
    }
}
|
|
674
|
|
675 /* Determine if two dw_cfa_location structures define the same data. */
|
|
676
|
|
/* Determine if two dw_cfa_location structures define the same data.  */

bool
cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
{
  /* base_offset only matters when the location is indirect.  */
  return (loc1->reg == loc2->reg
	  && known_eq (loc1->offset, loc2->offset)
	  && loc1->indirect == loc2->indirect
	  && (loc1->indirect == 0
	      || known_eq (loc1->base_offset, loc2->base_offset)));
}
|
|
686
|
|
687 /* Determine if two CFI operands are identical. */
|
|
688
|
|
/* Determine if two CFI operands are identical.  T selects which member
   of the dw_cfi_oprnd union is live in both A and B.  */

static bool
cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
{
  switch (t)
    {
    case dw_cfi_oprnd_unused:
      return true;
    case dw_cfi_oprnd_reg_num:
      return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
    case dw_cfi_oprnd_offset:
      return a->dw_cfi_offset == b->dw_cfi_offset;
    case dw_cfi_oprnd_addr:
      /* Pointer equality first as a cheap shortcut for shared labels.  */
      return (a->dw_cfi_addr == b->dw_cfi_addr
	      || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
    case dw_cfi_oprnd_loc:
      return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
    case dw_cfi_oprnd_cfa_loc:
      return cfa_equal_p (a->dw_cfi_cfa_loc, b->dw_cfi_cfa_loc);
    }
  /* All enum values are handled above.  */
  gcc_unreachable ();
}
|
|
710
|
|
711 /* Determine if two CFI entries are identical. */
|
|
712
|
|
/* Determine if two CFI entries are identical.  Either argument may be
   null; two nulls compare equal.  */

static bool
cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
{
  enum dwarf_call_frame_info opc;

  /* Make things easier for our callers, including missing operands.  */
  if (a == b)
    return true;
  if (a == NULL || b == NULL)
    return false;

  /* Obviously, the opcodes must match.  */
  opc = a->dw_cfi_opc;
  if (opc != b->dw_cfi_opc)
    return false;

  /* Compare the two operands, re-using the type of the operands as
     already exposed elsewhere.  */
  return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
			     &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
	  && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
				&a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
}
|
|
736
|
|
737 /* Determine if two CFI_ROW structures are identical. */
|
|
738
|
|
/* Determine if two CFI_ROW structures are identical.  */

static bool
cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
{
  size_t i, n_a, n_b, n_max;

  /* If A's CFA required a full expression, compare the expression CFIs;
     otherwise compare the simple reg+offset forms.  */
  if (a->cfa_cfi)
    {
      if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
	return false;
    }
  else if (!cfa_equal_p (&a->cfa, &b->cfa))
    return false;

  /* The reg_save vectors may differ in length; missing tail entries
     compare as null, which cfi_equal_p treats as "not saved".  */
  n_a = vec_safe_length (a->reg_save);
  n_b = vec_safe_length (b->reg_save);
  n_max = MAX (n_a, n_b);

  for (i = 0; i < n_max; ++i)
    {
      dw_cfi_ref r_a = NULL, r_b = NULL;

      if (i < n_a)
	r_a = (*a->reg_save)[i];
      if (i < n_b)
	r_b = (*b->reg_save)[i];

      if (!cfi_equal_p (r_a, r_b))
	return false;
    }

  return true;
}
|
|
771
|
|
772 /* The CFA is now calculated from NEW_CFA. Consider OLD_CFA in determining
|
|
773 what opcode to emit. Returns the CFI opcode to effect the change, or
|
|
774 NULL if NEW_CFA == OLD_CFA. */
|
|
775
|
|
/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  /* The cases below are ordered from most to least compact encoding;
     the expression form at the end is the catch-all.  */
  HOST_WIDE_INT const_offset;
  if (new_cfa->reg == old_cfa->reg
      && !new_cfa->indirect
      && !old_cfa->indirect
      && new_cfa->offset.is_constant (&const_offset))
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (const_offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = const_offset;
    }
  else if (new_cfa->offset.is_constant ()
	   && known_eq (new_cfa->offset, old_cfa->offset)
	   && old_cfa->reg != INVALID_REGNUM
	   && !new_cfa->indirect
	   && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  This requires the old CFA to have
	 been set as a register plus offset rather than a general
	 DW_CFA_def_cfa_expression.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
  else if (new_cfa->indirect == 0
	   && new_cfa->offset.is_constant (&const_offset))
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (const_offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = const_offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_node *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
      if (!new_cfa->offset.is_constant ()
	  || !new_cfa->base_offset.is_constant ())
	/* It's hard to reconstruct the CFA location for a polynomial
	   expression, so just cache it instead.  */
	cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc = copy_cfa (new_cfa);
      else
	cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc = NULL;
    }

  return cfi;
}
|
|
853
|
|
854 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact. */
|
|
855
|
|
/* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.  */

static void
def_cfa_1 (dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* Keep the trace's store location in sync when it tracks the same
     register as the CFA.  */
  if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
    cur_trace->cfa_store.offset = new_cfa->offset;

  cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
  if (cfi)
    {
      cur_row->cfa = *new_cfa;
      /* Remember the expression CFI only when the CFA actually needed
	 one; see dw_cfi_row::cfa_cfi.  */
      cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
			  ? cfi : NULL);

      add_cfi (cfi);
    }
}
|
|
874
|
|
875 /* Add the CFI for saving a register. REG is the CFA column number.
|
|
876 If SREG is -1, the register is saved at OFFSET from the CFA;
|
|
877 otherwise it is saved in SREG. */
|
|
878
|
|
/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is -1 (INVALID_REGNUM), the register is saved at OFFSET from
   the CFA; otherwise it is saved in SREG.  */

static void
reg_save (unsigned int reg, unsigned int sreg, poly_int64 offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  if (sreg == INVALID_REGNUM)
    {
      HOST_WIDE_INT const_offset;
      /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
      if (fde && fde->stack_realign)
	{
	  cfi->dw_cfi_opc = DW_CFA_expression;
	  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
	  cfi->dw_cfi_oprnd2.dw_cfi_loc
	    = build_cfa_aligned_loc (&cur_row->cfa, offset,
				     fde->stack_realignment);
	}
      else if (offset.is_constant (&const_offset))
	{
	  /* Pick the most compact opcode the offset and column allow.  */
	  if (need_data_align_sf_opcode (const_offset))
	    cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
	  else if (reg & ~0x3f)
	    cfi->dw_cfi_opc = DW_CFA_offset_extended;
	  else
	    cfi->dw_cfi_opc = DW_CFA_offset;
	  cfi->dw_cfi_oprnd2.dw_cfi_offset = const_offset;
	}
      else
	{
	  /* Polynomial offsets have no fixed-offset opcode; fall back
	     to a full location expression.  */
	  cfi->dw_cfi_opc = DW_CFA_expression;
	  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
	  cfi->dw_cfi_oprnd2.dw_cfi_loc
	    = build_cfa_loc (&cur_row->cfa, offset);
	}
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}
|
|
934
|
|
/* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
   and adjust data structures to match.  */

static void
notice_args_size (rtx_insn *insn)
{
  poly_int64 args_size, delta;
  rtx note;

  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
  if (note == NULL)
    return;

  /* A REG_ARGS_SIZE note seen before the trace's first EH insn means
     args_size is well-defined for EH purposes in this trace.  */
  if (!cur_trace->eh_head)
    cur_trace->args_size_defined_for_eh = true;

  args_size = get_args_size (note);
  delta = args_size - cur_trace->end_true_args_size;
  if (known_eq (delta, 0))
    return;

  cur_trace->end_true_args_size = args_size;

  /* If the CFA is computed off the stack pointer, then we must adjust
     the computation of the CFA as well.  */
  if (cur_cfa->reg == dw_stack_pointer_regnum)
    {
      gcc_assert (!cur_cfa->indirect);

      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_cfa->offset += delta;
    }
}
|
|
972
|
|
973 /* A subroutine of scan_trace. INSN is can_throw_internal. Update the
|
|
974 data within the trace related to EH insns and args_size. */
|
|
975
|
|
976 static void
|
|
977 notice_eh_throw (rtx_insn *insn)
|
|
978 {
|
131
|
979 poly_int64 args_size = cur_trace->end_true_args_size;
|
111
|
980 if (cur_trace->eh_head == NULL)
|
|
981 {
|
|
982 cur_trace->eh_head = insn;
|
|
983 cur_trace->beg_delay_args_size = args_size;
|
|
984 cur_trace->end_delay_args_size = args_size;
|
|
985 }
|
131
|
986 else if (maybe_ne (cur_trace->end_delay_args_size, args_size))
|
111
|
987 {
|
|
988 cur_trace->end_delay_args_size = args_size;
|
|
989
|
|
990 /* ??? If the CFA is the stack pointer, search backward for the last
|
|
991 CFI note and insert there. Given that the stack changed for the
|
|
992 args_size change, there *must* be such a note in between here and
|
|
993 the last eh insn. */
|
|
994 add_cfi_args_size (args_size);
|
|
995 }
|
|
996 }
|
|
997
|
|
998 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
|
|
999 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
|
|
1000 used in places where rtl is prohibited. */
|
|
1001
|
|
1002 static inline unsigned
|
|
1003 dwf_regno (const_rtx reg)
|
|
1004 {
|
|
1005 gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
|
|
1006 return DWARF_FRAME_REGNUM (REGNO (reg));
|
|
1007 }
|
|
1008
|
|
1009 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
|
|
1010
|
|
1011 static bool
|
|
1012 compare_reg_or_pc (rtx x, rtx y)
|
|
1013 {
|
|
1014 if (REG_P (x) && REG_P (y))
|
|
1015 return REGNO (x) == REGNO (y);
|
|
1016 return x == y;
|
|
1017 }
|
|
1018
|
|
1019 /* Record SRC as being saved in DEST. DEST may be null to delete an
|
|
1020 existing entry. SRC may be a register or PC_RTX. */
|
|
1021
|
|
1022 static void
|
|
1023 record_reg_saved_in_reg (rtx dest, rtx src)
|
|
1024 {
|
|
1025 reg_saved_in_data *elt;
|
|
1026 size_t i;
|
|
1027
|
|
1028 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
|
|
1029 if (compare_reg_or_pc (elt->orig_reg, src))
|
|
1030 {
|
|
1031 if (dest == NULL)
|
|
1032 cur_trace->regs_saved_in_regs.unordered_remove (i);
|
|
1033 else
|
|
1034 elt->saved_in_reg = dest;
|
|
1035 return;
|
|
1036 }
|
|
1037
|
|
1038 if (dest == NULL)
|
|
1039 return;
|
|
1040
|
|
1041 reg_saved_in_data e = {src, dest};
|
|
1042 cur_trace->regs_saved_in_regs.safe_push (e);
|
|
1043 }
|
|
1044
|
|
1045 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
|
|
1046 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
|
|
1047
|
|
1048 static void
|
131
|
1049 queue_reg_save (rtx reg, rtx sreg, poly_int64 offset)
|
111
|
1050 {
|
|
1051 queued_reg_save *q;
|
|
1052 queued_reg_save e = {reg, sreg, offset};
|
|
1053 size_t i;
|
|
1054
|
|
1055 /* Duplicates waste space, but it's also necessary to remove them
|
|
1056 for correctness, since the queue gets output in reverse order. */
|
|
1057 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
|
|
1058 if (compare_reg_or_pc (q->reg, reg))
|
|
1059 {
|
|
1060 *q = e;
|
|
1061 return;
|
|
1062 }
|
|
1063
|
|
1064 queued_reg_saves.safe_push (e);
|
|
1065 }
|
|
1066
|
|
1067 /* Output all the entries in QUEUED_REG_SAVES. */
|
|
1068
|
|
1069 static void
|
|
1070 dwarf2out_flush_queued_reg_saves (void)
|
|
1071 {
|
|
1072 queued_reg_save *q;
|
|
1073 size_t i;
|
|
1074
|
|
1075 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
|
|
1076 {
|
|
1077 unsigned int reg, sreg;
|
|
1078
|
|
1079 record_reg_saved_in_reg (q->saved_reg, q->reg);
|
|
1080
|
|
1081 if (q->reg == pc_rtx)
|
|
1082 reg = DWARF_FRAME_RETURN_COLUMN;
|
|
1083 else
|
|
1084 reg = dwf_regno (q->reg);
|
|
1085 if (q->saved_reg)
|
|
1086 sreg = dwf_regno (q->saved_reg);
|
|
1087 else
|
|
1088 sreg = INVALID_REGNUM;
|
|
1089 reg_save (reg, sreg, q->cfa_offset);
|
|
1090 }
|
|
1091
|
|
1092 queued_reg_saves.truncate (0);
|
|
1093 }
|
|
1094
|
|
1095 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
|
|
1096 location for? Or, does it clobber a register which we've previously
|
|
1097 said that some other register is saved in, and for which we now
|
|
1098 have a new location for? */
|
|
1099
|
|
1100 static bool
|
|
1101 clobbers_queued_reg_save (const_rtx insn)
|
|
1102 {
|
|
1103 queued_reg_save *q;
|
|
1104 size_t iq;
|
|
1105
|
|
1106 FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
|
|
1107 {
|
|
1108 size_t ir;
|
|
1109 reg_saved_in_data *rir;
|
|
1110
|
|
1111 if (modified_in_p (q->reg, insn))
|
|
1112 return true;
|
|
1113
|
|
1114 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
|
|
1115 if (compare_reg_or_pc (q->reg, rir->orig_reg)
|
|
1116 && modified_in_p (rir->saved_in_reg, insn))
|
|
1117 return true;
|
|
1118 }
|
|
1119
|
|
1120 return false;
|
|
1121 }
|
|
1122
|
|
1123 /* What register, if any, is currently saved in REG? */
|
|
1124
|
|
1125 static rtx
|
|
1126 reg_saved_in (rtx reg)
|
|
1127 {
|
|
1128 unsigned int regn = REGNO (reg);
|
|
1129 queued_reg_save *q;
|
|
1130 reg_saved_in_data *rir;
|
|
1131 size_t i;
|
|
1132
|
|
1133 FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
|
|
1134 if (q->saved_reg && regn == REGNO (q->saved_reg))
|
|
1135 return q->reg;
|
|
1136
|
|
1137 FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
|
|
1138 if (regn == REGNO (rir->saved_in_reg))
|
|
1139 return rir->orig_reg;
|
|
1140
|
|
1141 return NULL_RTX;
|
|
1142 }
|
|
1143
|
|
/* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note.  */

static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  memset (cur_cfa, 0, sizeof (*cur_cfa));

  /* Peel off any constant offset; what remains names the CFA base.  */
  pat = strip_offset (pat, &cur_cfa->offset);
  if (MEM_P (pat))
    {
      /* A MEM means the CFA value is itself stored in memory; record
	 the indirect address as base register + base_offset.  */
      cur_cfa->indirect = 1;
      pat = strip_offset (XEXP (pat, 0), &cur_cfa->base_offset);
    }
  /* ??? If this fails, we could be calling into the _loc functions to
     define a full expression.  So far no port does that.  */
  gcc_assert (REG_P (pat));
  cur_cfa->reg = dwf_regno (pat);
}
|
|
1162
|
|
/* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note.  */

static void
dwarf2out_frame_debug_adjust_cfa (rtx pat)
{
  rtx src, dest;

  gcc_assert (GET_CODE (pat) == SET);
  dest = XEXP (pat, 0);
  src = XEXP (pat, 1);

  switch (GET_CODE (src))
    {
    case PLUS:
      /* DEST = CFA_REG + const.  The CFA address itself is unchanged,
	 so its offset relative to the new base DEST shrinks by the
	 addend.  */
      gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
      cur_cfa->offset -= rtx_to_poly_int64 (XEXP (src, 1));
      break;

    case REG:
      /* Plain register copy: only the CFA base register changes.  */
      break;

    default:
      gcc_unreachable ();
    }

  cur_cfa->reg = dwf_regno (dest);
  gcc_assert (cur_cfa->indirect == 0);
}
|
|
1191
|
|
1192 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
|
|
1193
|
|
1194 static void
|
|
1195 dwarf2out_frame_debug_cfa_offset (rtx set)
|
|
1196 {
|
131
|
1197 poly_int64 offset;
|
111
|
1198 rtx src, addr, span;
|
|
1199 unsigned int sregno;
|
|
1200
|
|
1201 src = XEXP (set, 1);
|
|
1202 addr = XEXP (set, 0);
|
|
1203 gcc_assert (MEM_P (addr));
|
|
1204 addr = XEXP (addr, 0);
|
|
1205
|
|
1206 /* As documented, only consider extremely simple addresses. */
|
|
1207 switch (GET_CODE (addr))
|
|
1208 {
|
|
1209 case REG:
|
|
1210 gcc_assert (dwf_regno (addr) == cur_cfa->reg);
|
|
1211 offset = -cur_cfa->offset;
|
|
1212 break;
|
|
1213 case PLUS:
|
|
1214 gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
|
131
|
1215 offset = rtx_to_poly_int64 (XEXP (addr, 1)) - cur_cfa->offset;
|
111
|
1216 break;
|
|
1217 default:
|
|
1218 gcc_unreachable ();
|
|
1219 }
|
|
1220
|
|
1221 if (src == pc_rtx)
|
|
1222 {
|
|
1223 span = NULL;
|
|
1224 sregno = DWARF_FRAME_RETURN_COLUMN;
|
|
1225 }
|
|
1226 else
|
|
1227 {
|
|
1228 span = targetm.dwarf_register_span (src);
|
|
1229 sregno = dwf_regno (src);
|
|
1230 }
|
|
1231
|
|
1232 /* ??? We'd like to use queue_reg_save, but we need to come up with
|
|
1233 a different flushing heuristic for epilogues. */
|
|
1234 if (!span)
|
|
1235 reg_save (sregno, INVALID_REGNUM, offset);
|
|
1236 else
|
|
1237 {
|
|
1238 /* We have a PARALLEL describing where the contents of SRC live.
|
|
1239 Adjust the offset for each piece of the PARALLEL. */
|
131
|
1240 poly_int64 span_offset = offset;
|
111
|
1241
|
|
1242 gcc_assert (GET_CODE (span) == PARALLEL);
|
|
1243
|
|
1244 const int par_len = XVECLEN (span, 0);
|
|
1245 for (int par_index = 0; par_index < par_len; par_index++)
|
|
1246 {
|
|
1247 rtx elem = XVECEXP (span, 0, par_index);
|
|
1248 sregno = dwf_regno (src);
|
|
1249 reg_save (sregno, INVALID_REGNUM, span_offset);
|
|
1250 span_offset += GET_MODE_SIZE (GET_MODE (elem));
|
|
1251 }
|
|
1252 }
|
|
1253 }
|
|
1254
|
|
1255 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
|
|
1256
|
|
1257 static void
|
|
1258 dwarf2out_frame_debug_cfa_register (rtx set)
|
|
1259 {
|
|
1260 rtx src, dest;
|
|
1261 unsigned sregno, dregno;
|
|
1262
|
|
1263 src = XEXP (set, 1);
|
|
1264 dest = XEXP (set, 0);
|
|
1265
|
|
1266 record_reg_saved_in_reg (dest, src);
|
|
1267 if (src == pc_rtx)
|
|
1268 sregno = DWARF_FRAME_RETURN_COLUMN;
|
|
1269 else
|
|
1270 sregno = dwf_regno (src);
|
|
1271
|
|
1272 dregno = dwf_regno (dest);
|
|
1273
|
|
1274 /* ??? We'd like to use queue_reg_save, but we need to come up with
|
|
1275 a different flushing heuristic for epilogues. */
|
|
1276 reg_save (sregno, dregno, 0);
|
|
1277 }
|
|
1278
|
|
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note.  */

static void
dwarf2out_frame_debug_cfa_expression (rtx set)
{
  rtx src, dest, span;
  dw_cfi_ref cfi = new_cfi ();
  unsigned regno;

  dest = SET_DEST (set);
  src = SET_SRC (set);

  /* The note must describe a register being saved to a memory slot.  */
  gcc_assert (REG_P (src));
  gcc_assert (MEM_P (dest));

  /* Multi-part register spans are not supported for this note.  */
  span = targetm.dwarf_register_span (src);
  gcc_assert (!span);

  regno = dwf_regno (src);

  /* Emit DW_CFA_expression: REGNO is saved at the address computed by
     the location expression built from DEST's address.  */
  cfi->dw_cfi_opc = DW_CFA_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);

  /* ??? We'd like to use queue_reg_save, were the interface different,
     and, as above, we could manage flushing for epilogues.  */
  add_cfi (cfi);
  update_row_reg_save (cur_row, regno, cfi);
}
|
|
1310
|
|
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_VAL_EXPRESSION
   note.  */

static void
dwarf2out_frame_debug_cfa_val_expression (rtx set)
{
  rtx dest = SET_DEST (set);
  gcc_assert (REG_P (dest));

  /* Multi-part register spans are not supported for this note.  */
  rtx span = targetm.dwarf_register_span (dest);
  gcc_assert (!span);

  /* Emit DW_CFA_val_expression: the saved *value* of DEST (rather
     than a save-slot address) is given by the expression built from
     SRC.  */
  rtx src = SET_SRC (set);
  dw_cfi_ref cfi = new_cfi ();
  cfi->dw_cfi_opc = DW_CFA_val_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = dwf_regno (dest);
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (src, GET_MODE (src),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
  add_cfi (cfi);
  update_row_reg_save (cur_row, dwf_regno (dest), cfi);
}
|
|
1333
|
|
1334 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
|
|
1335
|
|
1336 static void
|
|
1337 dwarf2out_frame_debug_cfa_restore (rtx reg)
|
|
1338 {
|
|
1339 gcc_assert (REG_P (reg));
|
|
1340
|
|
1341 rtx span = targetm.dwarf_register_span (reg);
|
|
1342 if (!span)
|
|
1343 {
|
|
1344 unsigned int regno = dwf_regno (reg);
|
|
1345 add_cfi_restore (regno);
|
|
1346 update_row_reg_save (cur_row, regno, NULL);
|
|
1347 }
|
|
1348 else
|
|
1349 {
|
|
1350 /* We have a PARALLEL describing where the contents of REG live.
|
|
1351 Restore the register for each piece of the PARALLEL. */
|
|
1352 gcc_assert (GET_CODE (span) == PARALLEL);
|
|
1353
|
|
1354 const int par_len = XVECLEN (span, 0);
|
|
1355 for (int par_index = 0; par_index < par_len; par_index++)
|
|
1356 {
|
|
1357 reg = XVECEXP (span, 0, par_index);
|
|
1358 gcc_assert (REG_P (reg));
|
|
1359 unsigned int regno = dwf_regno (reg);
|
|
1360 add_cfi_restore (regno);
|
|
1361 update_row_reg_save (cur_row, regno, NULL);
|
|
1362 }
|
|
1363 }
|
|
1364 }
|
|
1365
|
|
1366 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
|
|
1367 ??? Perhaps we should note in the CIE where windows are saved (instead of
|
|
1368 assuming 0(cfa)) and what registers are in the window. */
|
|
1369
|
|
1370 static void
|
|
1371 dwarf2out_frame_debug_cfa_window_save (void)
|
|
1372 {
|
|
1373 dw_cfi_ref cfi = new_cfi ();
|
|
1374
|
|
1375 cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
|
|
1376 add_cfi (cfi);
|
|
1377 }
|
|
1378
|
|
1379 /* Record call frame debugging information for an expression EXPR,
|
|
1380 which either sets SP or FP (adjusting how we calculate the frame
|
|
1381 address) or saves a register to the stack or another register.
|
|
1382 LABEL indicates the address of EXPR.
|
|
1383
|
|
1384 This function encodes a state machine mapping rtxes to actions on
|
|
1385 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
|
|
1386 users need not read the source code.
|
|
1387
|
|
1388 The High-Level Picture
|
|
1389
|
|
1390 Changes in the register we use to calculate the CFA: Currently we
|
|
1391 assume that if you copy the CFA register into another register, we
|
|
1392 should take the other one as the new CFA register; this seems to
|
|
1393 work pretty well. If it's wrong for some target, it's simple
|
|
1394 enough not to set RTX_FRAME_RELATED_P on the insn in question.
|
|
1395
|
|
1396 Changes in the register we use for saving registers to the stack:
|
|
1397 This is usually SP, but not always. Again, we deduce that if you
|
|
1398 copy SP into another register (and SP is not the CFA register),
|
|
1399 then the new register is the one we will be using for register
|
|
1400 saves. This also seems to work.
|
|
1401
|
|
1402 Register saves: There's not much guesswork about this one; if
|
|
1403 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
|
|
1404 register save, and the register used to calculate the destination
|
|
1405 had better be the one we think we're using for this purpose.
|
|
1406 It's also assumed that a copy from a call-saved register to another
|
|
1407 register is saving that register if RTX_FRAME_RELATED_P is set on
|
|
1408 that instruction. If the copy is from a call-saved register to
|
|
1409 the *same* register, that means that the register is now the same
|
|
1410 value as in the caller.
|
|
1411
|
|
1412 Except: If the register being saved is the CFA register, and the
|
|
1413 offset is nonzero, we are saving the CFA, so we assume we have to
|
|
1414 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
|
|
1415 the intent is to save the value of SP from the previous frame.
|
|
1416
|
|
1417 In addition, if a register has previously been saved to a different
|
|
1418 register,
|
|
1419
|
|
1420 Invariants / Summaries of Rules
|
|
1421
|
|
1422 cfa current rule for calculating the CFA. It usually
|
|
1423 consists of a register and an offset. This is
|
|
1424 actually stored in *cur_cfa, but abbreviated
|
|
1425 for the purposes of this documentation.
|
|
1426 cfa_store register used by prologue code to save things to the stack
|
|
1427 cfa_store.offset is the offset from the value of
|
|
1428 cfa_store.reg to the actual CFA
|
|
1429 cfa_temp register holding an integral value. cfa_temp.offset
|
|
1430 stores the value, which will be used to adjust the
|
|
1431 stack pointer. cfa_temp is also used like cfa_store,
|
|
1432 to track stores to the stack via fp or a temp reg.
|
|
1433
|
|
1434 Rules 1- 4: Setting a register's value to cfa.reg or an expression
|
|
1435 with cfa.reg as the first operand changes the cfa.reg and its
|
|
1436 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
|
|
1437 cfa_temp.offset.
|
|
1438
|
|
1439 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
|
|
1440 expression yielding a constant. This sets cfa_temp.reg
|
|
1441 and cfa_temp.offset.
|
|
1442
|
|
1443 Rule 5: Create a new register cfa_store used to save items to the
|
|
1444 stack.
|
|
1445
|
|
1446 Rules 10-14: Save a register to the stack. Define offset as the
|
|
1447 difference of the original location and cfa_store's
|
|
1448 location (or cfa_temp's location if cfa_temp is used).
|
|
1449
|
|
1450 Rules 16-20: If AND operation happens on sp in prologue, we assume
|
|
1451 stack is realigned. We will use a group of DW_OP_XXX
|
|
1452 expressions to represent the location of the stored
|
|
1453 register instead of CFA+offset.
|
|
1454
|
|
1455 The Rules
|
|
1456
|
|
1457 "{a,b}" indicates a choice of a xor b.
|
|
1458 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
|
|
1459
|
|
1460 Rule 1:
|
|
1461 (set <reg1> <reg2>:cfa.reg)
|
|
1462 effects: cfa.reg = <reg1>
|
|
1463 cfa.offset unchanged
|
|
1464 cfa_temp.reg = <reg1>
|
|
1465 cfa_temp.offset = cfa.offset
|
|
1466
|
|
1467 Rule 2:
|
|
1468 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
|
|
1469 {<const_int>,<reg>:cfa_temp.reg}))
|
|
1470 effects: cfa.reg = sp if fp used
|
|
1471 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
|
|
1472 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
|
|
1473 if cfa_store.reg==sp
|
|
1474
|
|
1475 Rule 3:
|
|
1476 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
|
|
1477 effects: cfa.reg = fp
|
|
1478 cfa_offset += +/- <const_int>
|
|
1479
|
|
1480 Rule 4:
|
|
1481 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
|
|
1482 constraints: <reg1> != fp
|
|
1483 <reg1> != sp
|
|
1484 effects: cfa.reg = <reg1>
|
|
1485 cfa_temp.reg = <reg1>
|
|
1486 cfa_temp.offset = cfa.offset
|
|
1487
|
|
1488 Rule 5:
|
|
1489 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
|
|
1490 constraints: <reg1> != fp
|
|
1491 <reg1> != sp
|
|
1492 effects: cfa_store.reg = <reg1>
|
|
1493 cfa_store.offset = cfa.offset - cfa_temp.offset
|
|
1494
|
|
1495 Rule 6:
|
|
1496 (set <reg> <const_int>)
|
|
1497 effects: cfa_temp.reg = <reg>
|
|
1498 cfa_temp.offset = <const_int>
|
|
1499
|
|
1500 Rule 7:
|
|
1501 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
|
|
1502 effects: cfa_temp.reg = <reg1>
|
|
1503 cfa_temp.offset |= <const_int>
|
|
1504
|
|
1505 Rule 8:
|
|
1506 (set <reg> (high <exp>))
|
|
1507 effects: none
|
|
1508
|
|
1509 Rule 9:
|
|
1510 (set <reg> (lo_sum <exp> <const_int>))
|
|
1511 effects: cfa_temp.reg = <reg>
|
|
1512 cfa_temp.offset = <const_int>
|
|
1513
|
|
1514 Rule 10:
|
|
1515 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
|
|
1516 effects: cfa_store.offset -= <const_int>
|
|
1517 cfa.offset = cfa_store.offset if cfa.reg == sp
|
|
1518 cfa.reg = sp
|
|
1519 cfa.base_offset = -cfa_store.offset
|
|
1520
|
|
1521 Rule 11:
|
|
1522 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
|
|
1523 effects: cfa_store.offset += -/+ mode_size(mem)
|
|
1524 cfa.offset = cfa_store.offset if cfa.reg == sp
|
|
1525 cfa.reg = sp
|
|
1526 cfa.base_offset = -cfa_store.offset
|
|
1527
|
|
1528 Rule 12:
|
|
1529 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
|
|
1530
|
|
1531 <reg2>)
|
|
1532 effects: cfa.reg = <reg1>
|
|
1533 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
|
|
1534
|
|
1535 Rule 13:
|
|
1536 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
|
|
1537 effects: cfa.reg = <reg1>
|
|
1538 cfa.base_offset = -{cfa_store,cfa_temp}.offset
|
|
1539
|
|
1540 Rule 14:
|
|
1541 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
|
|
1542 effects: cfa.reg = <reg1>
|
|
1543 cfa.base_offset = -cfa_temp.offset
|
|
1544 cfa_temp.offset -= mode_size(mem)
|
|
1545
|
|
1546 Rule 15:
|
|
1547 (set <reg> {unspec, unspec_volatile})
|
|
1548 effects: target-dependent
|
|
1549
|
|
1550 Rule 16:
|
|
1551 (set sp (and: sp <const_int>))
|
|
1552 constraints: cfa_store.reg == sp
|
|
1553 effects: cfun->fde.stack_realign = 1
|
|
1554 cfa_store.offset = 0
|
|
1555 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
|
|
1556
|
|
1557 Rule 17:
|
|
1558 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
|
|
1559 effects: cfa_store.offset += -/+ mode_size(mem)
|
|
1560
|
|
1561 Rule 18:
|
|
1562 (set (mem ({pre_inc, pre_dec} sp)) fp)
|
|
1563 constraints: fde->stack_realign == 1
|
|
1564 effects: cfa_store.offset = 0
|
|
1565 cfa.reg != HARD_FRAME_POINTER_REGNUM
|
|
1566
|
|
1567 Rule 19:
|
|
1568 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
|
|
1569 constraints: fde->stack_realign == 1
|
|
1570 && cfa.offset == 0
|
|
1571 && cfa.indirect == 0
|
|
1572 && cfa.reg != HARD_FRAME_POINTER_REGNUM
|
|
1573 effects: Use DW_CFA_def_cfa_expression to define cfa
|
|
1574 cfa.reg == fde->drap_reg */
|
|
1575
|
|
1576 static void
|
|
1577 dwarf2out_frame_debug_expr (rtx expr)
|
|
1578 {
|
|
1579 rtx src, dest, span;
|
131
|
1580 poly_int64 offset;
|
111
|
1581 dw_fde_ref fde;
|
|
1582
|
|
1583 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
|
|
1584 the PARALLEL independently. The first element is always processed if
|
|
1585 it is a SET. This is for backward compatibility. Other elements
|
|
1586 are processed only if they are SETs and the RTX_FRAME_RELATED_P
|
|
1587 flag is set in them. */
|
|
1588 if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
|
|
1589 {
|
|
1590 int par_index;
|
|
1591 int limit = XVECLEN (expr, 0);
|
|
1592 rtx elem;
|
|
1593
|
|
1594 /* PARALLELs have strict read-modify-write semantics, so we
|
|
1595 ought to evaluate every rvalue before changing any lvalue.
|
|
1596 It's cumbersome to do that in general, but there's an
|
|
1597 easy approximation that is enough for all current users:
|
|
1598 handle register saves before register assignments. */
|
|
1599 if (GET_CODE (expr) == PARALLEL)
|
|
1600 for (par_index = 0; par_index < limit; par_index++)
|
|
1601 {
|
|
1602 elem = XVECEXP (expr, 0, par_index);
|
|
1603 if (GET_CODE (elem) == SET
|
|
1604 && MEM_P (SET_DEST (elem))
|
|
1605 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
|
|
1606 dwarf2out_frame_debug_expr (elem);
|
|
1607 }
|
|
1608
|
|
1609 for (par_index = 0; par_index < limit; par_index++)
|
|
1610 {
|
|
1611 elem = XVECEXP (expr, 0, par_index);
|
|
1612 if (GET_CODE (elem) == SET
|
|
1613 && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
|
|
1614 && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
|
|
1615 dwarf2out_frame_debug_expr (elem);
|
|
1616 }
|
|
1617 return;
|
|
1618 }
|
|
1619
|
|
1620 gcc_assert (GET_CODE (expr) == SET);
|
|
1621
|
|
1622 src = SET_SRC (expr);
|
|
1623 dest = SET_DEST (expr);
|
|
1624
|
|
1625 if (REG_P (src))
|
|
1626 {
|
|
1627 rtx rsi = reg_saved_in (src);
|
|
1628 if (rsi)
|
|
1629 src = rsi;
|
|
1630 }
|
|
1631
|
|
1632 fde = cfun->fde;
|
|
1633
|
|
1634 switch (GET_CODE (dest))
|
|
1635 {
|
|
1636 case REG:
|
|
1637 switch (GET_CODE (src))
|
|
1638 {
|
|
1639 /* Setting FP from SP. */
|
|
1640 case REG:
|
|
1641 if (cur_cfa->reg == dwf_regno (src))
|
|
1642 {
|
|
1643 /* Rule 1 */
|
|
1644 /* Update the CFA rule wrt SP or FP. Make sure src is
|
|
1645 relative to the current CFA register.
|
|
1646
|
|
1647 We used to require that dest be either SP or FP, but the
|
|
1648 ARM copies SP to a temporary register, and from there to
|
|
1649 FP. So we just rely on the backends to only set
|
|
1650 RTX_FRAME_RELATED_P on appropriate insns. */
|
|
1651 cur_cfa->reg = dwf_regno (dest);
|
|
1652 cur_trace->cfa_temp.reg = cur_cfa->reg;
|
|
1653 cur_trace->cfa_temp.offset = cur_cfa->offset;
|
|
1654 }
|
|
1655 else
|
|
1656 {
|
|
1657 /* Saving a register in a register. */
|
|
1658 gcc_assert (!fixed_regs [REGNO (dest)]
|
|
1659 /* For the SPARC and its register window. */
|
|
1660 || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));
|
|
1661
|
|
1662 /* After stack is aligned, we can only save SP in FP
|
|
1663 if drap register is used. In this case, we have
|
|
1664 to restore stack pointer with the CFA value and we
|
|
1665 don't generate this DWARF information. */
|
|
1666 if (fde
|
|
1667 && fde->stack_realign
|
|
1668 && REGNO (src) == STACK_POINTER_REGNUM)
|
|
1669 gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
|
|
1670 && fde->drap_reg != INVALID_REGNUM
|
|
1671 && cur_cfa->reg != dwf_regno (src));
|
|
1672 else
|
|
1673 queue_reg_save (src, dest, 0);
|
|
1674 }
|
|
1675 break;
|
|
1676
|
|
1677 case PLUS:
|
|
1678 case MINUS:
|
|
1679 case LO_SUM:
|
|
1680 if (dest == stack_pointer_rtx)
|
|
1681 {
|
|
1682 /* Rule 2 */
|
|
1683 /* Adjusting SP. */
|
131
|
1684 if (REG_P (XEXP (src, 1)))
|
111
|
1685 {
|
|
1686 gcc_assert (dwf_regno (XEXP (src, 1))
|
|
1687 == cur_trace->cfa_temp.reg);
|
|
1688 offset = cur_trace->cfa_temp.offset;
|
|
1689 }
|
131
|
1690 else if (!poly_int_rtx_p (XEXP (src, 1), &offset))
|
|
1691 gcc_unreachable ();
|
111
|
1692
|
|
1693 if (XEXP (src, 0) == hard_frame_pointer_rtx)
|
|
1694 {
|
|
1695 /* Restoring SP from FP in the epilogue. */
|
|
1696 gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
|
|
1697 cur_cfa->reg = dw_stack_pointer_regnum;
|
|
1698 }
|
|
1699 else if (GET_CODE (src) == LO_SUM)
|
|
1700 /* Assume we've set the source reg of the LO_SUM from sp. */
|
|
1701 ;
|
|
1702 else
|
|
1703 gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
|
|
1704
|
|
1705 if (GET_CODE (src) != MINUS)
|
|
1706 offset = -offset;
|
|
1707 if (cur_cfa->reg == dw_stack_pointer_regnum)
|
|
1708 cur_cfa->offset += offset;
|
|
1709 if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
|
|
1710 cur_trace->cfa_store.offset += offset;
|
|
1711 }
|
|
1712 else if (dest == hard_frame_pointer_rtx)
|
|
1713 {
|
|
1714 /* Rule 3 */
|
|
1715 /* Either setting the FP from an offset of the SP,
|
|
1716 or adjusting the FP */
|
|
1717 gcc_assert (frame_pointer_needed);
|
|
1718
|
|
1719 gcc_assert (REG_P (XEXP (src, 0))
|
131
|
1720 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
|
|
1721 offset = rtx_to_poly_int64 (XEXP (src, 1));
|
111
|
1722 if (GET_CODE (src) != MINUS)
|
|
1723 offset = -offset;
|
|
1724 cur_cfa->offset += offset;
|
|
1725 cur_cfa->reg = dw_frame_pointer_regnum;
|
|
1726 }
|
|
1727 else
|
|
1728 {
|
|
1729 gcc_assert (GET_CODE (src) != MINUS);
|
|
1730
|
|
1731 /* Rule 4 */
|
|
1732 if (REG_P (XEXP (src, 0))
|
|
1733 && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
|
131
|
1734 && poly_int_rtx_p (XEXP (src, 1), &offset))
|
111
|
1735 {
|
|
1736 /* Setting a temporary CFA register that will be copied
|
|
1737 into the FP later on. */
|
131
|
1738 offset = -offset;
|
111
|
1739 cur_cfa->offset += offset;
|
|
1740 cur_cfa->reg = dwf_regno (dest);
|
|
1741 /* Or used to save regs to the stack. */
|
|
1742 cur_trace->cfa_temp.reg = cur_cfa->reg;
|
|
1743 cur_trace->cfa_temp.offset = cur_cfa->offset;
|
|
1744 }
|
|
1745
|
|
1746 /* Rule 5 */
|
|
1747 else if (REG_P (XEXP (src, 0))
|
|
1748 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
|
|
1749 && XEXP (src, 1) == stack_pointer_rtx)
|
|
1750 {
|
|
1751 /* Setting a scratch register that we will use instead
|
|
1752 of SP for saving registers to the stack. */
|
|
1753 gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
|
|
1754 cur_trace->cfa_store.reg = dwf_regno (dest);
|
|
1755 cur_trace->cfa_store.offset
|
|
1756 = cur_cfa->offset - cur_trace->cfa_temp.offset;
|
|
1757 }
|
|
1758
|
|
1759 /* Rule 9 */
|
|
1760 else if (GET_CODE (src) == LO_SUM
|
131
|
1761 && poly_int_rtx_p (XEXP (src, 1),
|
|
1762 &cur_trace->cfa_temp.offset))
|
|
1763 cur_trace->cfa_temp.reg = dwf_regno (dest);
|
111
|
1764 else
|
|
1765 gcc_unreachable ();
|
|
1766 }
|
|
1767 break;
|
|
1768
|
|
1769 /* Rule 6 */
|
|
1770 case CONST_INT:
|
131
|
1771 case POLY_INT_CST:
|
111
|
1772 cur_trace->cfa_temp.reg = dwf_regno (dest);
|
131
|
1773 cur_trace->cfa_temp.offset = rtx_to_poly_int64 (src);
|
111
|
1774 break;
|
|
1775
|
|
1776 /* Rule 7 */
|
|
1777 case IOR:
|
|
1778 gcc_assert (REG_P (XEXP (src, 0))
|
|
1779 && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
|
|
1780 && CONST_INT_P (XEXP (src, 1)));
|
|
1781
|
|
1782 cur_trace->cfa_temp.reg = dwf_regno (dest);
|
131
|
1783 if (!can_ior_p (cur_trace->cfa_temp.offset, INTVAL (XEXP (src, 1)),
|
|
1784 &cur_trace->cfa_temp.offset))
|
|
1785 /* The target shouldn't generate this kind of CFI note if we
|
|
1786 can't represent it. */
|
|
1787 gcc_unreachable ();
|
111
|
1788 break;
|
|
1789
|
|
1790 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
|
|
1791 which will fill in all of the bits. */
|
|
1792 /* Rule 8 */
|
|
1793 case HIGH:
|
|
1794 break;
|
|
1795
|
|
1796 /* Rule 15 */
|
|
1797 case UNSPEC:
|
|
1798 case UNSPEC_VOLATILE:
|
|
1799 /* All unspecs should be represented by REG_CFA_* notes. */
|
|
1800 gcc_unreachable ();
|
|
1801 return;
|
|
1802
|
|
1803 /* Rule 16 */
|
|
1804 case AND:
|
|
1805 /* If this AND operation happens on stack pointer in prologue,
|
|
1806 we assume the stack is realigned and we extract the
|
|
1807 alignment. */
|
|
1808 if (fde && XEXP (src, 0) == stack_pointer_rtx)
|
|
1809 {
|
|
1810 /* We interpret reg_save differently with stack_realign set.
|
|
1811 Thus we must flush whatever we have queued first. */
|
|
1812 dwarf2out_flush_queued_reg_saves ();
|
|
1813
|
|
1814 gcc_assert (cur_trace->cfa_store.reg
|
|
1815 == dwf_regno (XEXP (src, 0)));
|
|
1816 fde->stack_realign = 1;
|
|
1817 fde->stack_realignment = INTVAL (XEXP (src, 1));
|
|
1818 cur_trace->cfa_store.offset = 0;
|
|
1819
|
|
1820 if (cur_cfa->reg != dw_stack_pointer_regnum
|
|
1821 && cur_cfa->reg != dw_frame_pointer_regnum)
|
|
1822 fde->drap_reg = cur_cfa->reg;
|
|
1823 }
|
|
1824 return;
|
|
1825
|
|
1826 default:
|
|
1827 gcc_unreachable ();
|
|
1828 }
|
|
1829 break;
|
|
1830
|
|
1831 case MEM:
|
|
1832
|
|
1833 /* Saving a register to the stack. Make sure dest is relative to the
|
|
1834 CFA register. */
|
|
1835 switch (GET_CODE (XEXP (dest, 0)))
|
|
1836 {
|
|
1837 /* Rule 10 */
|
|
1838 /* With a push. */
|
|
1839 case PRE_MODIFY:
|
|
1840 case POST_MODIFY:
|
|
1841 /* We can't handle variable size modifications. */
|
131
|
1842 offset = -rtx_to_poly_int64 (XEXP (XEXP (XEXP (dest, 0), 1), 1));
|
111
|
1843
|
|
1844 gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
|
|
1845 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
|
|
1846
|
|
1847 cur_trace->cfa_store.offset += offset;
|
|
1848 if (cur_cfa->reg == dw_stack_pointer_regnum)
|
|
1849 cur_cfa->offset = cur_trace->cfa_store.offset;
|
|
1850
|
|
1851 if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
|
|
1852 offset -= cur_trace->cfa_store.offset;
|
|
1853 else
|
|
1854 offset = -cur_trace->cfa_store.offset;
|
|
1855 break;
|
|
1856
|
|
1857 /* Rule 11 */
|
|
1858 case PRE_INC:
|
|
1859 case PRE_DEC:
|
|
1860 case POST_DEC:
|
|
1861 offset = GET_MODE_SIZE (GET_MODE (dest));
|
|
1862 if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
|
|
1863 offset = -offset;
|
|
1864
|
|
1865 gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
|
|
1866 == STACK_POINTER_REGNUM)
|
|
1867 && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
|
|
1868
|
|
1869 cur_trace->cfa_store.offset += offset;
|
|
1870
|
|
1871 /* Rule 18: If stack is aligned, we will use FP as a
|
|
1872 reference to represent the address of the stored
|
|
1873 regiser. */
|
|
1874 if (fde
|
|
1875 && fde->stack_realign
|
|
1876 && REG_P (src)
|
|
1877 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
|
|
1878 {
|
|
1879 gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
|
|
1880 cur_trace->cfa_store.offset = 0;
|
|
1881 }
|
|
1882
|
|
1883 if (cur_cfa->reg == dw_stack_pointer_regnum)
|
|
1884 cur_cfa->offset = cur_trace->cfa_store.offset;
|
|
1885
|
|
1886 if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
|
|
1887 offset += -cur_trace->cfa_store.offset;
|
|
1888 else
|
|
1889 offset = -cur_trace->cfa_store.offset;
|
|
1890 break;
|
|
1891
|
|
1892 /* Rule 12 */
|
|
1893 /* With an offset. */
|
|
1894 case PLUS:
|
|
1895 case MINUS:
|
|
1896 case LO_SUM:
|
|
1897 {
|
|
1898 unsigned int regno;
|
|
1899
|
131
|
1900 gcc_assert (REG_P (XEXP (XEXP (dest, 0), 0)));
|
|
1901 offset = rtx_to_poly_int64 (XEXP (XEXP (dest, 0), 1));
|
111
|
1902 if (GET_CODE (XEXP (dest, 0)) == MINUS)
|
|
1903 offset = -offset;
|
|
1904
|
|
1905 regno = dwf_regno (XEXP (XEXP (dest, 0), 0));
|
|
1906
|
|
1907 if (cur_cfa->reg == regno)
|
|
1908 offset -= cur_cfa->offset;
|
|
1909 else if (cur_trace->cfa_store.reg == regno)
|
|
1910 offset -= cur_trace->cfa_store.offset;
|
|
1911 else
|
|
1912 {
|
|
1913 gcc_assert (cur_trace->cfa_temp.reg == regno);
|
|
1914 offset -= cur_trace->cfa_temp.offset;
|
|
1915 }
|
|
1916 }
|
|
1917 break;
|
|
1918
|
|
1919 /* Rule 13 */
|
|
1920 /* Without an offset. */
|
|
1921 case REG:
|
|
1922 {
|
|
1923 unsigned int regno = dwf_regno (XEXP (dest, 0));
|
|
1924
|
|
1925 if (cur_cfa->reg == regno)
|
|
1926 offset = -cur_cfa->offset;
|
|
1927 else if (cur_trace->cfa_store.reg == regno)
|
|
1928 offset = -cur_trace->cfa_store.offset;
|
|
1929 else
|
|
1930 {
|
|
1931 gcc_assert (cur_trace->cfa_temp.reg == regno);
|
|
1932 offset = -cur_trace->cfa_temp.offset;
|
|
1933 }
|
|
1934 }
|
|
1935 break;
|
|
1936
|
|
1937 /* Rule 14 */
|
|
1938 case POST_INC:
|
|
1939 gcc_assert (cur_trace->cfa_temp.reg
|
|
1940 == dwf_regno (XEXP (XEXP (dest, 0), 0)));
|
|
1941 offset = -cur_trace->cfa_temp.offset;
|
|
1942 cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
|
|
1943 break;
|
|
1944
|
|
1945 default:
|
|
1946 gcc_unreachable ();
|
|
1947 }
|
|
1948
|
|
1949 /* Rule 17 */
|
|
1950 /* If the source operand of this MEM operation is a memory,
|
|
1951 we only care how much stack grew. */
|
|
1952 if (MEM_P (src))
|
|
1953 break;
|
|
1954
|
|
1955 if (REG_P (src)
|
|
1956 && REGNO (src) != STACK_POINTER_REGNUM
|
|
1957 && REGNO (src) != HARD_FRAME_POINTER_REGNUM
|
|
1958 && dwf_regno (src) == cur_cfa->reg)
|
|
1959 {
|
|
1960 /* We're storing the current CFA reg into the stack. */
|
|
1961
|
131
|
1962 if (known_eq (cur_cfa->offset, 0))
|
111
|
1963 {
|
|
1964 /* Rule 19 */
|
|
1965 /* If stack is aligned, putting CFA reg into stack means
|
|
1966 we can no longer use reg + offset to represent CFA.
|
|
1967 Here we use DW_CFA_def_cfa_expression instead. The
|
|
1968 result of this expression equals to the original CFA
|
|
1969 value. */
|
|
1970 if (fde
|
|
1971 && fde->stack_realign
|
|
1972 && cur_cfa->indirect == 0
|
|
1973 && cur_cfa->reg != dw_frame_pointer_regnum)
|
|
1974 {
|
|
1975 gcc_assert (fde->drap_reg == cur_cfa->reg);
|
|
1976
|
|
1977 cur_cfa->indirect = 1;
|
|
1978 cur_cfa->reg = dw_frame_pointer_regnum;
|
|
1979 cur_cfa->base_offset = offset;
|
|
1980 cur_cfa->offset = 0;
|
|
1981
|
|
1982 fde->drap_reg_saved = 1;
|
|
1983 break;
|
|
1984 }
|
|
1985
|
|
1986 /* If the source register is exactly the CFA, assume
|
|
1987 we're saving SP like any other register; this happens
|
|
1988 on the ARM. */
|
|
1989 queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
|
|
1990 break;
|
|
1991 }
|
|
1992 else
|
|
1993 {
|
|
1994 /* Otherwise, we'll need to look in the stack to
|
|
1995 calculate the CFA. */
|
|
1996 rtx x = XEXP (dest, 0);
|
|
1997
|
|
1998 if (!REG_P (x))
|
|
1999 x = XEXP (x, 0);
|
|
2000 gcc_assert (REG_P (x));
|
|
2001
|
|
2002 cur_cfa->reg = dwf_regno (x);
|
|
2003 cur_cfa->base_offset = offset;
|
|
2004 cur_cfa->indirect = 1;
|
|
2005 break;
|
|
2006 }
|
|
2007 }
|
|
2008
|
|
2009 if (REG_P (src))
|
|
2010 span = targetm.dwarf_register_span (src);
|
|
2011 else
|
|
2012 span = NULL;
|
|
2013
|
|
2014 if (!span)
|
|
2015 queue_reg_save (src, NULL_RTX, offset);
|
|
2016 else
|
|
2017 {
|
|
2018 /* We have a PARALLEL describing where the contents of SRC live.
|
|
2019 Queue register saves for each piece of the PARALLEL. */
|
131
|
2020 poly_int64 span_offset = offset;
|
111
|
2021
|
|
2022 gcc_assert (GET_CODE (span) == PARALLEL);
|
|
2023
|
|
2024 const int par_len = XVECLEN (span, 0);
|
|
2025 for (int par_index = 0; par_index < par_len; par_index++)
|
|
2026 {
|
|
2027 rtx elem = XVECEXP (span, 0, par_index);
|
|
2028 queue_reg_save (elem, NULL_RTX, span_offset);
|
|
2029 span_offset += GET_MODE_SIZE (GET_MODE (elem));
|
|
2030 }
|
|
2031 }
|
|
2032 break;
|
|
2033
|
|
2034 default:
|
|
2035 gcc_unreachable ();
|
|
2036 }
|
|
2037 }
|
|
2038
|
|
2039 /* Record call frame debugging information for INSN, which either sets
|
|
2040 SP or FP (adjusting how we calculate the frame address) or saves a
|
|
2041 register to the stack. */
|
|
2042
|
|
2043 static void
|
|
2044 dwarf2out_frame_debug (rtx_insn *insn)
|
|
2045 {
|
|
2046 rtx note, n, pat;
|
|
2047 bool handled_one = false;
|
|
2048
|
|
2049 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
|
|
2050 switch (REG_NOTE_KIND (note))
|
|
2051 {
|
|
2052 case REG_FRAME_RELATED_EXPR:
|
|
2053 pat = XEXP (note, 0);
|
|
2054 goto do_frame_expr;
|
|
2055
|
|
2056 case REG_CFA_DEF_CFA:
|
|
2057 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
|
|
2058 handled_one = true;
|
|
2059 break;
|
|
2060
|
|
2061 case REG_CFA_ADJUST_CFA:
|
|
2062 n = XEXP (note, 0);
|
|
2063 if (n == NULL)
|
|
2064 {
|
|
2065 n = PATTERN (insn);
|
|
2066 if (GET_CODE (n) == PARALLEL)
|
|
2067 n = XVECEXP (n, 0, 0);
|
|
2068 }
|
|
2069 dwarf2out_frame_debug_adjust_cfa (n);
|
|
2070 handled_one = true;
|
|
2071 break;
|
|
2072
|
|
2073 case REG_CFA_OFFSET:
|
|
2074 n = XEXP (note, 0);
|
|
2075 if (n == NULL)
|
|
2076 n = single_set (insn);
|
|
2077 dwarf2out_frame_debug_cfa_offset (n);
|
|
2078 handled_one = true;
|
|
2079 break;
|
|
2080
|
|
2081 case REG_CFA_REGISTER:
|
|
2082 n = XEXP (note, 0);
|
|
2083 if (n == NULL)
|
|
2084 {
|
|
2085 n = PATTERN (insn);
|
|
2086 if (GET_CODE (n) == PARALLEL)
|
|
2087 n = XVECEXP (n, 0, 0);
|
|
2088 }
|
|
2089 dwarf2out_frame_debug_cfa_register (n);
|
|
2090 handled_one = true;
|
|
2091 break;
|
|
2092
|
|
2093 case REG_CFA_EXPRESSION:
|
|
2094 case REG_CFA_VAL_EXPRESSION:
|
|
2095 n = XEXP (note, 0);
|
|
2096 if (n == NULL)
|
|
2097 n = single_set (insn);
|
|
2098
|
|
2099 if (REG_NOTE_KIND (note) == REG_CFA_EXPRESSION)
|
|
2100 dwarf2out_frame_debug_cfa_expression (n);
|
|
2101 else
|
|
2102 dwarf2out_frame_debug_cfa_val_expression (n);
|
|
2103
|
|
2104 handled_one = true;
|
|
2105 break;
|
|
2106
|
|
2107 case REG_CFA_RESTORE:
|
|
2108 n = XEXP (note, 0);
|
|
2109 if (n == NULL)
|
|
2110 {
|
|
2111 n = PATTERN (insn);
|
|
2112 if (GET_CODE (n) == PARALLEL)
|
|
2113 n = XVECEXP (n, 0, 0);
|
|
2114 n = XEXP (n, 0);
|
|
2115 }
|
|
2116 dwarf2out_frame_debug_cfa_restore (n);
|
|
2117 handled_one = true;
|
|
2118 break;
|
|
2119
|
|
2120 case REG_CFA_SET_VDRAP:
|
|
2121 n = XEXP (note, 0);
|
|
2122 if (REG_P (n))
|
|
2123 {
|
|
2124 dw_fde_ref fde = cfun->fde;
|
|
2125 if (fde)
|
|
2126 {
|
|
2127 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
|
|
2128 if (REG_P (n))
|
|
2129 fde->vdrap_reg = dwf_regno (n);
|
|
2130 }
|
|
2131 }
|
|
2132 handled_one = true;
|
|
2133 break;
|
|
2134
|
|
2135 case REG_CFA_TOGGLE_RA_MANGLE:
|
|
2136 case REG_CFA_WINDOW_SAVE:
|
|
2137 /* We overload both of these operations onto the same DWARF opcode. */
|
|
2138 dwarf2out_frame_debug_cfa_window_save ();
|
|
2139 handled_one = true;
|
|
2140 break;
|
|
2141
|
|
2142 case REG_CFA_FLUSH_QUEUE:
|
|
2143 /* The actual flush happens elsewhere. */
|
|
2144 handled_one = true;
|
|
2145 break;
|
|
2146
|
|
2147 default:
|
|
2148 break;
|
|
2149 }
|
|
2150
|
|
2151 if (!handled_one)
|
|
2152 {
|
|
2153 pat = PATTERN (insn);
|
|
2154 do_frame_expr:
|
|
2155 dwarf2out_frame_debug_expr (pat);
|
|
2156
|
|
2157 /* Check again. A parallel can save and update the same register.
|
|
2158 We could probably check just once, here, but this is safer than
|
|
2159 removing the check at the start of the function. */
|
|
2160 if (clobbers_queued_reg_save (pat))
|
|
2161 dwarf2out_flush_queued_reg_saves ();
|
|
2162 }
|
|
2163 }
|
|
2164
|
|
2165 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW. */
|
|
2166
|
|
2167 static void
|
|
2168 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
|
|
2169 {
|
|
2170 size_t i, n_old, n_new, n_max;
|
|
2171 dw_cfi_ref cfi;
|
|
2172
|
|
2173 if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
|
|
2174 add_cfi (new_row->cfa_cfi);
|
|
2175 else
|
|
2176 {
|
|
2177 cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
|
|
2178 if (cfi)
|
|
2179 add_cfi (cfi);
|
|
2180 }
|
|
2181
|
|
2182 n_old = vec_safe_length (old_row->reg_save);
|
|
2183 n_new = vec_safe_length (new_row->reg_save);
|
|
2184 n_max = MAX (n_old, n_new);
|
|
2185
|
|
2186 for (i = 0; i < n_max; ++i)
|
|
2187 {
|
|
2188 dw_cfi_ref r_old = NULL, r_new = NULL;
|
|
2189
|
|
2190 if (i < n_old)
|
|
2191 r_old = (*old_row->reg_save)[i];
|
|
2192 if (i < n_new)
|
|
2193 r_new = (*new_row->reg_save)[i];
|
|
2194
|
|
2195 if (r_old == r_new)
|
|
2196 ;
|
|
2197 else if (r_new == NULL)
|
|
2198 add_cfi_restore (i);
|
|
2199 else if (!cfi_equal_p (r_old, r_new))
|
|
2200 add_cfi (r_new);
|
|
2201 }
|
|
2202 }
|
|
2203
|
|
2204 /* Examine CFI and return true if a cfi label and set_loc is needed
|
|
2205 beforehand. Even when generating CFI assembler instructions, we
|
|
2206 still have to add the cfi to the list so that lookup_cfa_1 works
|
|
2207 later on. When -g2 and above we even need to force emitting of
|
|
2208 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
|
|
2209 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
|
|
2210 and so don't use convert_cfa_to_fb_loc_list. */
|
|
2211
|
|
2212 static bool
|
|
2213 cfi_label_required_p (dw_cfi_ref cfi)
|
|
2214 {
|
|
2215 if (!dwarf2out_do_cfi_asm ())
|
|
2216 return true;
|
|
2217
|
|
2218 if (dwarf_version == 2
|
|
2219 && debug_info_level > DINFO_LEVEL_TERSE
|
|
2220 && (write_symbols == DWARF2_DEBUG
|
|
2221 || write_symbols == VMS_AND_DWARF2_DEBUG))
|
|
2222 {
|
|
2223 switch (cfi->dw_cfi_opc)
|
|
2224 {
|
|
2225 case DW_CFA_def_cfa_offset:
|
|
2226 case DW_CFA_def_cfa_offset_sf:
|
|
2227 case DW_CFA_def_cfa_register:
|
|
2228 case DW_CFA_def_cfa:
|
|
2229 case DW_CFA_def_cfa_sf:
|
|
2230 case DW_CFA_def_cfa_expression:
|
|
2231 case DW_CFA_restore_state:
|
|
2232 return true;
|
|
2233 default:
|
|
2234 return false;
|
|
2235 }
|
|
2236 }
|
|
2237 return false;
|
|
2238 }
|
|
2239
|
|
/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
   necessary.  Consecutive CFI notes (with only inactive notes between
   them) are handled as one run: a single label covers the whole run if
   any CFI in it requires one.  */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx_insn *insn, *next;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      /* Record where the cold-section CFIs start within the vector.  */
      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  /* Advance NEXT past the whole run of CFI notes, OR-ing in
	     whether any of them needs a label; stop at the first active
	     insn or section switch.  */
	  while (next)
	    if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	      {
		required |= cfi_label_required_p (NOTE_CFI (next));
		next = NEXT_INSN (next);
	      }
	    else if (active_insn_p (next)
		     || (NOTE_P (next) && (NOTE_KIND (next)
					   == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
	      break;
	    else
	      next = NEXT_INSN (next);
	  if (required)
	    {
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      xcfi->dw_cfi_opc = DW_CFA_advance_loc4;
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      vec_safe_push (fde->dw_fde_cfi, xcfi);

	      /* Emit the label note so final () outputs the label.  */
	      rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Push every CFI in the run onto the FDE's vector.  */
	  do
	    {
	      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
		vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	}
    }
}
|
|
2297
|
|
2298 static void dump_cfi_row (FILE *f, dw_cfi_row *row);
|
|
2299
|
|
/* If LABEL is the start of a trace, then initialize the state of that
   trace from CUR_TRACE and CUR_ROW.  On the first visit the target
   trace inherits the current row and queued state and is pushed onto
   the work list; on later visits the incoming state must match what
   was recorded, except that args_size may differ (it is then marked
   undefined).  */

static void
maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
{
  dw_trace_info *ti;

  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    {
      fprintf (dump_file, "   saw edge from trace %u to %u (via %s %d)\n",
	       cur_trace->id, ti->id,
	       (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	       (origin ? INSN_UID (origin) : 0));
    }

  poly_int64 args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {

      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
#if CHECKING_P
      if (!cfi_row_equal_p (cur_row, ti->beg_row))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "Inconsistent CFI state!\n");
	      fprintf (dump_file, "SHOULD have:\n");
	      dump_cfi_row (dump_file, ti->beg_row);
	      fprintf (dump_file, "DO have:\n");
	      dump_cfi_row (dump_file, cur_row);
	    }

	  gcc_unreachable ();
	}
#endif

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (maybe_ne (ti->beg_true_args_size, args_size))
	ti->args_size_undefined = true;
    }
}
|
|
2363
|
|
2364 /* Similarly, but handle the args_size and CFA reset across EH
|
|
2365 and non-local goto edges. */
|
|
2366
|
|
2367 static void
|
|
2368 maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
|
|
2369 {
|
131
|
2370 poly_int64 save_args_size, delta;
|
111
|
2371 dw_cfa_location save_cfa;
|
|
2372
|
|
2373 save_args_size = cur_trace->end_true_args_size;
|
131
|
2374 if (known_eq (save_args_size, 0))
|
111
|
2375 {
|
|
2376 maybe_record_trace_start (start, origin);
|
|
2377 return;
|
|
2378 }
|
|
2379
|
|
2380 delta = -save_args_size;
|
|
2381 cur_trace->end_true_args_size = 0;
|
|
2382
|
|
2383 save_cfa = cur_row->cfa;
|
|
2384 if (cur_row->cfa.reg == dw_stack_pointer_regnum)
|
|
2385 {
|
|
2386 /* Convert a change in args_size (always a positive in the
|
|
2387 direction of stack growth) to a change in stack pointer. */
|
|
2388 if (!STACK_GROWS_DOWNWARD)
|
|
2389 delta = -delta;
|
|
2390
|
|
2391 cur_row->cfa.offset += delta;
|
|
2392 }
|
|
2393
|
|
2394 maybe_record_trace_start (start, origin);
|
|
2395
|
|
2396 cur_trace->end_true_args_size = save_args_size;
|
|
2397 cur_row->cfa = save_cfa;
|
|
2398 }
|
|
2399
|
|
/* Propagate CUR_TRACE state to the destinations implied by INSN.  */
/* ??? Sadly, this is in large part a duplicate of make_edges.  */

static void
create_trace_edges (rtx_insn *insn)
{
  rtx tmp;
  int i, n;

  if (JUMP_P (insn))
    {
      rtx_jump_table_data *table;

      /* Non-local gotos are handled from the CALL side.  */
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	return;

      if (tablejump_p (insn, NULL, &table))
	{
	  /* One edge per jump-table entry.  */
	  rtvec vec = table->get_labels ();

	  n = GET_NUM_ELEM (vec);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else if (computed_jump_p (insn))
	{
	  /* A computed jump may reach any label whose address was
	     taken.  */
	  rtx_insn *temp;
	  unsigned int i;
	  FOR_EACH_VEC_SAFE_ELT (forced_labels, i, temp)
	    maybe_record_trace_start (temp, insn);
	}
      else if (returnjump_p (insn))
	/* Return jumps have no in-function destination.  */
	;
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
	{
	  /* asm goto: one edge per label operand.  */
	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab =
		as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else
	{
	  /* Plain direct jump.  */
	  rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
	  gcc_assert (lab != NULL);
	  maybe_record_trace_start (lab, insn);
	}
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
	return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
	for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
	     lab;
	     lab = lab->next ())
	  maybe_record_trace_start_abnormal (lab->insn (), insn);
    }
  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    {
      /* Delay-slot SEQUENCE: recurse into each member and skip the
	 EH check below (each member is handled by its own call).  */
      int i, n = seq->len ();
      for (i = 0; i < n; ++i)
	create_trace_edges (seq->insn (i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
|
|
2482
|
|
/* A subroutine of scan_trace.  Do what needs to be done "after" INSN:
   apply any frame-related effects to the current row, then process any
   REG_ARGS_SIZE note on the insn.  */

static void
scan_insn_after (rtx_insn *insn)
{
  /* Only frame-related insns change the CFI row state.  */
  if (RTX_FRAME_RELATED_P (insn))
    dwarf2out_frame_debug (insn);
  /* args_size notes are examined for every insn.  */
  notice_args_size (insn);
}
|
|
2492
|
|
/* Scan the trace beginning at INSN and create the CFI notes for the
   instructions therein.  TRACE supplies the starting row state; ENTRY
   is true only for the function's entry trace, where a non-standard
   incoming frame SP offset may need an initial note.  Edges to other
   traces are recorded via create_trace_edges, which may push new
   traces on the work list.  */

static void
scan_trace (dw_trace_info *trace, bool entry)
{
  rtx_insn *prev, *insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
	     trace->id, rtx_name[(int) GET_CODE (insn)],
	     INSN_UID (insn));

  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  cur_trace = trace;
  cur_row = trace->end_row;

  /* THIS_CFA is the scratch CFA, updated as insns are scanned and only
     committed to the row by def_cfa_1.  */
  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  /* If the current function starts with a non-standard incoming frame
     sp offset, emit a note before the first instruction.  */
  if (entry
      && DEFAULT_INCOMING_FRAME_SP_OFFSET != INCOMING_FRAME_SP_OFFSET)
    {
      add_cfi_insn = insn;
      gcc_assert (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED);
      this_cfa.offset = INCOMING_FRAME_SP_OFFSET;
      def_cfa_1 (&this_cfa);
    }

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx_insn *control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
	{
	  /* Don't bother saving the unneeded queued registers at all.  */
	  queued_reg_saves.truncate (0);
	  break;
	}
      if (save_point_p (insn))
	{
	  /* Propagate across fallthru edges.  */
	  dwarf2out_flush_queued_reg_saves ();
	  maybe_record_trace_start (insn, NULL);
	  break;
	}

      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
	continue;

      /* Handle all changes to the row state.  Sequences require special
	 handling for the positioning of the notes.  */
      if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
	{
	  rtx_insn *elt;
	  int i, n = pat->len ();

	  control = pat->insn (0);
	  if (can_throw_internal (control))
	    notice_eh_throw (control);
	  dwarf2out_flush_queued_reg_saves ();

	  if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
	    {
	      /* ??? Hopefully multiple delay slots are not annulled.  */
	      gcc_assert (n == 2);
	      gcc_assert (!RTX_FRAME_RELATED_P (control));
	      gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

	      elt = pat->insn (1);

	      if (INSN_FROM_TARGET_P (elt))
		{
		  cfi_vec save_row_reg_save;

		  /* If ELT is an instruction from target of an annulled
		     branch, the effects are for the target only and so
		     the args_size and CFA along the current path
		     shouldn't change.  */
		  add_cfi_insn = NULL;
		  poly_int64 restore_args_size = cur_trace->end_true_args_size;
		  cur_cfa = &cur_row->cfa;
		  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

		  scan_insn_after (elt);

		  /* ??? Should we instead save the entire row state?  */
		  gcc_assert (!queued_reg_saves.length ());

		  create_trace_edges (control);

		  /* Undo the target-only effects for the fallthru path.  */
		  cur_trace->end_true_args_size = restore_args_size;
		  cur_row->cfa = this_cfa;
		  cur_row->reg_save = save_row_reg_save;
		  cur_cfa = &this_cfa;
		}
	      else
		{
		  /* If ELT is a annulled branch-taken instruction (i.e.
		     executed only when branch is not taken), the args_size
		     and CFA should not change through the jump.  */
		  create_trace_edges (control);

		  /* Update and continue with the trace.  */
		  add_cfi_insn = insn;
		  scan_insn_after (elt);
		  def_cfa_1 (&this_cfa);
		}
	      continue;
	    }

	  /* The insns in the delay slot should all be considered to happen
	     "before" a call insn.  Consider a call with a stack pointer
	     adjustment in the delay slot.  The backtrace from the callee
	     should include the sp adjustment.  Unfortunately, that leaves
	     us with an unavoidable unwinding error exactly at the call insn
	     itself.  For jump insns we'd prefer to avoid this error by
	     placing the notes after the sequence.  */
	  if (JUMP_P (control))
	    add_cfi_insn = insn;

	  for (i = 1; i < n; ++i)
	    {
	      elt = pat->insn (i);
	      scan_insn_after (elt);
	    }

	  /* Make sure any register saves are visible at the jump target.  */
	  dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  /* However, if there is some adjustment on the call itself, e.g.
	     a call_pop, that action should be considered to happen after
	     the call returns.  */
	  add_cfi_insn = insn;
	  scan_insn_after (control);
	}
      else
	{
	  /* Flush data before calls and jumps, and of course if necessary.  */
	  if (can_throw_internal (insn))
	    {
	      notice_eh_throw (insn);
	      dwarf2out_flush_queued_reg_saves ();
	    }
	  else if (!NONJUMP_INSN_P (insn)
		   || clobbers_queued_reg_save (insn)
		   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	    dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  add_cfi_insn = insn;
	  scan_insn_after (insn);
	  control = insn;
	}

      /* Between frame-related-p and args_size we might have otherwise
	 emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
	 once anything is emitted.  */
      if (any_cfis_emitted
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
	 same tests as are done to actually create the edges.  So
	 always call the routine and let it not create edges for
	 non-control-flow insns.  */
      create_trace_edges (control);
    }

  /* Clear the scan-state globals before returning to the caller.  */
  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}
|
|
2682
|
|
2683 /* Scan the function and create the initial set of CFI notes. */
|
|
2684
|
|
2685 static void
|
|
2686 create_cfi_notes (void)
|
|
2687 {
|
|
2688 dw_trace_info *ti;
|
|
2689
|
|
2690 gcc_checking_assert (!queued_reg_saves.exists ());
|
|
2691 gcc_checking_assert (!trace_work_list.exists ());
|
|
2692
|
|
2693 /* Always begin at the entry trace. */
|
|
2694 ti = &trace_info[0];
|
131
|
2695 scan_trace (ti, true);
|
111
|
2696
|
|
2697 while (!trace_work_list.is_empty ())
|
|
2698 {
|
|
2699 ti = trace_work_list.pop ();
|
131
|
2700 scan_trace (ti, false);
|
111
|
2701 }
|
|
2702
|
|
2703 queued_reg_saves.release ();
|
|
2704 trace_work_list.release ();
|
|
2705 }
|
|
2706
|
|
2707 /* Return the insn before the first NOTE_INSN_CFI after START. */
|
|
2708
|
|
2709 static rtx_insn *
|
|
2710 before_next_cfi_note (rtx_insn *start)
|
|
2711 {
|
|
2712 rtx_insn *prev = start;
|
|
2713 while (start)
|
|
2714 {
|
|
2715 if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
|
|
2716 return prev;
|
|
2717 prev = start;
|
|
2718 start = NEXT_INSN (start);
|
|
2719 }
|
|
2720 gcc_unreachable ();
|
|
2721 }
|
|
2722
|
|
/* Insert CFI notes between traces to properly change state between them.
   Works backwards so that remember/restore_state pairs land in the right
   order relative to other notes; afterwards, a second forward pass
   reconciles args_size at EH landing pads.  */

static void
connect_traces (void)
{
  unsigned i, n;
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  */
  unsigned ix, ix2;
  VEC_ORDERED_REMOVE_IF_FROM_TO (trace_info, ix, ix2, ti, 1,
				 trace_info.length (), ti->beg_row == NULL);
  FOR_EACH_VEC_ELT (trace_info, ix, ti)
    gcc_assert (ti->end_row != NULL);

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  n = trace_info.length ();
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
	 for the portion of the function in the alternate text
	 section.  The row state at the very beginning of that
	 new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
	old_row = cie_cfi_row;
      else
	{
	  old_row = prev_ti->end_row;
	  /* If there's no change from the previous end state, fine.  */
	  if (cfi_row_equal_p (old_row, ti->beg_row))
	    ;
	  /* Otherwise check for the common case of sharing state with
	     the beginning of an epilogue, but not the end.  Insert
	     remember/restore opcodes in that case.  */
	  else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
	    {
	      dw_cfi_ref cfi;

	      /* Note that if we blindly insert the remember at the
		 start of the trace, we can wind up increasing the
		 size of the unwind info due to extra advance opcodes.
		 Instead, put the remember immediately before the next
		 state change.  We know there must be one, because the
		 state at the beginning and head of the trace differ.  */
	      add_cfi_insn = before_next_cfi_note (prev_ti->head);
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_remember_state;
	      add_cfi (cfi);

	      add_cfi_insn = ti->head;
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_restore_state;
	      add_cfi (cfi);

	      /* With remember/restore in place, only the delta from
		 the remembered state needs to be emitted.  */
	      old_row = prev_ti->beg_row;
	    }
	  /* Otherwise, we'll simply change state from the previous end.  */
	}

      change_cfi_row (old_row, ti->beg_row);

      if (dump_file && add_cfi_insn != ti->head)
	{
	  rtx_insn *note;

	  fprintf (dump_file, "Fixup between trace %u and %u:\n",
		   prev_ti->id, ti->id);

	  note = ti->head;
	  do
	    {
	      note = NEXT_INSN (note);
	      gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
	      output_cfi_directive (dump_file, NOTE_CFI (note));
	    }
	  while (note != add_cfi_insn);
	}
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      poly_int64 prev_args_size = 0;

      for (i = 0; i < n; ++i)
	{
	  ti = &trace_info[i];

	  /* A new FDE (cold section) restarts with zero args_size.  */
	  if (ti->switch_sections)
	    prev_args_size = 0;

	  if (ti->eh_head == NULL)
	    continue;

	  /* We require either the incoming args_size values to match or the
	     presence of an insn setting it before the first EH insn.  */
	  gcc_assert (!ti->args_size_undefined || ti->args_size_defined_for_eh);

	  /* In the latter case, we force the creation of a CFI note.  */
	  if (ti->args_size_undefined
	      || maybe_ne (ti->beg_delay_args_size, prev_args_size))
	    {
	      /* ??? Search back to previous CFI note.  */
	      add_cfi_insn = PREV_INSN (ti->eh_head);
	      add_cfi_args_size (ti->beg_delay_args_size);
	    }

	  prev_args_size = ti->end_delay_args_size;
	}
    }
}
|
|
2850
|
|
/* Set up the pseudo-cfg of instruction traces, as described at the
   block comment at the top of the file.  */

static void
create_pseudo_cfg (void)
{
  bool saw_barrier, switch_sections;
  dw_trace_info ti;
  rtx_insn *insn;
  unsigned i;

  /* The first trace begins at the start of the function,
     and begins with the CIE row state.  */
  trace_info.create (16);
  memset (&ti, 0, sizeof (ti));
  ti.head = get_insns ();
  ti.beg_row = cie_cfi_row;
  ti.cfa_store = cie_cfi_row->cfa;
  ti.cfa_temp.reg = INVALID_REGNUM;
  trace_info.quick_push (ti);

  if (cie_return_save)
    /* NOTE(review): quick_push above copied TI by value into trace_info,
       so this safe_push lands on the local TI's vector, not on
       trace_info[0].regs_saved_in_regs — verify this ordering is
       intended (or that the first trace's copy is re-seeded later).  */
    ti.regs_saved_in_regs.safe_push (*cie_return_save);

  /* Walk all the insns, collecting start of trace locations.  */
  saw_barrier = false;
  switch_sections = false;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (BARRIER_P (insn))
	saw_barrier = true;
      else if (NOTE_P (insn)
	       && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  /* We should have just seen a barrier.  */
	  gcc_assert (saw_barrier);
	  switch_sections = true;
	}
      /* Watch out for save_point notes between basic blocks.
	 In particular, a note after a barrier.  Do not record these,
	 delaying trace creation until the label.  */
      else if (save_point_p (insn)
	       && (LABEL_P (insn) || !saw_barrier))
	{
	  /* Start a new trace at this insn; it inherits no row state
	     here — that is filled in later when traces are scanned.  */
	  memset (&ti, 0, sizeof (ti));
	  ti.head = insn;
	  ti.switch_sections = switch_sections;
	  ti.id = trace_info.length ();
	  trace_info.safe_push (ti);

	  saw_barrier = false;
	  switch_sections = false;
	}
    }

  /* Create the trace index after we've finished building trace_info,
     avoiding stale pointer problems due to reallocation.  */
  trace_index
    = new hash_table<trace_info_hasher> (trace_info.length ());
  dw_trace_info *tp;
  FOR_EACH_VEC_ELT (trace_info, i, tp)
    {
      dw_trace_info **slot;

      if (dump_file)
	fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
		 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
		 tp->switch_sections ? " (section switch)" : "");

      /* Each trace is keyed by the UID of its head insn; duplicates
	 would indicate two traces starting at the same insn.  */
      slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
      gcc_assert (*slot == NULL);
      *slot = tp;
    }
}
|
|
2925
|
|
/* Record the initial position of the return address.  RTL is
   INCOMING_RETURN_ADDR_RTX.  */

static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  poly_int64 offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  Decompose the address, which must be
	 SP-based, into a (stack pointer, offset) pair.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = rtx_to_poly_int64 (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -rtx_to_poly_int64 (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8.  Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  /* If the RA does not already live in its own DWARF column, record
     where it actually is: in another register (which also goes into
     regs_saved_in_regs via record_reg_saved_in_reg) or in a stack
     slot, rebased from SP-relative to CFA-relative.  */
  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      if (reg != INVALID_REGNUM)
	record_reg_saved_in_reg (rtl, pc_rtx);
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
|
|
2987
|
|
/* Compute the CIE's initial CFI state (shared by every FDE in the
   translation unit): the incoming CFA and the initial location of the
   return address.  Results are stored in the cie_cfi_vec / cie_cfi_row
   / cie_return_save globals; the cur_* globals are only borrowed for
   the duration of this call and reset to NULL on exit.  */

static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);

  /* Use a scratch trace so the add_cfi machinery has somewhere to
     record state while building the CIE row.  */
  memset (&cie_trace, 0, sizeof (cie_trace));
  cur_trace = &cie_trace;

  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset (&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  /* create_cie_data is called just once per TU, and when using .cfi_startproc
     is even done by the assembler rather than the compiler.  If the target
     has different incoming frame sp offsets depending on what kind of
     function it is, use a single constant offset for the target and
     if needed, adjust before the first instruction in insn stream.  */
  loc.offset = DEFAULT_INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
	 register, but choose a different return column.  This will result
	 in a DW_CFA_register for the return, and an entry in
	 regs_saved_in_regs to match.  If the target later stores that
	 return address register to the stack, we want to be able to emit
	 the DW_CFA_offset against the return column, not the intermediate
	 save register.  Save the contents of regs_saved_in_regs so that
	 we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
	{
	case 0:
	  break;
	case 1:
	  /* GC-allocate the single saved entry so it survives across
	     functions; release the scratch trace's vector.  */
	  cie_return_save = ggc_alloc<reg_saved_in_data> ();
	  *cie_return_save = cie_trace.regs_saved_in_regs[0];
	  cie_trace.regs_saved_in_regs.release ();
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* Done borrowing the cur_* globals.  */
  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
|
|
3044
|
|
/* Annotate the function with NOTE_INSN_CFI notes to record the CFI
   state at each location within the function.  These notes will be
   emitted during pass_final.  Always returns 0 (no extra TODO
   flags).  */

static unsigned int
execute_dwarf2_frame (void)
{
  /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file.  */
  dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);

  /* The first time we're called, compute the incoming frame state.  */
  if (cie_cfi_vec == NULL)
    create_cie_data ();

  dwarf2out_alloc_current_fde ();

  /* Partition the insn stream into traces.  */
  create_pseudo_cfg ();

  /* Do the work.  */
  create_cfi_notes ();
  connect_traces ();
  add_cfis_to_fde ();

  /* Free all the data we allocated.  */
  {
    size_t i;
    dw_trace_info *ti;

    FOR_EACH_VEC_ELT (trace_info, i, ti)
      ti->regs_saved_in_regs.release ();
  }
  trace_info.release ();

  delete trace_index;
  trace_index = NULL;

  return 0;
}
|
|
3083
|
|
3084 /* Convert a DWARF call frame info. operation to its string name */
|
|
3085
|
|
3086 static const char *
|
|
3087 dwarf_cfi_name (unsigned int cfi_opc)
|
|
3088 {
|
|
3089 const char *name = get_DW_CFA_name (cfi_opc);
|
|
3090
|
|
3091 if (name != NULL)
|
|
3092 return name;
|
|
3093
|
|
3094 return "DW_CFA_<unknown>";
|
|
3095 }
|
|
3096
|
|
/* This routine will generate the correct assembly data for a location
   description based on a cfi entry with a complex address.  FOR_EH
   selects the EH register-number mapping in DWARF2_FRAME_REG_OUT.  */

static void
output_cfa_loc (dw_cfi_ref cfi, int for_eh)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  /* DW_CFA_expression / DW_CFA_val_expression carry a target register
     in operand 1 and the location expression in operand 2;
     DW_CFA_def_cfa_expression carries only the expression.  */
  if (cfi->dw_cfi_opc == DW_CFA_expression
      || cfi->dw_cfi_opc == DW_CFA_val_expression)
    {
      unsigned r =
	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, r, NULL);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128 (size, NULL);

  /* Now output the operations themselves.  */
  output_loc_sequence (loc, for_eh);
}
|
|
3124
|
|
/* Similar, but used for .cfi_escape: the bytes are emitted as a
   comma-separated operand list via raw fprintf rather than as
   assembler data directives.  */

static void
output_cfa_loc_raw (dw_cfi_ref cfi)
{
  dw_loc_descr_ref loc;
  unsigned long size;

  /* Same operand layout as output_cfa_loc; the register mapping is
     always the EH one (second argument 1) for .cfi_escape.  */
  if (cfi->dw_cfi_opc == DW_CFA_expression
      || cfi->dw_cfi_opc == DW_CFA_val_expression)
    {
      unsigned r =
	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (asm_out_file, "%#x,", r);
      loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
    }
  else
    loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;

  /* Output the size of the block.  */
  size = size_of_locs (loc);
  dw2_asm_output_data_uleb128_raw (size);
  fputc (',', asm_out_file);

  /* Now output the operations themselves.  */
  output_loc_sequence_raw (loc);
}
|
|
3152
|
|
/* Output a Call Frame Information opcode and its operand(s) to the
   assembly stream.  FDE supplies (and receives updates of) the
   current label used for advance_loc deltas; FOR_EH selects the EH
   register-number mapping.  */

void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  /* Three opcodes pack their first operand into the low 6 bits of the
     opcode byte itself; handle those first.  */
  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      /* The offset operand is factored by the data alignment.  */
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      /* Everything else: one opcode byte, then LEB128/address operands
	 as dictated by the opcode.  */
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
		false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	/* The advance_locN forms emit a label delta of N bytes and
	   advance the FDE's notion of the current label.  */
	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  /* Note: def_cfa's offset is NOT factored by data alignment.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	/* Single-register-operand opcodes.  */
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_GNU_window_save:
	  /* No operands.  */
	  break;

	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	case DW_CFA_val_expression:
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  /* Remaining opcodes take no operands.  */
	  break;
	}
    }
}
|
|
3294
|
|
/* Similar, but do it via assembler directives instead.  F is either
   asm_out_file (real output) or a dump file (debugging), and a few
   cases print differently depending on which it is.  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routines is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* There is no dedicated directive for args_size; emit it as a raw
	 escape sequence for real output, or a readable pseudo-directive
	 in debugging dumps.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
    case DW_CFA_expression:
    case DW_CFA_val_expression:
      if (f != asm_out_file)
	{
	  /* Abbreviated form for debugging dumps.  */
	  fprintf (f, "\t.cfi_%scfa_%sexpression ...\n",
		   cfi->dw_cfi_opc == DW_CFA_def_cfa_expression ? "def_" : "",
		   cfi->dw_cfi_opc == DW_CFA_val_expression ? "val_" : "");
	  break;
	}
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
|
|
3414
|
|
3415 void
|
|
3416 dwarf2out_emit_cfi (dw_cfi_ref cfi)
|
|
3417 {
|
|
3418 if (dwarf2out_do_cfi_asm ())
|
|
3419 output_cfi_directive (asm_out_file, cfi);
|
|
3420 }
|
|
3421
|
|
/* Dump the complete CFI state in ROW to F as directives: first the CFA
   definition, then one directive per saved register.  */

static void
dump_cfi_row (FILE *f, dw_cfi_row *row)
{
  dw_cfi_ref cfi;
  unsigned i;

  cfi = row->cfa_cfi;
  if (!cfi)
    {
      /* The row has no preformed CFA instruction; synthesize one from
	 the raw CFA location, diffing against an invalid "from" state
	 so the full definition is produced.  */
      dw_cfa_location dummy;
      memset (&dummy, 0, sizeof (dummy));
      dummy.reg = INVALID_REGNUM;
      cfi = def_cfa_0 (&dummy, &row->cfa);
    }
  output_cfi_directive (f, cfi);

  FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
    if (cfi)
      output_cfi_directive (f, cfi);
}
|
|
3442
|
|
void debug_cfi_row (dw_cfi_row *row);

/* Dump ROW to stderr; intended to be called from a debugger.  */

void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
|
|
3450
|
|
3451
|
|
/* Save the result of dwarf2out_do_cfi_asm across PCH.
   This variable is tri-state, with 0 unset, >0 true, <0 false.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
|
|
3455
|
131
|
3456 /* Decide whether to emit EH frame unwind information for the current
|
|
3457 translation unit. */
|
|
3458
|
|
3459 bool
|
|
3460 dwarf2out_do_eh_frame (void)
|
|
3461 {
|
|
3462 return
|
|
3463 (flag_unwind_tables || flag_exceptions)
|
|
3464 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2;
|
|
3465 }
|
|
3466
|
111
|
3467 /* Decide whether we want to emit frame unwind information for the current
|
|
3468 translation unit. */
|
|
3469
|
|
3470 bool
|
|
3471 dwarf2out_do_frame (void)
|
|
3472 {
|
|
3473 /* We want to emit correct CFA location expressions or lists, so we
|
|
3474 have to return true if we're going to output debug info, even if
|
|
3475 we're not going to output frame or unwind info. */
|
|
3476 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
|
|
3477 return true;
|
|
3478
|
|
3479 if (saved_do_cfi_asm > 0)
|
|
3480 return true;
|
|
3481
|
|
3482 if (targetm.debug_unwind_info () == UI_DWARF2)
|
|
3483 return true;
|
|
3484
|
131
|
3485 if (dwarf2out_do_eh_frame ())
|
111
|
3486 return true;
|
|
3487
|
|
3488 return false;
|
|
3489 }
|
|
3490
|
|
/* Decide whether to emit frame unwind via assembler directives.
   The result is computed once and cached in saved_do_cfi_asm; note the
   cache is set to "false" up front so any early return sticks.  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

  /* Return the cached answer if we've been here before.  */
  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  The 0x70 mask
     extracts the DW_EH_PE application bits; only absolute (0) or
     pc-relative encodings are acceptable.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE && !dwarf2out_do_eh_frame ())
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
|
|
3527
|
|
namespace {

/* Pass descriptor for the dwarf2 frame annotation pass; timed under
   TV_FINAL since the notes it creates are consumed by pass_final.  */
const pass_data pass_data_dwarf2_frame =
{
  RTL_PASS, /* type */
  "dwarf2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_FINAL, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* RTL pass that annotates the insn stream with NOTE_INSN_CFI notes;
   all the work is delegated to execute_dwarf2_frame.  */
class pass_dwarf2_frame : public rtl_opt_pass
{
public:
  pass_dwarf2_frame (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }

}; // class pass_dwarf2_frame

/* Gate: run only for targets with an RTL prologue and only when dwarf2
   frame info is wanted.  */
bool
pass_dwarf2_frame::gate (function *)
{
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  if (!targetm.have_prologue ())
    return false;

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}

} // anon namespace
|
|
3571
|
|
/* Factory function for the dwarf2 frame pass; caller owns the
   returned pass object.  */

rtl_opt_pass *
make_pass_dwarf2_frame (gcc::context *ctxt)
{
  return new pass_dwarf2_frame (ctxt);
}
|
|
3577
|
|
3578 #include "gt-dwarf2cfi.h"
|