111
|
1 /* LRA (local register allocator) driver and LRA utilities.
|
145
|
2 Copyright (C) 2010-2020 Free Software Foundation, Inc.
|
111
|
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
|
|
4
|
|
5 This file is part of GCC.
|
|
6
|
|
7 GCC is free software; you can redistribute it and/or modify it under
|
|
8 the terms of the GNU General Public License as published by the Free
|
|
9 Software Foundation; either version 3, or (at your option) any later
|
|
10 version.
|
|
11
|
|
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
|
|
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
15 for more details.
|
|
16
|
|
17 You should have received a copy of the GNU General Public License
|
|
18 along with GCC; see the file COPYING3. If not see
|
|
19 <http://www.gnu.org/licenses/>. */
|
|
20
|
|
21
|
|
22 /* The Local Register Allocator (LRA) is a replacement of former
|
|
23 reload pass. It is focused to simplify code solving the reload
|
|
24 pass tasks, to make the code maintenance easier, and to implement new
|
|
25 perspective optimizations.
|
|
26
|
|
27 The major LRA design solutions are:
|
|
28 o division small manageable, separated sub-tasks
|
|
29 o reflection of all transformations and decisions in RTL as more
|
|
30 as possible
|
|
31 o insn constraints as a primary source of the info (minimizing
|
|
32 number of target-depended macros/hooks)
|
|
33
|
|
34 In brief LRA works by iterative insn process with the final goal is
|
|
35 to satisfy all insn and address constraints:
|
|
36 o New reload insns (in brief reloads) and reload pseudos might be
|
|
37 generated;
|
|
38 o Some pseudos might be spilled to assign hard registers to
|
|
39 new reload pseudos;
|
|
40 o Recalculating spilled pseudo values (rematerialization);
|
|
41 o Changing spilled pseudos to stack memory or their equivalences;
|
|
42 o Allocation stack memory changes the address displacement and
|
|
43 new iteration is needed.
|
|
44
|
|
45 Here is block diagram of LRA passes:
|
|
46
|
|
47 ------------------------
|
|
48 --------------- | Undo inheritance for | ---------------
|
|
49 | Memory-memory | | spilled pseudos, | | New (and old) |
|
|
50 | move coalesce |<---| splits for pseudos got |<-- | pseudos |
|
|
51 --------------- | the same hard regs, | | assignment |
|
|
52 Start | | and optional reloads | ---------------
|
|
53 | | ------------------------ ^
|
|
54 V | ---------------- |
|
|
55 ----------- V | Update virtual | |
|
|
56 | Remove |----> ------------>| register | |
|
|
57 | scratches | ^ | displacements | |
|
|
58 ----------- | ---------------- |
|
|
59 | | |
|
|
60 | V New |
|
|
61 | ------------ pseudos -------------------
|
|
62 | |Constraints:| or insns | Inheritance/split |
|
|
63 | | RTL |--------->| transformations |
|
|
64 | | transfor- | | in EBB scope |
|
|
65 | substi- | mations | -------------------
|
|
66 | tutions ------------
|
|
67 | | No change
|
|
68 ---------------- V
|
|
69 | Spilled pseudo | -------------------
|
|
70 | to memory |<----| Rematerialization |
|
|
71 | substitution | -------------------
|
|
72 ----------------
|
|
73 | No susbtitions
|
|
74 V
|
|
75 -------------------------
|
|
76 | Hard regs substitution, |
|
|
77 | devirtalization, and |------> Finish
|
|
78 | restoring scratches got |
|
|
79 | memory |
|
|
80 -------------------------
|
|
81
|
|
82 To speed up the process:
|
|
83 o We process only insns affected by changes on previous
|
|
84 iterations;
|
|
85 o We don't use DFA-infrastructure because it results in much slower
|
|
86 compiler speed than a special IR described below does;
|
|
87 o We use a special insn representation for quick access to insn
|
|
88 info which is always *synchronized* with the current RTL;
|
|
89 o Insn IR is minimized by memory. It is divided on three parts:
|
|
90 o one specific for each insn in RTL (only operand locations);
|
|
91 o one common for all insns in RTL with the same insn code
|
|
92 (different operand attributes from machine descriptions);
|
|
93 o one oriented for maintenance of live info (list of pseudos).
|
|
94 o Pseudo data:
|
|
95 o all insns where the pseudo is referenced;
|
|
96 o live info (conflicting hard regs, live ranges, # of
|
|
97 references etc);
|
|
98 o data used for assigning (preferred hard regs, costs etc).
|
|
99
|
|
100 This file contains LRA driver, LRA utility functions and data, and
|
|
101 code for dealing with scratches. */
|
|
102
|
|
103 #include "config.h"
|
|
104 #include "system.h"
|
|
105 #include "coretypes.h"
|
|
106 #include "backend.h"
|
|
107 #include "target.h"
|
|
108 #include "rtl.h"
|
|
109 #include "tree.h"
|
|
110 #include "predict.h"
|
|
111 #include "df.h"
|
|
112 #include "memmodel.h"
|
|
113 #include "tm_p.h"
|
|
114 #include "optabs.h"
|
|
115 #include "regs.h"
|
|
116 #include "ira.h"
|
|
117 #include "recog.h"
|
|
118 #include "expr.h"
|
|
119 #include "cfgrtl.h"
|
|
120 #include "cfgbuild.h"
|
|
121 #include "lra.h"
|
|
122 #include "lra-int.h"
|
|
123 #include "print-rtl.h"
|
145
|
124 #include "function-abi.h"
|
111
|
125
|
|
126 /* Dump bitmap SET with TITLE and BB INDEX. */
|
|
127 void
|
|
128 lra_dump_bitmap_with_title (const char *title, bitmap set, int index)
|
|
129 {
|
|
130 unsigned int i;
|
|
131 int count;
|
|
132 bitmap_iterator bi;
|
|
133 static const int max_nums_on_line = 10;
|
|
134
|
|
135 if (bitmap_empty_p (set))
|
|
136 return;
|
|
137 fprintf (lra_dump_file, " %s %d:", title, index);
|
|
138 fprintf (lra_dump_file, "\n");
|
|
139 count = max_nums_on_line + 1;
|
|
140 EXECUTE_IF_SET_IN_BITMAP (set, 0, i, bi)
|
|
141 {
|
|
142 if (count > max_nums_on_line)
|
|
143 {
|
|
144 fprintf (lra_dump_file, "\n ");
|
|
145 count = 0;
|
|
146 }
|
|
147 fprintf (lra_dump_file, " %4u", i);
|
|
148 count++;
|
|
149 }
|
|
150 fprintf (lra_dump_file, "\n");
|
|
151 }
|
|
152
|
|
/* Hard registers currently not available for allocation.  It can
   change after some hard registers become not eliminable.  */
HARD_REG_SET lra_no_alloc_regs;

/* Forward declarations for helpers defined later in this file.  */
static int get_new_reg_value (void);
static void expand_reg_info (void);
static void invalidate_insn_recog_data (int);
static int get_insn_freq (rtx_insn *);
static void invalidate_insn_data_regno_info (lra_insn_recog_data_t,
					     rtx_insn *, int);
static void remove_scratches_1 (rtx_insn *);
|
111
|
164
|
|
165 /* Expand all regno related info needed for LRA. */
|
|
166 static void
|
|
167 expand_reg_data (int old)
|
|
168 {
|
|
169 resize_reg_info ();
|
|
170 expand_reg_info ();
|
|
171 ira_expand_reg_equiv ();
|
|
172 for (int i = (int) max_reg_num () - 1; i >= old; i--)
|
|
173 lra_change_class (i, ALL_REGS, " Set", true);
|
|
174 }
|
|
175
|
|
/* Create and return a new reg of ORIGINAL mode.  If ORIGINAL is NULL
   or of VOIDmode, use MD_MODE for the new reg.  Initialize its
   register class to RCLASS.  Print message about assigning class
   RCLASS containing new register name TITLE unless it is NULL.  Use
   attributes of ORIGINAL if it is a register.  The created register
   will have unique held value.  */
rtx
lra_create_new_reg_with_unique_value (machine_mode md_mode, rtx original,
				      enum reg_class rclass, const char *title)
{
  machine_mode mode;
  rtx new_reg;

  /* Fall back to MD_MODE when ORIGINAL gives no usable mode.  */
  if (original == NULL_RTX || (mode = GET_MODE (original)) == VOIDmode)
    mode = md_mode;
  lra_assert (mode != VOIDmode);
  new_reg = gen_reg_rtx (mode);
  if (original == NULL_RTX || ! REG_P (original))
    {
      if (lra_dump_file != NULL)
	fprintf (lra_dump_file, "      Creating newreg=%i", REGNO (new_reg));
    }
  else
    {
      /* Copy the user-visible attributes from ORIGINAL so debug info
	 and pointerness survive the reload.  */
      if (ORIGINAL_REGNO (original) >= FIRST_PSEUDO_REGISTER)
	ORIGINAL_REGNO (new_reg) = ORIGINAL_REGNO (original);
      REG_USERVAR_P (new_reg) = REG_USERVAR_P (original);
      REG_POINTER (new_reg) = REG_POINTER (original);
      REG_ATTRS (new_reg) = REG_ATTRS (original);
      if (lra_dump_file != NULL)
	fprintf (lra_dump_file, "      Creating newreg=%i from oldreg=%i",
		 REGNO (new_reg), REGNO (original));
    }
  if (lra_dump_file != NULL)
    {
      if (title != NULL)
	fprintf (lra_dump_file, ", assigning class %s to%s%s r%d",
		 reg_class_names[rclass], *title == '\0' ? "" : " ",
		 title, REGNO (new_reg));
      fprintf (lra_dump_file, "\n");
    }
  /* gen_reg_rtx grew the pseudo tables; resize LRA's per-regno data.  */
  expand_reg_data (max_reg_num ());
  setup_reg_classes (REGNO (new_reg), rclass, NO_REGS, rclass);
  return new_reg;
}
|
|
221
|
|
222 /* Analogous to the previous function but also inherits value of
|
|
223 ORIGINAL. */
|
|
224 rtx
|
|
225 lra_create_new_reg (machine_mode md_mode, rtx original,
|
|
226 enum reg_class rclass, const char *title)
|
|
227 {
|
|
228 rtx new_reg;
|
|
229
|
|
230 new_reg
|
|
231 = lra_create_new_reg_with_unique_value (md_mode, original, rclass, title);
|
|
232 if (original != NULL_RTX && REG_P (original))
|
|
233 lra_assign_reg_val (REGNO (original), REGNO (new_reg));
|
|
234 return new_reg;
|
|
235 }
|
|
236
|
|
/* Set up for REGNO unique hold value.  After this call no other
   pseudo is considered to hold the same value as REGNO.  */
void
lra_set_regno_unique_value (int regno)
{
  lra_reg_info[regno].val = get_new_reg_value ();
}
|
|
243
|
|
/* Invalidate INSN related info used by LRA.  The info should never be
   used after that.  */
void
lra_invalidate_insn_data (rtx_insn *insn)
{
  /* Drop both the per-regno references and the cached recog data.  */
  lra_invalidate_insn_regno_info (insn);
  invalidate_insn_recog_data (INSN_UID (insn));
}
|
|
252
|
|
/* Mark INSN deleted and invalidate the insn related info used by
   LRA.  */
void
lra_set_insn_deleted (rtx_insn *insn)
{
  /* Invalidate first: the data references the still-live insn.  */
  lra_invalidate_insn_data (insn);
  SET_INSN_DELETED (insn);
}
|
|
261
|
|
/* Delete an unneeded INSN and any previous insns whose sole purpose is
   loading data that is dead in INSN.  */
void
lra_delete_dead_insn (rtx_insn *insn)
{
  rtx_insn *prev = prev_real_insn (insn);
  rtx prev_dest;

  /* If the previous insn sets a register that dies in our insn,
     delete it too.  Recurses so a whole chain of dead loads goes.  */
  if (prev && GET_CODE (PATTERN (prev)) == SET
      && (prev_dest = SET_DEST (PATTERN (prev)), REG_P (prev_dest))
      && reg_mentioned_p (prev_dest, PATTERN (insn))
      && find_regno_note (insn, REG_DEAD, REGNO (prev_dest))
      /* A side-effecting source must stay even if its result dies.  */
      && ! side_effects_p (SET_SRC (PATTERN (prev))))
    lra_delete_dead_insn (prev);

  lra_set_insn_deleted (insn);
}
|
|
281
|
|
/* Emit insn x = y + z.  Return NULL if we failed to do it.
   Otherwise, return the insn.  We don't use gen_add3_insn as it might
   clobber CC.  */
static rtx_insn *
emit_add3_insn (rtx x, rtx y, rtx z)
{
  rtx_insn *last;

  /* Remember the stream position so a failed attempt can be undone.  */
  last = get_last_insn ();

  if (have_addptr3_insn (x, y, z))
    {
      rtx_insn *insn = gen_addptr3_insn (x, y, z);

      /* If the target provides an "addptr" pattern it hopefully does
	 for a reason.  So falling back to the normal add would be
	 a bug.  */
      lra_assert (insn != NULL_RTX);
      emit_insn (insn);
      return insn;
    }

  rtx_insn *insn = emit_insn (gen_rtx_SET (x, gen_rtx_PLUS (GET_MODE (y),
							    y, z)));
  /* If the target does not recognize the plain add, delete it again
     and report failure to the caller.  */
  if (recog_memoized (insn) < 0)
    {
      delete_insns_since (last);
      insn = NULL;
    }
  return insn;
}
|
|
313
|
|
314 /* Emit insn x = x + y. Return the insn. We use gen_add2_insn as the
|
|
315 last resort. */
|
|
316 static rtx_insn *
|
|
317 emit_add2_insn (rtx x, rtx y)
|
|
318 {
|
|
319 rtx_insn *insn = emit_add3_insn (x, x, y);
|
|
320 if (insn == NULL_RTX)
|
|
321 {
|
|
322 insn = gen_add2_insn (x, y);
|
|
323 if (insn != NULL_RTX)
|
|
324 emit_insn (insn);
|
|
325 }
|
|
326 return insn;
|
|
327 }
|
|
328
|
|
/* Target checks operands through operand predicates to recognize an
   insn.  We should have a special precaution to generate add insns
   which are frequent results of elimination.

   Emit insns for x = y + z.  X can be used to store intermediate
   values and should be not in Y and Z when we use X to store an
   intermediate value.  Y + Z should form [base] [+ index[ * scale]] [
   + disp] where base and index are registers, disp and scale are
   constants.  Y should contain base if it is present, Z should
   contain disp if any.  index[*scale] can be part of Y or Z.  */
void
lra_emit_add (rtx x, rtx y, rtx z)
{
  int old;
  rtx_insn *last;
  rtx a1, a2, base, index, disp, scale, index_scale;
  bool ok_p;

  /* First try the direct 3-operand add.  */
  rtx_insn *add3_insn = emit_add3_insn (x, y, z);
  old = max_reg_num ();
  if (add3_insn != NULL)
    ;
  else
    {
      /* Decompose Y + Z into base, index, scale and displacement
	 components.  */
      disp = a2 = NULL_RTX;
      if (GET_CODE (y) == PLUS)
	{
	  a1 = XEXP (y, 0);
	  a2 = XEXP (y, 1);
	  disp = z;
	}
      else
	{
	  a1 = y;
	  if (CONSTANT_P (z))
	    disp = z;
	  else
	    a2 = z;
	}
      index_scale = scale = NULL_RTX;
      if (GET_CODE (a1) == MULT)
	{
	  index_scale = a1;
	  index = XEXP (a1, 0);
	  scale = XEXP (a1, 1);
	  base = a2;
	}
      else if (a2 != NULL_RTX && GET_CODE (a2) == MULT)
	{
	  index_scale = a2;
	  index = XEXP (a2, 0);
	  scale = XEXP (a2, 1);
	  base = a1;
	}
      else
	{
	  base = a1;
	  index = a2;
	}
      /* Bail out to the simple move+add2 sequence if the pieces do
	 not look like a well-formed address.  */
      if ((base != NULL_RTX && ! (REG_P (base) || GET_CODE (base) == SUBREG))
	  || (index != NULL_RTX
	      && ! (REG_P (index) || GET_CODE (index) == SUBREG))
	  || (disp != NULL_RTX && ! CONSTANT_P (disp))
	  || (scale != NULL_RTX && ! CONSTANT_P (scale)))
	{
	  /* Probably we have no 3 op add.  Last chance is to use 2-op
	     add insn.  To succeed, don't move Z to X as an address
	     segment always comes in Y.  Otherwise, we might fail when
	     adding the address segment to register.  */
	  lra_assert (x != y && x != z);
	  emit_move_insn (x, y);
	  rtx_insn *insn = emit_add2_insn (x, z);
	  lra_assert (insn != NULL_RTX);
	}
      else
	{
	  if (index_scale == NULL_RTX)
	    index_scale = index;
	  if (disp == NULL_RTX)
	    {
	      /* Generate x = index_scale; x = x + base.  */
	      lra_assert (index_scale != NULL_RTX && base != NULL_RTX);
	      emit_move_insn (x, index_scale);
	      rtx_insn *insn = emit_add2_insn (x, base);
	      lra_assert (insn != NULL_RTX);
	    }
	  else if (scale == NULL_RTX)
	    {
	      /* Try x = base + disp.  */
	      lra_assert (base != NULL_RTX);
	      last = get_last_insn ();
	      rtx_insn *move_insn =
		emit_move_insn (x, gen_rtx_PLUS (GET_MODE (base), base, disp));
	      if (recog_memoized (move_insn) < 0)
		{
		  delete_insns_since (last);
		  /* Generate x = disp; x = x + base.  */
		  emit_move_insn (x, disp);
		  rtx_insn *add2_insn = emit_add2_insn (x, base);
		  lra_assert (add2_insn != NULL_RTX);
		}
	      /* Generate x = x + index.  */
	      if (index != NULL_RTX)
		{
		  rtx_insn *insn = emit_add2_insn (x, index);
		  lra_assert (insn != NULL_RTX);
		}
	    }
	  else
	    {
	      /* Try x = index_scale; x = x + disp; x = x + base.  */
	      last = get_last_insn ();
	      rtx_insn *move_insn = emit_move_insn (x, index_scale);
	      ok_p = false;
	      if (recog_memoized (move_insn) >= 0)
		{
		  rtx_insn *insn = emit_add2_insn (x, disp);
		  if (insn != NULL_RTX)
		    {
		      if (base == NULL_RTX)
			ok_p = true;
		      else
			{
			  insn = emit_add2_insn (x, base);
			  if (insn != NULL_RTX)
			    ok_p = true;
			}
		    }
		}
	      if (! ok_p)
		{
		  rtx_insn *insn;

		  delete_insns_since (last);
		  /* Generate x = disp; x = x + base; x = x + index_scale.  */
		  emit_move_insn (x, disp);
		  if (base != NULL_RTX)
		    {
		      insn = emit_add2_insn (x, base);
		      lra_assert (insn != NULL_RTX);
		    }
		  insn = emit_add2_insn (x, index_scale);
		  lra_assert (insn != NULL_RTX);
		}
	    }
	}
    }
  /* Functions emit_... can create pseudos -- so expand the pseudo
     data.  */
  if (old != max_reg_num ())
    expand_reg_data (old);
}
|
|
481
|
|
/* The number of emitted reload insns so far.  Used to stamp
   lra_reg_info[].last_reload in lra_emit_move.  */
int lra_curr_reload_num;
|
|
484
|
|
/* Emit x := y, processing special case when y = u + v or y = u + v *
   scale + w through emit_add (Y can be an address which is base +
   index reg * scale + displacement in general case).  X may be used
   as intermediate result therefore it should be not in Y.  */
void
lra_emit_move (rtx x, rtx y)
{
  int old;

  if (GET_CODE (y) != PLUS)
    {
      if (rtx_equal_p (x, y))
	return;
      old = max_reg_num ();
      rtx_insn *insn = emit_move_insn (x, y);
      /* The move pattern may require scratch registers, so convert them
	 into real registers now.  */
      if (insn != NULL_RTX)
	remove_scratches_1 (insn);
      /* Record that X was just (re)loaded, for inheritance decisions.  */
      if (REG_P (x))
	lra_reg_info[ORIGINAL_REGNO (x)].last_reload = ++lra_curr_reload_num;
      /* Function emit_move can create pseudos -- so expand the pseudo
	 data.  */
      if (old != max_reg_num ())
	expand_reg_data (old);
      return;
    }
  /* Y is an address-like sum; go through the careful add emitter.  */
  lra_emit_add (x, XEXP (y, 0), XEXP (y, 1));
}
|
|
514
|
|
515 /* Update insn operands which are duplication of operands whose
|
|
516 numbers are in array of NOPS (with end marker -1). The insn is
|
|
517 represented by its LRA internal representation ID. */
|
|
518 void
|
|
519 lra_update_dups (lra_insn_recog_data_t id, signed char *nops)
|
|
520 {
|
|
521 int i, j, nop;
|
|
522 struct lra_static_insn_data *static_id = id->insn_static_data;
|
|
523
|
|
524 for (i = 0; i < static_id->n_dups; i++)
|
|
525 for (j = 0; (nop = nops[j]) >= 0; j++)
|
|
526 if (static_id->dup_num[i] == nop)
|
|
527 *id->dup_loc[i] = *id->operand_loc[nop];
|
|
528 }
|
|
529
|
|
530
|
|
531
|
|
532 /* This page contains code dealing with info about registers in the
|
|
533 insns. */
|
|
534
|
|
535 /* Pools for insn reg info. */
|
|
536 object_allocator<lra_insn_reg> lra_insn_reg_pool ("insn regs");
|
|
537
|
|
/* Create LRA insn related info about a reference to REGNO in INSN
   with TYPE (in/out/inout), biggest reference mode MODE, flag that it
   is reference through subreg (SUBREG_P), and reference to the next
   insn reg info (NEXT).  If REGNO can be early clobbered,
   alternatives in which it can be early clobbered are given by
   EARLY_CLOBBER_ALTS.  Return the new list head.  */
static struct lra_insn_reg *
new_insn_reg (rtx_insn *insn, int regno, enum op_type type,
	      machine_mode mode, bool subreg_p,
	      alternative_mask early_clobber_alts,
	      struct lra_insn_reg *next)
{
  lra_insn_reg *ir = lra_insn_reg_pool.allocate ();
  ir->type = type;
  ir->biggest_mode = mode;
  /* Debug insns must not affect the recorded biggest mode of the
     pseudo, hence the NONDEBUG_INSN_P check.  */
  if (NONDEBUG_INSN_P (insn)
      && partial_subreg_p (lra_reg_info[regno].biggest_mode, mode))
    lra_reg_info[regno].biggest_mode = mode;
  ir->subreg_p = subreg_p;
  ir->early_clobber_alts = early_clobber_alts;
  ir->regno = regno;
  ir->next = next;
  return ir;
}
|
|
562
|
|
563 /* Free insn reg info list IR. */
|
|
564 static void
|
|
565 free_insn_regs (struct lra_insn_reg *ir)
|
|
566 {
|
|
567 struct lra_insn_reg *next_ir;
|
|
568
|
|
569 for (; ir != NULL; ir = next_ir)
|
|
570 {
|
|
571 next_ir = ir->next;
|
|
572 lra_insn_reg_pool.remove (ir);
|
|
573 }
|
|
574 }
|
|
575
|
|
/* Finish pool for insn reg info.  All outstanding lra_insn_reg
   objects become invalid after this.  */
static void
finish_insn_regs (void)
{
  lra_insn_reg_pool.release ();
}
|
|
582
|
|
583
|
|
584
|
|
585 /* This page contains code dealing LRA insn info (or in other words
|
|
586 LRA internal insn representation). */
|
|
587
|
|
/* Map INSN_CODE -> the static insn data.  This info is valid for the
   whole translation unit.  Lazily filled by get_static_insn_data.  */
struct lra_static_insn_data *insn_code_data[NUM_INSN_CODES];
|
|
591
|
|
/* Debug insns are represented as a special insn with one input
   operand which is RTL expression in var_location.  */

/* The following data are used as static insn operand data for all
   debug insns.  If structure lra_operand_data is changed, the
   initializer should be changed too.  */
static struct lra_operand_data debug_operand_data =
  {
    NULL, /* alternative  */
    0, /* early_clobber_alts */
    E_VOIDmode, /* The operand mode is of no interest for debug insns.  */
    OP_IN,
    0, 0, 0
  };
|
|
606
|
|
/* The following data are used as static insn data for all debug
   bind insns.  If structure lra_static_insn_data is changed, the
   initializer should be changed too.  */
static struct lra_static_insn_data debug_bind_static_data =
  {
    &debug_operand_data,
    0,	/* Duplication operands #.  */
    -1, /* Commutative operand #.  */
    1,	/* Operands #.  There is only one operand which is debug RTL
	   expression.  */
    0,	/* Duplications #.  */
    0,	/* Alternatives #.  We are not interested in alternatives
	   because we do not process debug insns for reloads.  */
    NULL, /* Hard registers referenced in machine description.  */
    NULL  /* Descriptions of operands in alternatives.  */
  };
|
|
623
|
131
|
624 /* The following data are used as static insn data for all debug
|
|
625 marker insns. If structure lra_static_insn_data is changed, the
|
|
626 initializer should be changed too. */
|
|
627 static struct lra_static_insn_data debug_marker_static_data =
|
|
628 {
|
|
629 &debug_operand_data,
|
|
630 0, /* Duplication operands #. */
|
|
631 -1, /* Commutative operand #. */
|
|
632 0, /* Operands #. There isn't any operand. */
|
|
633 0, /* Duplications #. */
|
|
634 0, /* Alternatives #. We are not interesting in alternatives
|
|
635 because we does not proceed debug_insns for reloads. */
|
|
636 NULL, /* Hard registers referenced in machine description. */
|
|
637 NULL /* Descriptions of operands in alternatives. */
|
|
638 };
|
|
639
|
111
|
/* Called once per compiler work to initialize some LRA data related
   to insns.  Clears the per-icode static data cache.  */
static void
init_insn_code_data_once (void)
{
  memset (insn_code_data, 0, sizeof (insn_code_data));
}
|
|
647
|
|
648 /* Called once per compiler work to finalize some LRA data related to
|
|
649 insns. */
|
|
650 static void
|
|
651 finish_insn_code_data_once (void)
|
|
652 {
|
|
653 for (unsigned int i = 0; i < NUM_INSN_CODES; i++)
|
|
654 {
|
|
655 if (insn_code_data[i] != NULL)
|
|
656 free (insn_code_data[i]);
|
|
657 }
|
|
658 }
|
|
659
|
|
/* Return static insn data, allocate and setup if necessary.  Although
   dup_num is static data (it depends only on icode), to set it up we
   need to extract insn first.  So recog_data should be valid for
   normal insn (ICODE >= 0) before the call.  */
static struct lra_static_insn_data *
get_static_insn_data (int icode, int nop, int ndup, int nalt)
{
  struct lra_static_insn_data *data;
  size_t n_bytes;

  lra_assert (icode < (int) NUM_INSN_CODES);
  /* Recognized insns are cached per icode.  */
  if (icode >= 0 && (data = insn_code_data[icode]) != NULL)
    return data;
  lra_assert (nop >= 0 && ndup >= 0 && nalt >= 0);
  /* One allocation holds the header, NOP operand records and NDUP dup
     numbers; the operand and dup_num pointers aim into this chunk.  */
  n_bytes = sizeof (struct lra_static_insn_data)
    + sizeof (struct lra_operand_data) * nop
    + sizeof (int) * ndup;
  data = XNEWVAR (struct lra_static_insn_data, n_bytes);
  data->operand_alternative = NULL;
  data->n_operands = nop;
  data->n_dups = ndup;
  data->n_alternatives = nalt;
  data->operand = ((struct lra_operand_data *)
		   ((char *) data + sizeof (struct lra_static_insn_data)));
  data->dup_num = ((int *) ((char *) data->operand
			    + sizeof (struct lra_operand_data) * nop));
  if (icode >= 0)
    {
      int i;

      /* Cache for later calls with the same icode.  */
      insn_code_data[icode] = data;
      for (i = 0; i < nop; i++)
	{
	  data->operand[i].constraint
	    = insn_data[icode].operand[i].constraint;
	  data->operand[i].mode = insn_data[icode].operand[i].mode;
	  data->operand[i].strict_low = insn_data[icode].operand[i].strict_low;
	  data->operand[i].is_operator
	    = insn_data[icode].operand[i].is_operator;
	  /* Operand direction is derived from the leading constraint
	     character: '=' output, '+' in/out, otherwise input.  */
	  data->operand[i].type
	    = (data->operand[i].constraint[0] == '=' ? OP_OUT
	       : data->operand[i].constraint[0] == '+' ? OP_INOUT
	       : OP_IN);
	  data->operand[i].is_address = false;
	}
      for (i = 0; i < ndup; i++)
	data->dup_num[i] = recog_data.dup_num[i];
    }
  return data;
}
|
|
710
|
|
/* The current length of the following array.  */
int lra_insn_recog_data_len;

/* Map INSN_UID -> the insn recog data (NULL if unknown).  */
lra_insn_recog_data_t *lra_insn_recog_data;

/* Alloc pool we allocate entries for lra_insn_recog_data from.  */
static object_allocator<class lra_insn_recog_data>
  lra_insn_recog_data_pool ("insn recog data pool");
|
111
|
/* Initialize LRA data about insns.  The map starts empty and is grown
   on demand by check_and_expand_insn_recog_data.  */
static void
init_insn_recog_data (void)
{
  lra_insn_recog_data_len = 0;
  lra_insn_recog_data = NULL;
}
|
|
728
|
|
729 /* Expand, if necessary, LRA data about insns. */
|
|
730 static void
|
|
731 check_and_expand_insn_recog_data (int index)
|
|
732 {
|
|
733 int i, old;
|
|
734
|
|
735 if (lra_insn_recog_data_len > index)
|
|
736 return;
|
|
737 old = lra_insn_recog_data_len;
|
|
738 lra_insn_recog_data_len = index * 3 / 2 + 1;
|
|
739 lra_insn_recog_data = XRESIZEVEC (lra_insn_recog_data_t,
|
|
740 lra_insn_recog_data,
|
|
741 lra_insn_recog_data_len);
|
|
742 for (i = old; i < lra_insn_recog_data_len; i++)
|
|
743 lra_insn_recog_data[i] = NULL;
|
|
744 }
|
|
745
|
|
746 /* Finish LRA DATA about insn. */
|
|
747 static void
|
|
748 free_insn_recog_data (lra_insn_recog_data_t data)
|
|
749 {
|
|
750 if (data->operand_loc != NULL)
|
|
751 free (data->operand_loc);
|
|
752 if (data->dup_loc != NULL)
|
|
753 free (data->dup_loc);
|
|
754 if (data->arg_hard_regs != NULL)
|
|
755 free (data->arg_hard_regs);
|
|
756 if (data->icode < 0 && NONDEBUG_INSN_P (data->insn))
|
|
757 {
|
|
758 if (data->insn_static_data->operand_alternative != NULL)
|
|
759 free (const_cast <operand_alternative *>
|
|
760 (data->insn_static_data->operand_alternative));
|
|
761 free_insn_regs (data->insn_static_data->hard_regs);
|
|
762 free (data->insn_static_data);
|
|
763 }
|
|
764 free_insn_regs (data->regs);
|
|
765 data->regs = NULL;
|
145
|
766 lra_insn_recog_data_pool.remove (data);
|
111
|
767 }
|
|
768
|
|
/* Pool for lra_copy objects describing register copies.  */
static object_allocator<lra_copy> lra_copy_pool ("lra copies");
|
|
771
|
|
/* Finish LRA data about all insns: free per-insn recog data, then
   release all the pools and the UID map itself.  */
static void
finish_insn_recog_data (void)
{
  int i;
  lra_insn_recog_data_t data;

  for (i = 0; i < lra_insn_recog_data_len; i++)
    if ((data = lra_insn_recog_data[i]) != NULL)
      free_insn_recog_data (data);
  finish_insn_regs ();
  /* Pool releases must come after the per-entry frees above, which
     still return objects into the pools.  */
  lra_copy_pool.release ();
  lra_insn_reg_pool.release ();
  lra_insn_recog_data_pool.release ();
  free (lra_insn_recog_data);
}
|
|
788
|
|
/* Setup info about operands in alternatives of LRA DATA of insn.
   OP_ALT points to the preprocessed constraint data, laid out
   alternative-major (n_alternatives groups of n_operands entries).  */
static void
setup_operand_alternative (lra_insn_recog_data_t data,
			   const operand_alternative *op_alt)
{
  int i, j, nop, nalt;
  int icode = data->icode;
  struct lra_static_insn_data *static_data = data->insn_static_data;

  static_data->commutative = -1;
  nop = static_data->n_operands;
  nalt = static_data->n_alternatives;
  static_data->operand_alternative = op_alt;
  for (i = 0; i < nop; i++)
    {
      static_data->operand[i].early_clobber_alts = 0;
      static_data->operand[i].is_address = false;
      /* '%' marks the operand as commutative with its successor.  */
      if (static_data->operand[i].constraint[0] == '%')
	{
	  /* We currently only support one commutative pair of operands.  */
	  if (static_data->commutative < 0)
	    static_data->commutative = i;
	  else
	    lra_assert (icode < 0); /* Asm  */
	  /* The last operand should not be marked commutative.  */
	  lra_assert (i != nop - 1);
	}
    }
  /* Accumulate per-operand earlyclobber/address info across all
     alternatives.  */
  for (j = 0; j < nalt; j++)
    for (i = 0; i < nop; i++, op_alt++)
      {
	if (op_alt->earlyclobber)
	  static_data->operand[i].early_clobber_alts |= (alternative_mask) 1 << j;
	static_data->operand[i].is_address |= op_alt->is_address;
      }
}
|
|
825
|
|
/* Recursively process X and collect info about registers, which are
   not the insn operands, in X with TYPE (in/out/inout) and flag that
   it is early clobbered in the insn (EARLY_CLOBBER) and add the info
   to LIST.  X is a part of insn given by DATA.  Return the result
   list.  */
static struct lra_insn_reg *
collect_non_operand_hard_regs (rtx_insn *insn, rtx *x,
			       lra_insn_recog_data_t data,
			       struct lra_insn_reg *list,
			       enum op_type type, bool early_clobber)
{
  int i, j, regno, last;
  bool subreg_p;
  machine_mode mode;
  struct lra_insn_reg *curr;
  rtx op = *x;
  enum rtx_code code = GET_CODE (op);
  /* NB: refreshed in the default case below after a possible SUBREG
     unwrap changes CODE.  */
  const char *fmt = GET_RTX_FORMAT (code);

  /* Operand and dup locations are handled elsewhere; skip them.  */
  for (i = 0; i < data->insn_static_data->n_operands; i++)
    if (! data->insn_static_data->operand[i].is_operator
	&& x == data->operand_loc[i])
      /* It is an operand loc.  Stop here.  */
      return list;
  for (i = 0; i < data->insn_static_data->n_dups; i++)
    if (x == data->dup_loc[i])
      /* It is a dup loc.  Stop here.  */
      return list;
  mode = GET_MODE (op);
  subreg_p = false;
  if (code == SUBREG)
    {
      /* Look through the subreg; record the widest mode touched.  */
      mode = wider_subreg_mode (op);
      if (read_modify_subreg_p (op))
	subreg_p = true;
      op = SUBREG_REG (op);
      code = GET_CODE (op);
    }
  if (REG_P (op))
    {
      if ((regno = REGNO (op)) >= FIRST_PSEUDO_REGISTER)
	return list;
      /* Process all regs even unallocatable ones as we need info
	 about all regs for rematerialization pass.  */
      for (last = end_hard_regno (mode, regno); regno < last; regno++)
	{
	  /* Try to merge into an existing entry for this hard reg.  */
	  for (curr = list; curr != NULL; curr = curr->next)
	    if (curr->regno == regno && curr->subreg_p == subreg_p
		&& curr->biggest_mode == mode)
	      {
		if (curr->type != type)
		  curr->type = OP_INOUT;
		if (early_clobber)
		  curr->early_clobber_alts = ALL_ALTERNATIVES;
		break;
	      }
	  if (curr == NULL)
	    {
	      /* This is a new hard regno or the info cannot be
		 integrated into the found structure.  */
#ifdef STACK_REGS
	      early_clobber
		= (early_clobber
		   /* This clobber is to inform popping floating
		      point stack only.  */
		   && ! (FIRST_STACK_REG <= regno
			 && regno <= LAST_STACK_REG));
#endif
	      list = new_insn_reg (data->insn, regno, type, mode, subreg_p,
				   early_clobber ? ALL_ALTERNATIVES : 0, list);
	    }
	}
      return list;
    }
  /* Not a register: recurse into sub-expressions with the op type
     implied by the enclosing RTL code.  */
  switch (code)
    {
    case SET:
      list = collect_non_operand_hard_regs (insn, &SET_DEST (op), data,
					    list, OP_OUT, false);
      list = collect_non_operand_hard_regs (insn, &SET_SRC (op), data,
					    list, OP_IN, false);
      break;
    case CLOBBER:
      /* We treat clobber of non-operand hard registers as early clobber.  */
      list = collect_non_operand_hard_regs (insn, &XEXP (op, 0), data,
					    list, OP_OUT, true);
      break;
    case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC:
      list = collect_non_operand_hard_regs (insn, &XEXP (op, 0), data,
					    list, OP_INOUT, false);
      break;
    case PRE_MODIFY: case POST_MODIFY:
      list = collect_non_operand_hard_regs (insn, &XEXP (op, 0), data,
					    list, OP_INOUT, false);
      list = collect_non_operand_hard_regs (insn, &XEXP (op, 1), data,
					    list, OP_IN, false);
      break;
    default:
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    list = collect_non_operand_hard_regs (insn, &XEXP (op, i), data,
						  list, OP_IN, false);
	  else if (fmt[i] == 'E')
	    for (j = XVECLEN (op, i) - 1; j >= 0; j--)
	      list = collect_non_operand_hard_regs (insn, &XVECEXP (op, i, j),
						    data, list, OP_IN, false);
	}
    }
  return list;
}
|
|
938
|
|
/* Set up and return info about INSN.  Set up the info if it is not set up
   yet.

   The returned lra_insn_recog_data_t is allocated from
   lra_insn_recog_data_pool and cached in lra_insn_recog_data[uid].
   Three paths are taken depending on the insn:
     o debug insns get minimal data (icode == -1, one or zero operand locs);
     o unrecognized insns (asm or USE/CLOBBER/ASM_INPUT) get static data
       built from the asm constraints;
     o recognized insns get static data taken from insn_data[icode].  */
lra_insn_recog_data_t
lra_set_insn_recog_data (rtx_insn *insn)
{
  lra_insn_recog_data_t data;
  int i, n, icode;
  rtx **locs;
  unsigned int uid = INSN_UID (insn);
  struct lra_static_insn_data *insn_static_data;

  check_and_expand_insn_recog_data (uid);
  if (DEBUG_INSN_P (insn))
    icode = -1;
  else
    {
      icode = INSN_CODE (insn);
      if (icode < 0)
	/* It might be a new simple insn which is not recognized yet.  */
	INSN_CODE (insn) = icode = recog_memoized (insn);
    }
  data = lra_insn_recog_data_pool.allocate ();
  lra_insn_recog_data[uid] = data;
  data->insn = insn;
  data->used_insn_alternative = LRA_UNKNOWN_ALT;
  data->icode = icode;
  data->regs = NULL;
  if (DEBUG_INSN_P (insn))
    {
      data->dup_loc = NULL;
      data->arg_hard_regs = NULL;
      data->preferred_alternatives = ALL_ALTERNATIVES;
      if (DEBUG_BIND_INSN_P (insn))
	{
	  /* A debug bind insn has exactly one interesting location:
	     the bound value.  */
	  data->insn_static_data = &debug_bind_static_data;
	  data->operand_loc = XNEWVEC (rtx *, 1);
	  data->operand_loc[0] = &INSN_VAR_LOCATION_LOC (insn);
	}
      else if (DEBUG_MARKER_INSN_P (insn))
	{
	  data->insn_static_data = &debug_marker_static_data;
	  data->operand_loc = NULL;
	}
      return data;
    }
  if (icode < 0)
    {
      int nop, nalt;
      machine_mode operand_mode[MAX_RECOG_OPERANDS];
      const char *constraints[MAX_RECOG_OPERANDS];

      nop = asm_noperands (PATTERN (insn));
      data->operand_loc = data->dup_loc = NULL;
      nalt = 1;
      if (nop < 0)
	{
	  /* It is a special insn like USE or CLOBBER.  We should
	     recognize any regular insn otherwise LRA can do nothing
	     with this insn.  */
	  gcc_assert (GET_CODE (PATTERN (insn)) == USE
		      || GET_CODE (PATTERN (insn)) == CLOBBER
		      || GET_CODE (PATTERN (insn)) == ASM_INPUT);
	  data->insn_static_data = insn_static_data
	    = get_static_insn_data (-1, 0, 0, nalt);
	}
      else
	{
	  /* expand_asm_operands makes sure there aren't too many
	     operands.  */
	  lra_assert (nop <= MAX_RECOG_OPERANDS);
	  if (nop != 0)
	    data->operand_loc = XNEWVEC (rtx *, nop);
	  /* Now get the operand values and constraints out of the
	     insn.  */
	  decode_asm_operands (PATTERN (insn), NULL,
			       data->operand_loc,
			       constraints, operand_mode, NULL);
	  /* Count alternatives: one more than the number of commas in
	     the first operand's constraint string.  */
	  if (nop > 0)
	    for (const char *p =constraints[0]; *p; p++)
	      nalt += *p == ',';
	  data->insn_static_data = insn_static_data
	    = get_static_insn_data (-1, nop, 0, nalt);
	  for (i = 0; i < nop; i++)
	    {
	      insn_static_data->operand[i].mode = operand_mode[i];
	      insn_static_data->operand[i].constraint = constraints[i];
	      insn_static_data->operand[i].strict_low = false;
	      insn_static_data->operand[i].is_operator = false;
	      insn_static_data->operand[i].is_address = false;
	    }
	}
      /* Derive the operand type from the first constraint char:
	 '=' -> output, '+' -> in/out, anything else -> input.  */
      for (i = 0; i < insn_static_data->n_operands; i++)
	insn_static_data->operand[i].type
	  = (insn_static_data->operand[i].constraint[0] == '=' ? OP_OUT
	     : insn_static_data->operand[i].constraint[0] == '+' ? OP_INOUT
	     : OP_IN);
      data->preferred_alternatives = ALL_ALTERNATIVES;
      if (nop > 0)
	{
	  operand_alternative *op_alt = XCNEWVEC (operand_alternative,
						  nalt * nop);
	  preprocess_constraints (nop, nalt, constraints, op_alt,
				  data->operand_loc);
	  setup_operand_alternative (data, op_alt);
	}
    }
  else
    {
      /* Recognized insn: copy operand/dup locations out of the global
	 recog_data filled by insn_extract.  */
      insn_extract (insn);
      data->insn_static_data = insn_static_data
	= get_static_insn_data (icode, insn_data[icode].n_operands,
				insn_data[icode].n_dups,
				insn_data[icode].n_alternatives);
      n = insn_static_data->n_operands;
      if (n == 0)
	locs = NULL;
      else
	{
	  locs = XNEWVEC (rtx *, n);
	  memcpy (locs, recog_data.operand_loc, n * sizeof (rtx *));
	}
      data->operand_loc = locs;
      n = insn_static_data->n_dups;
      if (n == 0)
	locs = NULL;
      else
	{
	  locs = XNEWVEC (rtx *, n);
	  memcpy (locs, recog_data.dup_loc, n * sizeof (rtx *));
	}
      data->dup_loc = locs;
      data->preferred_alternatives = get_preferred_alternatives (insn);
      const operand_alternative *op_alt = preprocess_insn_constraints (icode);
      if (!insn_static_data->operand_alternative)
	setup_operand_alternative (data, op_alt);
      else if (op_alt != insn_static_data->operand_alternative)
	insn_static_data->operand_alternative = op_alt;
    }
  if (GET_CODE (PATTERN (insn)) == CLOBBER || GET_CODE (PATTERN (insn)) == USE)
    insn_static_data->hard_regs = NULL;
  else
    insn_static_data->hard_regs
      = collect_non_operand_hard_regs (insn, &PATTERN (insn), data,
				       NULL, OP_IN, false);
  data->arg_hard_regs = NULL;
  if (CALL_P (insn))
    {
      bool use_p;
      rtx link;
      int n_hard_regs, regno, arg_hard_regs[FIRST_PSEUDO_REGISTER];

      n_hard_regs = 0;
      /* Finding implicit hard register usage.  We believe it will be
	 not changed whatever transformations are used.  Call insns
	 are such example.  */
      for (link = CALL_INSN_FUNCTION_USAGE (insn);
	   link != NULL_RTX;
	   link = XEXP (link, 1))
	if (((use_p = GET_CODE (XEXP (link, 0)) == USE)
	     || GET_CODE (XEXP (link, 0)) == CLOBBER)
	    && REG_P (XEXP (XEXP (link, 0), 0)))
	  {
	    regno = REGNO (XEXP (XEXP (link, 0), 0));
	    lra_assert (regno < FIRST_PSEUDO_REGISTER);
	    /* It is an argument register.  Clobbered regs are encoded
	       by offsetting the regno by FIRST_PSEUDO_REGISTER.  */
	    for (i = REG_NREGS (XEXP (XEXP (link, 0), 0)) - 1; i >= 0; i--)
	      arg_hard_regs[n_hard_regs++]
		= regno + i + (use_p ? 0 : FIRST_PSEUDO_REGISTER);
	  }

      if (n_hard_regs != 0)
	{
	  /* The list is terminated by -1.  */
	  arg_hard_regs[n_hard_regs++] = -1;
	  data->arg_hard_regs = XNEWVEC (int, n_hard_regs);
	  memcpy (data->arg_hard_regs, arg_hard_regs,
		  sizeof (int) * n_hard_regs);
	}
    }
  /* Some output operand can be recognized only from the context not
     from the constraints which are empty in this case.  Call insn may
     contain a hard register in set destination with empty constraint
     and extract_insn treats them as an input.  */
  for (i = 0; i < insn_static_data->n_operands; i++)
    {
      int j;
      rtx pat, set;
      struct lra_operand_data *operand = &insn_static_data->operand[i];

      /* ??? Should we treat 'X' the same way.  It looks to me that
	 'X' means anything and empty constraint means we do not
	 care.  */
      if (operand->type != OP_IN || *operand->constraint != '\0'
	  || operand->is_operator)
	continue;
      pat = PATTERN (insn);
      if (GET_CODE (pat) == SET)
	{
	  if (data->operand_loc[i] != &SET_DEST (pat))
	    continue;
	}
      else if (GET_CODE (pat) == PARALLEL)
	{
	  for (j = XVECLEN (pat, 0) - 1; j >= 0; j--)
	    {
	      set = XVECEXP (PATTERN (insn), 0, j);
	      if (GET_CODE (set) == SET
		  && &SET_DEST (set) == data->operand_loc[i])
		break;
	    }
	  if (j < 0)
	    continue;
	}
      else
	continue;
      operand->type = OP_OUT;
    }
  return data;
}
|
|
1157
|
|
1158 /* Return info about insn give by UID. The info should be already set
|
|
1159 up. */
|
|
1160 static lra_insn_recog_data_t
|
|
1161 get_insn_recog_data_by_uid (int uid)
|
|
1162 {
|
|
1163 lra_insn_recog_data_t data;
|
|
1164
|
|
1165 data = lra_insn_recog_data[uid];
|
|
1166 lra_assert (data != NULL);
|
|
1167 return data;
|
|
1168 }
|
|
1169
|
|
1170 /* Invalidate all info about insn given by its UID. */
|
|
1171 static void
|
|
1172 invalidate_insn_recog_data (int uid)
|
|
1173 {
|
|
1174 lra_insn_recog_data_t data;
|
|
1175
|
|
1176 data = lra_insn_recog_data[uid];
|
|
1177 lra_assert (data != NULL);
|
|
1178 free_insn_recog_data (data);
|
|
1179 lra_insn_recog_data[uid] = NULL;
|
|
1180 }
|
|
1181
|
|
/* Update all the insn info about INSN.  It is usually called when
   something in the insn was changed.  Return the updated info.

   If the cached data's icode no longer matches the insn, the data is
   invalidated and rebuilt from scratch; only the SP offset survives
   the rebuild.  Otherwise the cached data is refreshed in place.  */
lra_insn_recog_data_t
lra_update_insn_recog_data (rtx_insn *insn)
{
  lra_insn_recog_data_t data;
  int n;
  unsigned int uid = INSN_UID (insn);
  struct lra_static_insn_data *insn_static_data;
  poly_int64 sp_offset = 0;

  check_and_expand_insn_recog_data (uid);
  if ((data = lra_insn_recog_data[uid]) != NULL
      && data->icode != INSN_CODE (insn))
    {
      /* The insn code changed: the cached data describes another
	 pattern.  Save the SP offset, then drop the stale data.  */
      sp_offset = data->sp_offset;
      invalidate_insn_data_regno_info (data, insn, get_insn_freq (insn));
      invalidate_insn_recog_data (uid);
      data = NULL;
    }
  if (data == NULL)
    {
      data = lra_get_insn_recog_data (insn);
      /* Initiate or restore SP offset.  */
      data->sp_offset = sp_offset;
      return data;
    }
  insn_static_data = data->insn_static_data;
  data->used_insn_alternative = LRA_UNKNOWN_ALT;
  if (DEBUG_INSN_P (insn))
    return data;
  if (data->icode < 0)
    {
      /* Asm or special insn: re-decode the constraints and, when
	 checking is enabled, verify they still match the cached
	 static data.  */
      int nop;
      machine_mode operand_mode[MAX_RECOG_OPERANDS];
      const char *constraints[MAX_RECOG_OPERANDS];

      nop = asm_noperands (PATTERN (insn));
      if (nop >= 0)
	{
	  lra_assert (nop == data->insn_static_data->n_operands);
	  /* Now get the operand values and constraints out of the
	     insn.  */
	  decode_asm_operands (PATTERN (insn), NULL,
			       data->operand_loc,
			       constraints, operand_mode, NULL);

	  if (flag_checking)
	    for (int i = 0; i < nop; i++)
	      lra_assert
		(insn_static_data->operand[i].mode == operand_mode[i]
		 && insn_static_data->operand[i].constraint == constraints[i]
		 && ! insn_static_data->operand[i].is_operator);
	}

      if (flag_checking)
	for (int i = 0; i < insn_static_data->n_operands; i++)
	  lra_assert
	    (insn_static_data->operand[i].type
	     == (insn_static_data->operand[i].constraint[0] == '=' ? OP_OUT
		 : insn_static_data->operand[i].constraint[0] == '+' ? OP_INOUT
		 : OP_IN));
    }
  else
    {
      /* Recognized insn: refresh operand/dup locations from the
	 global recog_data filled by insn_extract.  */
      insn_extract (insn);
      n = insn_static_data->n_operands;
      if (n != 0)
	memcpy (data->operand_loc, recog_data.operand_loc, n * sizeof (rtx *));
      n = insn_static_data->n_dups;
      if (n != 0)
	memcpy (data->dup_loc, recog_data.dup_loc, n * sizeof (rtx *));
      lra_assert (check_bool_attrs (insn));
    }
  return data;
}
|
|
1258
|
|
1259 /* Set up that INSN is using alternative ALT now. */
|
|
1260 void
|
|
1261 lra_set_used_insn_alternative (rtx_insn *insn, int alt)
|
|
1262 {
|
|
1263 lra_insn_recog_data_t data;
|
|
1264
|
|
1265 data = lra_get_insn_recog_data (insn);
|
|
1266 data->used_insn_alternative = alt;
|
|
1267 }
|
|
1268
|
|
1269 /* Set up that insn with UID is using alternative ALT now. The insn
|
|
1270 info should be already set up. */
|
|
1271 void
|
|
1272 lra_set_used_insn_alternative_by_uid (int uid, int alt)
|
|
1273 {
|
|
1274 lra_insn_recog_data_t data;
|
|
1275
|
|
1276 check_and_expand_insn_recog_data (uid);
|
|
1277 data = lra_insn_recog_data[uid];
|
|
1278 lra_assert (data != NULL);
|
|
1279 data->used_insn_alternative = alt;
|
|
1280 }
|
|
1281
|
|
1282
|
|
1283
|
|
1284 /* This page contains code dealing with common register info and
|
|
1285 pseudo copies. */
|
|
1286
|
|
/* The size of the following array.  */
static int reg_info_size;
/* Common info about each register.  Indexed by regno; grown on demand
   by expand_reg_info.  */
class lra_reg *lra_reg_info;

/* Hard registers involved in spilling.  NOTE(review): this file only
   declares the set (cleared in init_reg_info); its exact semantics are
   defined by the code that populates it elsewhere -- confirm there.  */
HARD_REG_SET hard_regs_spilled_into;

/* Last register value.  */
static int last_reg_value;
|
|
1296
|
|
1297 /* Return new register value. */
|
|
1298 static int
|
|
1299 get_new_reg_value (void)
|
|
1300 {
|
|
1301 return ++last_reg_value;
|
|
1302 }
|
|
1303
|
|
/* Vec referring to pseudo copies.  Owns the lra_copy_t objects until
   lra_free_copies returns them to the pool.  */
static vec<lra_copy_t> copy_vec;
|
|
1306
|
|
1307 /* Initialize I-th element of lra_reg_info. */
|
|
1308 static inline void
|
|
1309 initialize_lra_reg_info_element (int i)
|
|
1310 {
|
|
1311 bitmap_initialize (&lra_reg_info[i].insn_bitmap, ®_obstack);
|
|
1312 #ifdef STACK_REGS
|
|
1313 lra_reg_info[i].no_stack_p = false;
|
|
1314 #endif
|
|
1315 CLEAR_HARD_REG_SET (lra_reg_info[i].conflict_hard_regs);
|
|
1316 lra_reg_info[i].preferred_hard_regno1 = -1;
|
|
1317 lra_reg_info[i].preferred_hard_regno2 = -1;
|
|
1318 lra_reg_info[i].preferred_hard_regno_profit1 = 0;
|
|
1319 lra_reg_info[i].preferred_hard_regno_profit2 = 0;
|
|
1320 lra_reg_info[i].biggest_mode = VOIDmode;
|
|
1321 lra_reg_info[i].live_ranges = NULL;
|
|
1322 lra_reg_info[i].nrefs = lra_reg_info[i].freq = 0;
|
|
1323 lra_reg_info[i].last_reload = 0;
|
|
1324 lra_reg_info[i].restore_rtx = NULL_RTX;
|
|
1325 lra_reg_info[i].val = get_new_reg_value ();
|
|
1326 lra_reg_info[i].offset = 0;
|
|
1327 lra_reg_info[i].copies = NULL;
|
|
1328 }
|
|
1329
|
|
1330 /* Initialize common reg info and copies. */
|
|
1331 static void
|
|
1332 init_reg_info (void)
|
|
1333 {
|
|
1334 int i;
|
|
1335
|
|
1336 last_reg_value = 0;
|
|
1337 reg_info_size = max_reg_num () * 3 / 2 + 1;
|
145
|
1338 lra_reg_info = XNEWVEC (class lra_reg, reg_info_size);
|
111
|
1339 for (i = 0; i < reg_info_size; i++)
|
|
1340 initialize_lra_reg_info_element (i);
|
|
1341 copy_vec.truncate (0);
|
131
|
1342 CLEAR_HARD_REG_SET (hard_regs_spilled_into);
|
111
|
1343 }
|
|
1344
|
|
1345
|
|
1346 /* Finish common reg info and copies. */
|
|
1347 static void
|
|
1348 finish_reg_info (void)
|
|
1349 {
|
|
1350 int i;
|
|
1351
|
|
1352 for (i = 0; i < reg_info_size; i++)
|
|
1353 bitmap_clear (&lra_reg_info[i].insn_bitmap);
|
|
1354 free (lra_reg_info);
|
|
1355 reg_info_size = 0;
|
|
1356 }
|
|
1357
|
|
1358 /* Expand common reg info if it is necessary. */
|
|
1359 static void
|
|
1360 expand_reg_info (void)
|
|
1361 {
|
|
1362 int i, old = reg_info_size;
|
|
1363
|
|
1364 if (reg_info_size > max_reg_num ())
|
|
1365 return;
|
|
1366 reg_info_size = max_reg_num () * 3 / 2 + 1;
|
145
|
1367 lra_reg_info = XRESIZEVEC (class lra_reg, lra_reg_info, reg_info_size);
|
111
|
1368 for (i = old; i < reg_info_size; i++)
|
|
1369 initialize_lra_reg_info_element (i);
|
|
1370 }
|
|
1371
|
|
1372 /* Free all copies. */
|
|
1373 void
|
|
1374 lra_free_copies (void)
|
|
1375 {
|
|
1376 lra_copy_t cp;
|
|
1377
|
|
1378 while (copy_vec.length () != 0)
|
|
1379 {
|
|
1380 cp = copy_vec.pop ();
|
|
1381 lra_reg_info[cp->regno1].copies = lra_reg_info[cp->regno2].copies = NULL;
|
|
1382 lra_copy_pool.remove (cp);
|
|
1383 }
|
|
1384 }
|
|
1385
|
|
/* Create copy of two pseudos REGNO1 and REGNO2.  The copy execution
   frequency is FREQ.

   The copy is canonicalized so that regno1 < regno2;
   regno1_dest_p records whether the original destination was the
   smaller regno.  The new copy is pushed on copy_vec and linked at the
   head of both pseudos' copy lists.  */
void
lra_create_copy (int regno1, int regno2, int freq)
{
  bool regno1_dest_p;
  lra_copy_t cp;

  lra_assert (regno1 != regno2);
  regno1_dest_p = true;
  if (regno1 > regno2)
    {
      /* Canonicalize: keep the smaller regno first and remember the
	 original direction.  */
      std::swap (regno1, regno2);
      regno1_dest_p = false;
    }
  cp = lra_copy_pool.allocate ();
  copy_vec.safe_push (cp);
  cp->regno1_dest_p = regno1_dest_p;
  cp->freq = freq;
  cp->regno1 = regno1;
  cp->regno2 = regno2;
  /* Link the copy at the head of each pseudo's copy list.  */
  cp->regno1_next = lra_reg_info[regno1].copies;
  lra_reg_info[regno1].copies = cp;
  cp->regno2_next = lra_reg_info[regno2].copies;
  lra_reg_info[regno2].copies = cp;
  if (lra_dump_file != NULL)
    fprintf (lra_dump_file, "	   Creating copy r%d%sr%d@%d\n",
	     regno1, regno1_dest_p ? "<-" : "->", regno2, freq);
}
|
|
1415
|
|
1416 /* Return N-th (0, 1, ...) copy. If there is no copy, return
|
|
1417 NULL. */
|
|
1418 lra_copy_t
|
|
1419 lra_get_copy (int n)
|
|
1420 {
|
|
1421 if (n >= (int) copy_vec.length ())
|
|
1422 return NULL;
|
|
1423 return copy_vec[n];
|
|
1424 }
|
|
1425
|
|
1426
|
|
1427
|
|
1428 /* This page contains code dealing with info about registers in
|
|
1429 insns. */
|
|
1430
|
145
|
/* Process X of INSN recursively and add info (operand type is given
   by TYPE) about registers in X to the insn DATA.  If X can be early
   clobbered, alternatives in which it can be early clobbered are given
   by EARLY_CLOBBER_ALTS.

   A reg occurring with both input and output types is merged into
   OP_INOUT.  Each reg also gets INSN recorded in its insn_bitmap.  */
static void
add_regs_to_insn_regno_info (lra_insn_recog_data_t data, rtx x,
			     rtx_insn *insn, enum op_type type,
			     alternative_mask early_clobber_alts)
{
  int i, j, regno;
  bool subreg_p;
  machine_mode mode;
  const char *fmt;
  enum rtx_code code;
  struct lra_insn_reg *curr;

  code = GET_CODE (x);
  mode = GET_MODE (x);
  subreg_p = false;
  if (GET_CODE (x) == SUBREG)
    {
      /* Record the widest mode the subreg access involves; flag
	 read-modify subreg accesses so the full inner reg counts as
	 used.  */
      mode = wider_subreg_mode (x);
      if (read_modify_subreg_p (x))
	subreg_p = true;
      x = SUBREG_REG (x);
      code = GET_CODE (x);
    }
  if (REG_P (x))
    {
      regno = REGNO (x);
      /* Process all regs even unallocatable ones as we need info about
	 all regs for rematerialization pass.  */
      expand_reg_info ();
      /* bitmap_set_bit returns true iff the bit was newly set, i.e.
	 this is the reg's first occurrence in INSN.  */
      if (bitmap_set_bit (&lra_reg_info[regno].insn_bitmap, INSN_UID (insn)))
	{
	  data->regs = new_insn_reg (data->insn, regno, type, mode, subreg_p,
				     early_clobber_alts, data->regs);
	  return;
	}
      else
	{
	  /* The reg was seen before in this insn: merge into the
	     existing entry when possible.  */
	  for (curr = data->regs; curr != NULL; curr = curr->next)
	    if (curr->regno == regno)
	      {
		if (curr->subreg_p != subreg_p || curr->biggest_mode != mode)
		  /* The info cannot be integrated into the found
		     structure.  */
		  data->regs = new_insn_reg (data->insn, regno, type, mode,
					     subreg_p, early_clobber_alts,
					     data->regs);
		else
		  {
		    if (curr->type != type)
		      curr->type = OP_INOUT;
		    curr->early_clobber_alts |= early_clobber_alts;
		  }
		return;
	      }
	  gcc_unreachable ();
	}
    }

  switch (code)
    {
    case SET:
      add_regs_to_insn_regno_info (data, SET_DEST (x), insn, OP_OUT, 0);
      add_regs_to_insn_regno_info (data, SET_SRC (x), insn, OP_IN, 0);
      break;
    case CLOBBER:
      /* We treat clobber of non-operand hard registers as early
	 clobber.  */
      add_regs_to_insn_regno_info (data, XEXP (x, 0), insn, OP_OUT,
				   ALL_ALTERNATIVES);
      break;
    case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC:
      add_regs_to_insn_regno_info (data, XEXP (x, 0), insn, OP_INOUT, 0);
      break;
    case PRE_MODIFY: case POST_MODIFY:
      add_regs_to_insn_regno_info (data, XEXP (x, 0), insn, OP_INOUT, 0);
      add_regs_to_insn_regno_info (data, XEXP (x, 1), insn, OP_IN, 0);
      break;
    default:
      if ((code != PARALLEL && code != EXPR_LIST) || type != OP_OUT)
	/* Some targets place small structures in registers for return
	   values of functions, and those registers are wrapped in
	   PARALLEL that we may see as the destination of a SET.  Here
	   is an example:

	   (call_insn 13 12 14 2 (set (parallel:BLK [
		    (expr_list:REG_DEP_TRUE (reg:DI 0 ax)
			(const_int 0 [0]))
		    (expr_list:REG_DEP_TRUE (reg:DI 1 dx)
			(const_int 8 [0x8]))
		])
		(call (mem:QI (symbol_ref:DI (...  */
	type = OP_IN;
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    add_regs_to_insn_regno_info (data, XEXP (x, i), insn, type, 0);
	  else if (fmt[i] == 'E')
	    {
	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
		add_regs_to_insn_regno_info (data, XVECEXP (x, i, j), insn,
					     type, 0);
	    }
	}
    }
}
|
|
1541
|
|
1542 /* Return execution frequency of INSN. */
|
|
1543 static int
|
|
1544 get_insn_freq (rtx_insn *insn)
|
|
1545 {
|
|
1546 basic_block bb = BLOCK_FOR_INSN (insn);
|
|
1547
|
|
1548 gcc_checking_assert (bb != NULL);
|
|
1549 return REG_FREQ_FROM_BB (bb);
|
|
1550 }
|
|
1551
|
|
1552 /* Invalidate all reg info of INSN with DATA and execution frequency
|
|
1553 FREQ. Update common info about the invalidated registers. */
|
|
1554 static void
|
|
1555 invalidate_insn_data_regno_info (lra_insn_recog_data_t data, rtx_insn *insn,
|
|
1556 int freq)
|
|
1557 {
|
|
1558 int uid;
|
|
1559 bool debug_p;
|
|
1560 unsigned int i;
|
|
1561 struct lra_insn_reg *ir, *next_ir;
|
|
1562
|
|
1563 uid = INSN_UID (insn);
|
|
1564 debug_p = DEBUG_INSN_P (insn);
|
|
1565 for (ir = data->regs; ir != NULL; ir = next_ir)
|
|
1566 {
|
|
1567 i = ir->regno;
|
|
1568 next_ir = ir->next;
|
|
1569 lra_insn_reg_pool.remove (ir);
|
|
1570 bitmap_clear_bit (&lra_reg_info[i].insn_bitmap, uid);
|
|
1571 if (i >= FIRST_PSEUDO_REGISTER && ! debug_p)
|
|
1572 {
|
|
1573 lra_reg_info[i].nrefs--;
|
|
1574 lra_reg_info[i].freq -= freq;
|
|
1575 lra_assert (lra_reg_info[i].nrefs >= 0 && lra_reg_info[i].freq >= 0);
|
|
1576 }
|
|
1577 }
|
|
1578 data->regs = NULL;
|
|
1579 }
|
|
1580
|
|
1581 /* Invalidate all reg info of INSN. Update common info about the
|
|
1582 invalidated registers. */
|
|
1583 void
|
|
1584 lra_invalidate_insn_regno_info (rtx_insn *insn)
|
|
1585 {
|
|
1586 invalidate_insn_data_regno_info (lra_get_insn_recog_data (insn), insn,
|
|
1587 get_insn_freq (insn));
|
|
1588 }
|
|
1589
|
|
1590 /* Update common reg info from reg info of insn given by its DATA and
|
|
1591 execution frequency FREQ. */
|
|
1592 static void
|
|
1593 setup_insn_reg_info (lra_insn_recog_data_t data, int freq)
|
|
1594 {
|
|
1595 unsigned int i;
|
|
1596 struct lra_insn_reg *ir;
|
|
1597
|
|
1598 for (ir = data->regs; ir != NULL; ir = ir->next)
|
|
1599 if ((i = ir->regno) >= FIRST_PSEUDO_REGISTER)
|
|
1600 {
|
|
1601 lra_reg_info[i].nrefs++;
|
|
1602 lra_reg_info[i].freq += freq;
|
|
1603 }
|
|
1604 }
|
|
1605
|
|
/* Set up insn reg info of INSN.  Update common reg info from reg info
   of INSN.

   Rebuilds data->regs from scratch: operands first, then the operand
   of a top-level USE/CLOBBER pattern, then MEM references in a call's
   CALL_INSN_FUNCTION_USAGE list.  */
void
lra_update_insn_regno_info (rtx_insn *insn)
{
  int i, freq;
  lra_insn_recog_data_t data;
  struct lra_static_insn_data *static_data;
  enum rtx_code code;
  rtx link;

  if (! INSN_P (insn))
    return;
  data = lra_get_insn_recog_data (insn);
  static_data = data->insn_static_data;
  /* Debug insns do not contribute to reference frequencies.  */
  freq = NONDEBUG_INSN_P (insn) ? get_insn_freq (insn) : 0;
  /* Drop the stale per-insn reg list before rebuilding it.  */
  invalidate_insn_data_regno_info (data, insn, freq);
  for (i = static_data->n_operands - 1; i >= 0; i--)
    add_regs_to_insn_regno_info (data, *data->operand_loc[i], insn,
				 static_data->operand[i].type,
				 static_data->operand[i].early_clobber_alts);
  if ((code = GET_CODE (PATTERN (insn))) == CLOBBER || code == USE)
    add_regs_to_insn_regno_info (data, XEXP (PATTERN (insn), 0), insn,
				 code == USE ? OP_IN : OP_OUT, 0);
  if (CALL_P (insn))
    /* On some targets call insns can refer to pseudos in memory in
       CALL_INSN_FUNCTION_USAGE list.  Process them in order to
       consider their occurrences in calls for different
       transformations (e.g. inheritance) with given pseudos.  */
    for (link = CALL_INSN_FUNCTION_USAGE (insn);
	 link != NULL_RTX;
	 link = XEXP (link, 1))
      {
	code = GET_CODE (XEXP (link, 0));
	if ((code == USE || code == CLOBBER)
	    && MEM_P (XEXP (XEXP (link, 0), 0)))
	  add_regs_to_insn_regno_info (data, XEXP (XEXP (link, 0), 0), insn,
				       code == USE ? OP_IN : OP_OUT, 0);
      }
  if (NONDEBUG_INSN_P (insn))
    setup_insn_reg_info (data, freq);
}
|
|
1648
|
|
1649 /* Return reg info of insn given by it UID. */
|
|
1650 struct lra_insn_reg *
|
|
1651 lra_get_insn_regs (int uid)
|
|
1652 {
|
|
1653 lra_insn_recog_data_t data;
|
|
1654
|
|
1655 data = get_insn_recog_data_by_uid (uid);
|
|
1656 return data->regs;
|
|
1657 }
|
|
1658
|
|
1659
|
|
1660
|
|
/* Recursive hash function for RTL X.  Returns 0 for a null X.

   Note: the hash deliberately ignores modes and the payload of
   SCRATCH/CONST_DOUBLE/CONST_VECTOR, so distinct rtxes may collide;
   callers must still compare for real equality.  */
hashval_t
lra_rtx_hash (rtx x)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  hashval_t val = 0;

  if (x == 0)
    return val;

  code = GET_CODE (x);
  val += (int) code + 4095;

  /* Some RTL can be compared nonrecursively.  */
  switch (code)
    {
    case REG:
      return val + REGNO (x);

    case LABEL_REF:
      return iterative_hash_object (XEXP (x, 0), val);

    case SYMBOL_REF:
      return iterative_hash_object (XSTR (x, 0), val);

    case SCRATCH:
    case CONST_DOUBLE:
    case CONST_VECTOR:
      return val;

    case CONST_INT:
      return val + UINTVAL (x);

    default:
      break;
    }

  /* Hash the elements.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'w':
	  val += XWINT (x, i);
	  break;

	case 'n':
	case 'i':
	  val += XINT (x, i);
	  break;

	case 'V':
	case 'E':
	  val += XVECLEN (x, i);

	  for (j = 0; j < XVECLEN (x, i); j++)
	    val += lra_rtx_hash (XVECEXP (x, i, j));
	  break;

	case 'e':
	  val += lra_rtx_hash (XEXP (x, i));
	  break;

	case 'S':
	case 's':
	  val += htab_hash_string (XSTR (x, i));
	  break;

	case 'u':
	case '0':
	case 't':
	  break;

	/* It is believed that rtx's at this level will never
	   contain anything but integers and other rtx's, except for
	   within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  abort ();
	}
    }
  return val;
}
|
|
1746
|
|
1747
|
|
1748
|
|
1749 /* This page contains code dealing with stack of the insns which
|
|
1750 should be processed by the next constraint pass. */
|
|
1751
|
|
/* Bitmap used to put an insn on the stack only once: a set bit for
   UID means the insn is currently on the stack.  */
static sbitmap lra_constraint_insn_stack_bitmap;

/* The stack itself.  */
vec<rtx_insn *> lra_constraint_insn_stack;
|
|
1757
|
|
1758 /* Put INSN on the stack. If ALWAYS_UPDATE is true, always update the reg
|
|
1759 info for INSN, otherwise only update it if INSN is not already on the
|
|
1760 stack. */
|
|
1761 static inline void
|
|
1762 lra_push_insn_1 (rtx_insn *insn, bool always_update)
|
|
1763 {
|
|
1764 unsigned int uid = INSN_UID (insn);
|
|
1765 if (always_update)
|
|
1766 lra_update_insn_regno_info (insn);
|
|
1767 if (uid >= SBITMAP_SIZE (lra_constraint_insn_stack_bitmap))
|
|
1768 lra_constraint_insn_stack_bitmap =
|
|
1769 sbitmap_resize (lra_constraint_insn_stack_bitmap, 3 * uid / 2, 0);
|
|
1770 if (bitmap_bit_p (lra_constraint_insn_stack_bitmap, uid))
|
|
1771 return;
|
|
1772 bitmap_set_bit (lra_constraint_insn_stack_bitmap, uid);
|
|
1773 if (! always_update)
|
|
1774 lra_update_insn_regno_info (insn);
|
|
1775 lra_constraint_insn_stack.safe_push (insn);
|
|
1776 }
|
|
1777
|
|
1778 /* Put INSN on the stack. */
|
|
1779 void
|
|
1780 lra_push_insn (rtx_insn *insn)
|
|
1781 {
|
|
1782 lra_push_insn_1 (insn, false);
|
|
1783 }
|
|
1784
|
|
1785 /* Put INSN on the stack and update its reg info. */
|
|
1786 void
|
|
1787 lra_push_insn_and_update_insn_regno_info (rtx_insn *insn)
|
|
1788 {
|
|
1789 lra_push_insn_1 (insn, true);
|
|
1790 }
|
|
1791
|
|
1792 /* Put insn with UID on the stack. */
|
|
1793 void
|
|
1794 lra_push_insn_by_uid (unsigned int uid)
|
|
1795 {
|
|
1796 lra_push_insn (lra_insn_recog_data[uid]->insn);
|
|
1797 }
|
|
1798
|
|
1799 /* Take the last-inserted insns off the stack and return it. */
|
|
1800 rtx_insn *
|
|
1801 lra_pop_insn (void)
|
|
1802 {
|
|
1803 rtx_insn *insn = lra_constraint_insn_stack.pop ();
|
|
1804 bitmap_clear_bit (lra_constraint_insn_stack_bitmap, INSN_UID (insn));
|
|
1805 return insn;
|
|
1806 }
|
|
1807
|
|
1808 /* Return the current size of the insn stack. */
|
|
1809 unsigned int
|
|
1810 lra_insn_stack_length (void)
|
|
1811 {
|
|
1812 return lra_constraint_insn_stack.length ();
|
|
1813 }
|
|
1814
|
|
1815 /* Push insns FROM to TO (excluding it) going in reverse order. */
|
|
1816 static void
|
|
1817 push_insns (rtx_insn *from, rtx_insn *to)
|
|
1818 {
|
|
1819 rtx_insn *insn;
|
|
1820
|
|
1821 if (from == NULL_RTX)
|
|
1822 return;
|
|
1823 for (insn = from; insn != to; insn = PREV_INSN (insn))
|
|
1824 if (INSN_P (insn))
|
|
1825 lra_push_insn (insn);
|
|
1826 }
|
|
1827
|
|
/* Set up the sp offset for each insn in the range [FROM, LAST].  The
   offset is taken from the next nonnote, nondebug insn of the same BB
   after LAST, or is zero if there is no such insn.  */
static void
setup_sp_offset (rtx_insn *from, rtx_insn *last)
{
  rtx_insn *before = next_nonnote_nondebug_insn_bb (last);
  poly_int64 offset = (before == NULL_RTX || ! INSN_P (before)
		       ? 0 : lra_get_insn_recog_data (before)->sp_offset);

  for (rtx_insn *insn = from; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
    lra_get_insn_recog_data (insn)->sp_offset = offset;
}
|
|
1841
|
|
/* Emit insns BEFORE before INSN and insns AFTER after INSN.  Put the
   new insns onto the constraint stack and give them the sp offset of
   their insertion point.  Print about emitting the insns with
   TITLE in the dump file, if any.  */
void
lra_process_new_insns (rtx_insn *insn, rtx_insn *before, rtx_insn *after,
		       const char *title)
{
  rtx_insn *last;

  if (before == NULL_RTX && after == NULL_RTX)
    return;
  if (lra_dump_file != NULL)
    {
      dump_insn_slim (lra_dump_file, insn);
      if (before != NULL_RTX)
	{
	  fprintf (lra_dump_file,"  %s before:\n", title);
	  dump_rtl_slim (lra_dump_file, before, NULL, -1, 0);
	}
      if (after != NULL_RTX)
	{
	  fprintf (lra_dump_file, "  %s after:\n", title);
	  dump_rtl_slim (lra_dump_file, after, NULL, -1, 0);
	}
      fprintf (lra_dump_file, "\n");
    }
  if (before != NULL_RTX)
    {
      /* Propagate the EH region of INSN to the new insns before
	 emitting, so reloads of a throwing insn stay in its region.  */
      if (cfun->can_throw_non_call_exceptions)
	copy_reg_eh_region_note_forward (insn, before, NULL);
      emit_insn_before (before, insn);
      push_insns (PREV_INSN (insn), PREV_INSN (before));
      setup_sp_offset (before, PREV_INSN (insn));
    }
  if (after != NULL_RTX)
    {
      if (cfun->can_throw_non_call_exceptions)
	copy_reg_eh_region_note_forward (insn, after, NULL);
      /* Find the last insn of the AFTER sequence.  */
      for (last = after; NEXT_INSN (last) != NULL_RTX; last = NEXT_INSN (last))
	;
      emit_insn_after (after, insn);
      push_insns (last, insn);
      setup_sp_offset (after, last);
    }
  if (cfun->can_throw_non_call_exceptions)
    {
      /* Transformations above may have made INSN itself non-throwing;
	 drop a now-stale REG_EH_REGION note in that case.  */
      rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
      if (note && !insn_could_throw_p (insn))
	remove_note (insn, note);
    }
}
|
|
1893
|
|
1894
|
|
/* Replace all references to register OLD_REGNO in *LOC with pseudo
   register NEW_REG.  Try to simplify subreg of constant if SUBREG_P.
   DEBUG_P is true if *LOC is within a DEBUG_INSN.  Return true if any
   change was made.  */
bool
lra_substitute_pseudo (rtx *loc, int old_regno, rtx new_reg, bool subreg_p,
		       bool debug_p)
{
  rtx x = *loc;
  bool result = false;
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (x == NULL_RTX)
    return false;

  code = GET_CODE (x);
  if (code == SUBREG && subreg_p)
    {
      rtx subst, inner = SUBREG_REG (x);
      /* Transform subreg of constant while we still have inner mode
	 of the subreg.  The subreg internal should not be an insn
	 operand.  */
      if (REG_P (inner) && (int) REGNO (inner) == old_regno
	  && CONSTANT_P (new_reg)
	  && (subst = simplify_subreg (GET_MODE (x), new_reg, GET_MODE (inner),
				       SUBREG_BYTE (x))) != NULL_RTX)
	{
	  *loc = subst;
	  return true;
	}

    }
  else if (code == REG && (int) REGNO (x) == old_regno)
    {
      machine_mode mode = GET_MODE (x);
      machine_mode inner_mode = GET_MODE (new_reg);

      /* If the modes differ, wrap NEW_REG in a subreg -- except when
	 NEW_REG is a constant integer going into a scalar int slot,
	 where it can stand on its own.  */
      if (mode != inner_mode
	  && ! (CONST_SCALAR_INT_P (new_reg) && SCALAR_INT_MODE_P (mode)))
	{
	  poly_uint64 offset = 0;
	  if (partial_subreg_p (mode, inner_mode)
	      && SCALAR_INT_MODE_P (inner_mode))
	    offset = subreg_lowpart_offset (mode, inner_mode);
	  /* Inside debug insns use a raw subreg, which is not
	     validated/simplified like gen_rtx_SUBREG.  */
	  if (debug_p)
	    new_reg = gen_rtx_raw_SUBREG (mode, new_reg, offset);
	  else
	    new_reg = gen_rtx_SUBREG (mode, new_reg, offset);
	}
      *loc = new_reg;
      return true;
    }

  /* Scan all the operand sub-expressions.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (lra_substitute_pseudo (&XEXP (x, i), old_regno,
				     new_reg, subreg_p, debug_p))
	    result = true;
	}
      else if (fmt[i] == 'E')
	{
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (lra_substitute_pseudo (&XVECEXP (x, i, j), old_regno,
				       new_reg, subreg_p, debug_p))
	      result = true;
	}
    }
  return result;
}
|
|
1970
|
|
/* Call lra_substitute_pseudo within an insn.  Try to simplify subreg
   of constant if SUBREG_P.  This won't update the insn ptr, just the
   contents of the insn.  Return true if any change was made.  */
bool
lra_substitute_pseudo_within_insn (rtx_insn *insn, int old_regno,
				   rtx new_reg, bool subreg_p)
{
  rtx loc = insn;
  return lra_substitute_pseudo (&loc, old_regno, new_reg, subreg_p,
				DEBUG_INSN_P (insn));
}
|
|
1982
|
|
1983
|
|
1984
|
|
1985 /* This page contains code dealing with scratches (changing them onto
|
|
1986 pseudos and restoring them from the pseudos).
|
|
1987
|
|
1988 We change scratches into pseudos at the beginning of LRA to
|
|
1989 simplify dealing with them (conflicts, hard register assignments).
|
|
1990
|
|
1991 If the pseudo denoting scratch was spilled it means that we do need
|
|
1992 a hard register for it. Such pseudos are transformed back to
|
|
1993 scratches at the end of LRA. */
|
|
1994
|
|
/* Description of location of a former scratch operand, saved so the
   scratch can be restored at the end of LRA.  */
struct sloc
{
  rtx_insn *insn; /* Insn where the scratch was.  */
  int nop;  /* Number of the operand which was a scratch.  */
  int icode;  /* Original icode from which scratch was removed.  */
};
|
|
2002
|
|
/* Pointer to a saved scratch-location record.  */
typedef struct sloc *sloc_t;

/* Locations of the former scratches.  */
static vec<sloc_t> scratches;

/* Bitmap of regnos of the pseudos that replaced scratches.  */
static bitmap_head scratch_bitmap;

/* Bitmap of former scratch operands, keyed by
   insn uid * MAX_RECOG_OPERANDS + operand number.  */
static bitmap_head scratch_operand_bitmap;
|
|
2013
|
|
/* Return true if pseudo REGNO was made from a SCRATCH
   (see remove_scratches_1).  */
bool
lra_former_scratch_p (int regno)
{
  return bitmap_bit_p (&scratch_bitmap, regno);
}
|
|
2020
|
|
2021 /* Return true if the operand NOP of INSN is a former scratch. */
|
|
2022 bool
|
|
2023 lra_former_scratch_operand_p (rtx_insn *insn, int nop)
|
|
2024 {
|
|
2025 return bitmap_bit_p (&scratch_operand_bitmap,
|
|
2026 INSN_UID (insn) * MAX_RECOG_OPERANDS + nop) != 0;
|
|
2027 }
|
|
2028
|
|
/* Register operand NOP in INSN as a former scratch.  ICODE is the
   insn code the operand belonged to.  The operand will be changed
   back to a scratch, if it is necessary, at the LRA end.  */
void
lra_register_new_scratch_op (rtx_insn *insn, int nop, int icode)
{
  lra_insn_recog_data_t id = lra_get_insn_recog_data (insn);
  rtx op = *id->operand_loc[nop];
  sloc_t loc = XNEW (struct sloc);
  /* The operand must already have been replaced by a register.  */
  lra_assert (REG_P (op));
  loc->insn = insn;
  loc->nop = nop;
  loc->icode = icode;
  scratches.safe_push (loc);
  bitmap_set_bit (&scratch_bitmap, REGNO (op));
  bitmap_set_bit (&scratch_operand_bitmap,
		  INSN_UID (insn) * MAX_RECOG_OPERANDS + nop);
  /* The value of a former scratch is never used.  */
  add_reg_note (insn, REG_UNUSED, op);
}
|
|
2047
|
145
|
/* Change INSN's scratches into pseudos and save their location, so
   they can be restored by restore_scratches at the end of LRA.  */
static void
remove_scratches_1 (rtx_insn *insn)
{
  int i;
  bool insn_changed_p;
  rtx reg;
  lra_insn_recog_data_t id;
  struct lra_static_insn_data *static_id;

  id = lra_get_insn_recog_data (insn);
  static_id = id->insn_static_data;
  insn_changed_p = false;
  for (i = 0; i < static_id->n_operands; i++)
    /* Only replace scratches with a real mode; VOIDmode scratches
       are left alone.  */
    if (GET_CODE (*id->operand_loc[i]) == SCRATCH
	&& GET_MODE (*id->operand_loc[i]) != VOIDmode)
      {
	insn_changed_p = true;
	*id->operand_loc[i] = reg
	  = lra_create_new_reg (static_id->operand[i].mode,
				*id->operand_loc[i], ALL_REGS, NULL);
	lra_register_new_scratch_op (insn, i, id->icode);
	if (lra_dump_file != NULL)
	  fprintf (lra_dump_file,
		   "Removing SCRATCH in insn #%u (nop %d)\n",
		   INSN_UID (insn), i);
      }
  if (insn_changed_p)
    /* Because we might use DF right after caller-saves sub-pass
       we need to keep DF info up to date.  */
    df_insn_rescan (insn);
}
|
|
2080
|
|
/* Change scratches into pseudos and save their locations.  Walks
   every insn of every basic block; also initializes the scratch
   bookkeeping structures used by the lra_former_scratch_* queries.  */
static void
remove_scratches (void)
{
  basic_block bb;
  rtx_insn *insn;

  scratches.create (get_max_uid ());
  bitmap_initialize (&scratch_bitmap, &reg_obstack);
  bitmap_initialize (&scratch_operand_bitmap, &reg_obstack);
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS (bb, insn)
      if (INSN_P (insn))
	remove_scratches_1 (insn);
}
|
|
2096
|
|
/* Change pseudos created by function remove_scratches back into
   scratches, when they did not get a hard register, and release the
   scratch bookkeeping structures.  */
static void
restore_scratches (void)
{
  int regno;
  unsigned i;
  sloc_t loc;
  rtx_insn *last = NULL;
  lra_insn_recog_data_t id = NULL;

  for (i = 0; scratches.iterate (i, &loc); i++)
    {
      /* Ignore already deleted insns.  */
      if (NOTE_P (loc->insn)
	  && NOTE_KIND (loc->insn) == NOTE_INSN_DELETED)
	continue;
      /* Cache recog data while consecutive records refer to the same
	 insn.  */
      if (last != loc->insn)
	{
	  last = loc->insn;
	  id = lra_get_insn_recog_data (last);
	}
      if (loc->icode != id->icode)
	{
	  /* The icode doesn't match, which means the insn has been modified
	     (e.g. register elimination).  The scratch cannot be restored.  */
	  continue;
	}
      if (REG_P (*id->operand_loc[loc->nop])
	  && ((regno = REGNO (*id->operand_loc[loc->nop]))
	      >= FIRST_PSEUDO_REGISTER)
	  && lra_get_regno_hard_regno (regno) < 0)
	{
	  /* It should be only case when scratch register with chosen
	     constraint 'X' did not get memory or hard register.  */
	  lra_assert (lra_former_scratch_p (regno));
	  *id->operand_loc[loc->nop]
	    = gen_rtx_SCRATCH (GET_MODE (*id->operand_loc[loc->nop]));
	  lra_update_dup (id, loc->nop);
	  if (lra_dump_file != NULL)
	    fprintf (lra_dump_file, "Restoring SCRATCH in insn #%u(nop %d)\n",
		     INSN_UID (loc->insn), loc->nop);
	}
    }
  for (i = 0; scratches.iterate (i, &loc); i++)
    free (loc);
  scratches.release ();
  bitmap_clear (&scratch_bitmap);
  bitmap_clear (&scratch_operand_bitmap);
}
|
|
2146
|
|
2147
|
|
2148
|
|
/* Function checks RTL for correctness.  If FINAL_P is true, it is
   done at the end of LRA and the check is more rigorous (insns must
   also satisfy their constraints).  */
static void
check_rtl (bool final_p)
{
  basic_block bb;
  rtx_insn *insn;

  lra_assert (! final_p || reload_completed);
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS (bb, insn)
      if (NONDEBUG_INSN_P (insn)
	  && GET_CODE (PATTERN (insn)) != USE
	  && GET_CODE (PATTERN (insn)) != CLOBBER
	  && GET_CODE (PATTERN (insn)) != ASM_INPUT)
	{
	  if (final_p)
	    {
	      extract_constrain_insn (insn);
	      continue;
	    }
	  /* LRA code is based on assumption that all addresses can be
	     correctly decomposed.  LRA can generate reloads for
	     decomposable addresses.  The decomposition code checks the
	     correctness of the addresses.  So we don't need to check
	     the addresses here.  Don't call insn_invalid_p here, it can
	     change the code at this stage.  */
	  if (recog_memoized (insn) < 0 && asm_noperands (PATTERN (insn)) < 0)
	    fatal_insn_not_found (insn);
	}
}
|
|
2180
|
|
/* Determine if the current function has an exception receiver block
   that reaches the exit block via non-exceptional edges.  Implemented
   as a backwards reachability walk from the exit block; any abnormal
   predecessor edge found on the way answers "yes".  */
static bool
has_nonexceptional_receiver (void)
{
  edge e;
  edge_iterator ei;
  basic_block *tos, *worklist, bb;

  /* If we're not optimizing, then just err on the safe side.  */
  if (!optimize)
    return true;

  /* First determine which blocks can reach exit via normal paths.  */
  tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);

  FOR_EACH_BB_FN (bb, cfun)
    bb->flags &= ~BB_REACHABLE;

  /* Place the exit block on our worklist.  */
  EXIT_BLOCK_PTR_FOR_FN (cfun)->flags |= BB_REACHABLE;
  *tos++ = EXIT_BLOCK_PTR_FOR_FN (cfun);

  /* Iterate: find everything reachable from what we've already seen.  */
  while (tos != worklist)
    {
      bb = *--tos;

      FOR_EACH_EDGE (e, ei, bb->preds)
	if (e->flags & EDGE_ABNORMAL)
	  {
	    free (worklist);
	    return true;
	  }
	else
	  {
	    basic_block src = e->src;

	    if (!(src->flags & BB_REACHABLE))
	      {
		src->flags |= BB_REACHABLE;
		*tos++ = src;
	      }
	  }
    }
  free (worklist);
  /* No exceptional block reached exit unexceptionally.  */
  return false;
}
|
|
2230
|
|
2231
|
|
/* Process recursively X of INSN and add REG_INC notes for every
   auto-inc/dec memory address found.  */
static void
add_auto_inc_notes (rtx_insn *insn, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  if (code == MEM && auto_inc_p (XEXP (x, 0)))
    {
      add_reg_note (insn, REG_INC, XEXP (XEXP (x, 0), 0));
      return;
    }

  /* Scan all X sub-expressions.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	add_auto_inc_notes (insn, XEXP (x, i));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  add_auto_inc_notes (insn, XVECEXP (x, i, j));
    }
}
|
|
2257
|
|
2258
|
|
/* Remove all REG_DEAD and REG_UNUSED notes and regenerate REG_INC.
   We change pseudos by hard registers without notification of DF and
   that can make the notes obsolete.  DF-infrastructure does not deal
   with REG_INC notes -- so we should regenerate them here.  */
static void
update_inc_notes (void)
{
  rtx *pnote;
  basic_block bb;
  rtx_insn *insn;

  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS (bb, insn)
      if (NONDEBUG_INSN_P (insn))
	{
	  /* Unlink stale DEAD/UNUSED/INC notes in place.  */
	  pnote = &REG_NOTES (insn);
	  while (*pnote != 0)
	    {
	      if (REG_NOTE_KIND (*pnote) == REG_DEAD
		  || REG_NOTE_KIND (*pnote) == REG_UNUSED
		  || REG_NOTE_KIND (*pnote) == REG_INC)
		*pnote = XEXP (*pnote, 1);
	      else
		pnote = &XEXP (*pnote, 1);
	    }

	  if (AUTO_INC_DEC)
	    add_auto_inc_notes (insn, PATTERN (insn));
	}
}
|
|
2289
|
|
/* Set to 1 while in lra.  */
int lra_in_progress;

/* Start of pseudo regnos before the LRA.  */
int lra_new_regno_start;

/* Start of reload pseudo regnos before the new spill pass.  */
int lra_constraint_new_regno_start;

/* Avoid spilling pseudos with regnos greater than the following
   value, if it is possible.  */
int lra_bad_spill_regno_start;

/* Inheritance pseudo regnos before the new spill pass.  */
bitmap_head lra_inheritance_pseudos;

/* Split regnos before the new spill pass.  */
bitmap_head lra_split_regs;

/* Reload pseudo regnos before the new assignment pass which still can
   be spilled after the assignment pass as memory is also accepted in
   insns for the reload pseudos.  */
bitmap_head lra_optional_reload_pseudos;

/* Pseudo regnos used for subreg reloads before the new assignment
   pass.  Such pseudos still can be spilled after the assignment
   pass.  */
bitmap_head lra_subreg_reload_pseudos;

/* File used for output of LRA debug information.  */
FILE *lra_dump_file;

/* True if we found an asm error.  */
bool lra_asm_error_p;

/* True if we should try spill into registers of different classes
   instead of memory.  */
bool lra_reg_spill_p;
|
|
2328
|
|
/* Set up value LRA_REG_SPILL_P.  It is true iff the target provides a
   spill class for at least one (class, mode) pair.  */
static void
setup_reg_spill_flag (void)
{
  int cl, mode;

  if (targetm.spill_class != NULL)
    for (cl = 0; cl < (int) LIM_REG_CLASSES; cl++)
      for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
	if (targetm.spill_class ((enum reg_class) cl,
				 (machine_mode) mode) != NO_REGS)
	  {
	    lra_reg_spill_p = true;
	    return;
	  }
  lra_reg_spill_p = false;
}
|
|
2346
|
|
/* True if the current function is too big to use regular algorithms
   in LRA.  In other words, we should use simpler and faster algorithms
   in LRA.  It also means we should not worry about generating code
   for caller saves.  The value is set up in IRA.  */
bool lra_simple_p;
|
|
2352
|
|
/* Major LRA entry function.  F is a file that should be used to dump
   LRA debug info, or NULL.  */
void
lra (FILE *f)
{
  int i;
  bool live_p, inserted_p;

  lra_dump_file = f;
  lra_asm_error_p = false;

  timevar_push (TV_LRA);

  /* Make sure that the last insn is a note.  Some subsequent passes
     need it.  */
  emit_note (NOTE_INSN_DELETED);

  lra_no_alloc_regs = ira_no_alloc_regs;

  init_reg_info ();
  expand_reg_info ();

  init_insn_recog_data ();

  /* Some quick check on RTL generated by previous passes.  */
  if (flag_checking)
    check_rtl (false);

  lra_in_progress = 1;

  lra_live_range_iter = lra_coalesce_iter = lra_constraint_iter = 0;
  lra_assignment_iter = lra_assignment_iter_after_spill = 0;
  lra_inheritance_iter = lra_undo_inheritance_iter = 0;
  lra_rematerialization_iter = 0;

  setup_reg_spill_flag ();

  /* Function remove_scratches can create new pseudos for clobbers --
     so set up lra_constraint_new_regno_start before its call to
     permit changing reg classes for pseudos created by this
     simplification.  */
  lra_constraint_new_regno_start = lra_new_regno_start = max_reg_num ();
  lra_bad_spill_regno_start = INT_MAX;
  remove_scratches ();

  /* A function that has a non-local label that can reach the exit
     block via non-exceptional paths must save all call-saved
     registers.  */
  if (cfun->has_nonlocal_label && has_nonexceptional_receiver ())
    crtl->saves_all_registers = 1;

  if (crtl->saves_all_registers)
    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
      if (!crtl->abi->clobbers_full_reg_p (i)
	  && !fixed_regs[i]
	  && !LOCAL_REGNO (i))
	df_set_regs_ever_live (i, true);

  /* We don't use DF from now on and avoid it because it is too
     expensive when a lot of RTL changes are made.  */
  df_set_flags (DF_NO_INSN_RESCAN);
  lra_constraint_insn_stack.create (get_max_uid ());
  lra_constraint_insn_stack_bitmap = sbitmap_alloc (get_max_uid ());
  bitmap_clear (lra_constraint_insn_stack_bitmap);
  lra_live_ranges_init ();
  lra_constraints_init ();
  lra_curr_reload_num = 0;
  push_insns (get_last_insn (), NULL);
  /* It is needed for the 1st coalescing.  */
  bitmap_initialize (&lra_inheritance_pseudos, &reg_obstack);
  bitmap_initialize (&lra_split_regs, &reg_obstack);
  bitmap_initialize (&lra_optional_reload_pseudos, &reg_obstack);
  bitmap_initialize (&lra_subreg_reload_pseudos, &reg_obstack);
  live_p = false;
  if (maybe_ne (get_frame_size (), 0) && crtl->stack_alignment_needed)
    /* If we have a stack frame, we must align it now.  The stack size
       may be a part of the offset computation for register
       elimination.  */
    assign_stack_local (BLKmode, 0, crtl->stack_alignment_needed);
  lra_init_equiv ();
  for (;;)
    {
      for (;;)
	{
	  bool reloads_p = lra_constraints (lra_constraint_iter == 0);
	  /* Constraint transformations may result in that eliminable
	     hard regs become uneliminable and pseudos which use them
	     should be spilled.  It is better to do it before pseudo
	     assignments.

	     For example, rs6000 can make
	     RS6000_PIC_OFFSET_TABLE_REGNUM uneliminable if we started
	     to use a constant pool.  */
	  lra_eliminate (false, false);
	  /* We should try to assign hard registers to scratches even
	     if there were no RTL transformations in lra_constraints.
	     Also we should check IRA assignments on the first
	     iteration as they can be wrong because of early clobbers
	     operands which are ignored in IRA.  */
	  if (! reloads_p && lra_constraint_iter > 1)
	    {
	      /* Stack is not empty here only when there are changes
		 during the elimination sub-pass.  */
	      if (bitmap_empty_p (lra_constraint_insn_stack_bitmap))
		break;
	      else
		/* If there are no reloads but changes due to
		   elimination, restart the constraint sub-pass
		   first.  */
		continue;
	    }
	  /* Do inheritance only for regular algorithms.  */
	  if (! lra_simple_p)
	    lra_inheritance ();
	  if (live_p)
	    lra_clear_live_ranges ();
	  bool fails_p;
	  do
	    {
	      /* We need live ranges for lra_assign -- so build them.
		 But don't remove dead insns or change global live
		 info as we can undo inheritance transformations after
		 inheritance pseudo assigning.  */
	      lra_create_live_ranges (true, !lra_simple_p);
	      live_p = true;
	      /* If we don't spill non-reload and non-inheritance
		 pseudos, there is no sense to run memory-memory move
		 coalescing.  If inheritance pseudos were spilled, the
		 memory-memory moves involving them will be removed by
		 pass undoing inheritance.  */
	      if (lra_simple_p)
		lra_assign (fails_p);
	      else
		{
		  bool spill_p = !lra_assign (fails_p);

		  if (lra_undo_inheritance ())
		    live_p = false;
		  if (spill_p && ! fails_p)
		    {
		      if (! live_p)
			{
			  lra_create_live_ranges (true, true);
			  live_p = true;
			}
		      if (lra_coalesce ())
			live_p = false;
		    }
		  if (! live_p)
		    lra_clear_live_ranges ();
		}
	      if (fails_p)
		{
		  /* It is a very rare case.  It is the last hope to
		     split a hard regno live range for a reload
		     pseudo.  */
		  if (live_p)
		    lra_clear_live_ranges ();
		  live_p = false;
		  if (! lra_split_hard_reg_for ())
		    break;
		}
	    }
	  while (fails_p);
	  if (! live_p) {
	    /* We need the correct reg notes for work of constraint sub-pass.  */
	    lra_create_live_ranges (true, true);
	    live_p = true;
	  }
	}
      /* Don't clear optional reloads bitmap until all constraints are
	 satisfied as we need to differ them from regular reloads.  */
      bitmap_clear (&lra_optional_reload_pseudos);
      bitmap_clear (&lra_subreg_reload_pseudos);
      bitmap_clear (&lra_inheritance_pseudos);
      bitmap_clear (&lra_split_regs);
      if (! live_p)
	{
	  /* We need full live info for spilling pseudos into
	     registers instead of memory.  */
	  lra_create_live_ranges (lra_reg_spill_p, true);
	  live_p = true;
	}
      /* We should check necessity for spilling here as the above live
	 range pass can remove spilled pseudos.  */
      if (! lra_need_for_spills_p ())
	break;
      /* Now we know what pseudos should be spilled.  Try to
	 rematerialize them first.  */
      if (lra_remat ())
	{
	  /* We need full live info -- see the comment above.  */
	  lra_create_live_ranges (lra_reg_spill_p, true);
	  live_p = true;
	  if (! lra_need_for_spills_p ())
	    {
	      if (lra_need_for_scratch_reg_p ())
		continue;
	      break;
	    }
	}
      lra_spill ();
      /* Assignment of stack slots changes elimination offsets for
	 some eliminations.  So update the offsets here.  */
      lra_eliminate (false, false);
      lra_constraint_new_regno_start = max_reg_num ();
      if (lra_bad_spill_regno_start == INT_MAX
	  && lra_inheritance_iter > LRA_MAX_INHERITANCE_PASSES
	  && lra_rematerialization_iter > LRA_MAX_REMATERIALIZATION_PASSES)
	/* After switching off the inheritance and rematerialization
	   passes, avoid spilling reload pseudos that will be created,
	   to prevent LRA cycling in some complicated cases.  */
	lra_bad_spill_regno_start = lra_constraint_new_regno_start;
      lra_assignment_iter_after_spill = 0;
    }
  restore_scratches ();
  lra_eliminate (true, false);
  lra_final_code_change ();
  lra_in_progress = 0;
  if (live_p)
    lra_clear_live_ranges ();
  lra_live_ranges_finish ();
  lra_constraints_finish ();
  finish_reg_info ();
  sbitmap_free (lra_constraint_insn_stack_bitmap);
  lra_constraint_insn_stack.release ();
  finish_insn_recog_data ();
  regstat_free_n_sets_and_refs ();
  regstat_free_ri ();
  reload_completed = 1;
  update_inc_notes ();

  inserted_p = fixup_abnormal_edges ();

  /* We've possibly turned single trapping insn into multiple ones.  */
  if (cfun->can_throw_non_call_exceptions)
    {
      auto_sbitmap blocks (last_basic_block_for_fn (cfun));
      bitmap_ones (blocks);
      find_many_sub_basic_blocks (blocks);
    }

  if (inserted_p)
    commit_edge_insertions ();

  /* Replacing pseudos with their memory equivalents might have
     created shared rtx.  Subsequent passes would get confused
     by this, so unshare everything here.  */
  unshare_all_rtl_again (get_insns ());

  if (flag_checking)
    check_rtl (true);

  timevar_pop (TV_LRA);
}
|
|
2608
|
|
/* Called once per compiler run to initialize LRA data once.  */
void
lra_init_once (void)
{
  init_insn_code_data_once ();
}
|
|
2615
|
|
/* Called once per compiler run to finish LRA data which are
   initialized once.  */
void
lra_finish_once (void)
{
  finish_insn_code_data_once ();
}
|