Mercurial > hg > CbC > CbC_gcc
annotate gcc/cfgcleanup.c @ 63:b7f97abdc517 gcc-4.6-20100522
update gcc from gcc-4.5.0 to gcc-4.6
author | ryoma <e075725@ie.u-ryukyu.ac.jp> |
---|---|
date | Mon, 24 May 2010 12:47:05 +0900 |
parents | 77e2b8dfacca |
children | f6334be47118 |
rev | line source |
---|---|
0 | 1 /* Control flow optimization code for GNU compiler. |
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010
0 | 4 Free Software Foundation, Inc. |
5 | |
6 This file is part of GCC. | |
7 | |
8 GCC is free software; you can redistribute it and/or modify it under | |
9 the terms of the GNU General Public License as published by the Free | |
10 Software Foundation; either version 3, or (at your option) any later | |
11 version. | |
12 | |
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
16 for more details. | |
17 | |
18 You should have received a copy of the GNU General Public License | |
19 along with GCC; see the file COPYING3. If not see | |
20 <http://www.gnu.org/licenses/>. */ | |
21 | |
22 /* This file contains optimizer of the control flow. The main entry point is | |
23 cleanup_cfg. Following optimizations are performed: | |
24 | |
25 - Unreachable blocks removal | |
26 - Edge forwarding (edge to the forwarder block is forwarded to its | |
27 successor. Simplification of the branch instruction is performed by | |
28 underlying infrastructure so branch can be converted to simplejump or | |
29 eliminated). | |
30 - Cross jumping (tail merging) | |
31 - Conditional jump-around-simplejump simplification | |
32 - Basic block merging. */ | |
33 | |
34 #include "config.h" | |
35 #include "system.h" | |
36 #include "coretypes.h" | |
37 #include "tm.h" | |
38 #include "rtl.h" | |
39 #include "hard-reg-set.h" | |
40 #include "regs.h" | |
41 #include "timevar.h" | |
42 #include "output.h" | |
43 #include "insn-config.h" | |
44 #include "flags.h" | |
45 #include "recog.h" | |
46 #include "toplev.h" | |
47 #include "cselib.h" | |
48 #include "params.h" | |
49 #include "tm_p.h" | |
50 #include "target.h" | |
51 #include "cfglayout.h" | |
52 #include "emit-rtl.h" | |
53 #include "tree-pass.h" | |
54 #include "cfgloop.h" | |
55 #include "expr.h" | |
56 #include "df.h" | |
57 #include "dce.h" | |
58 #include "dbgcnt.h" | |
59 | |
60 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK) | |
61 | |
62 /* Set to true when we are running first pass of try_optimize_cfg loop. */ | |
63 static bool first_pass; | |
64 | |
65 /* Set to true if crossjumps occured in the latest run of try_optimize_cfg. */ | |
66 static bool crossjumps_occured; | |
67 | |
68 static bool try_crossjump_to_edge (int, edge, edge); | |
69 static bool try_crossjump_bb (int, basic_block); | |
70 static bool outgoing_edges_match (int, basic_block, basic_block); | |
71 static bool old_insns_match_p (int, rtx, rtx); | |
72 | |
73 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block); | |
74 static void merge_blocks_move_successor_nojumps (basic_block, basic_block); | |
75 static bool try_optimize_cfg (int); | |
76 static bool try_simplify_condjump (basic_block); | |
77 static bool try_forward_edges (int, basic_block); | |
78 static edge thread_jump (edge, basic_block); | |
79 static bool mark_effect (rtx, bitmap); | |
80 static void notice_new_block (basic_block); | |
81 static void update_forwarder_flag (basic_block); | |
82 static int mentions_nonequal_regs (rtx *, void *); | |
83 static void merge_memattrs (rtx, rtx); | |
84 | |
85 /* Set flags for newly created block. */ | |
86 | |
87 static void | |
88 notice_new_block (basic_block bb) | |
89 { | |
90 if (!bb) | |
91 return; | |
92 | |
93 if (forwarder_block_p (bb)) | |
94 bb->flags |= BB_FORWARDER_BLOCK; | |
95 } | |
96 | |
97 /* Recompute forwarder flag after block has been modified. */ | |
98 | |
99 static void | |
100 update_forwarder_flag (basic_block bb) | |
101 { | |
102 if (forwarder_block_p (bb)) | |
103 bb->flags |= BB_FORWARDER_BLOCK; | |
104 else | |
105 bb->flags &= ~BB_FORWARDER_BLOCK; | |
106 } | |
107 | |
108 /* Simplify a conditional jump around an unconditional jump. | |
109 Return true if something changed. */ | |
110 | |
111 static bool | |
112 try_simplify_condjump (basic_block cbranch_block) | |
113 { | |
114 basic_block jump_block, jump_dest_block, cbranch_dest_block; | |
115 edge cbranch_jump_edge, cbranch_fallthru_edge; | |
116 rtx cbranch_insn; | |
117 | |
118 /* Verify that there are exactly two successors. */ | |
119 if (EDGE_COUNT (cbranch_block->succs) != 2) | |
120 return false; | |
121 | |
122 /* Verify that we've got a normal conditional branch at the end | |
123 of the block. */ | |
124 cbranch_insn = BB_END (cbranch_block); | |
125 if (!any_condjump_p (cbranch_insn)) | |
126 return false; | |
127 | |
128 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block); | |
129 cbranch_jump_edge = BRANCH_EDGE (cbranch_block); | |
130 | |
131 /* The next block must not have multiple predecessors, must not | |
132 be the last block in the function, and must contain just the | |
133 unconditional jump. */ | |
134 jump_block = cbranch_fallthru_edge->dest; | |
135 if (!single_pred_p (jump_block) | |
136 || jump_block->next_bb == EXIT_BLOCK_PTR | |
137 || !FORWARDER_BLOCK_P (jump_block)) | |
138 return false; | |
139 jump_dest_block = single_succ (jump_block); | |
140 | |
141 /* If we are partitioning hot/cold basic blocks, we don't want to | |
142 mess up unconditional or indirect jumps that cross between hot | |
143 and cold sections. | |
144 | |
145 Basic block partitioning may result in some jumps that appear to | |
146 be optimizable (or blocks that appear to be mergeable), but which really | |
147 must be left untouched (they are required to make it safely across | |
148 partition boundaries). See the comments at the top of | |
149 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */ | |
150 | |
151 if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block) | |
152 || (cbranch_jump_edge->flags & EDGE_CROSSING)) | |
153 return false; | |
154 | |
155 /* The conditional branch must target the block after the | |
156 unconditional branch. */ | |
157 cbranch_dest_block = cbranch_jump_edge->dest; | |
158 | |
159 if (cbranch_dest_block == EXIT_BLOCK_PTR | |
160 || !can_fallthru (jump_block, cbranch_dest_block)) | |
161 return false; | |
162 | |
163 /* Invert the conditional branch. */ | |
164 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0)) | |
165 return false; | |
166 | |
167 if (dump_file) | |
168 fprintf (dump_file, "Simplifying condjump %i around jump %i\n", | |
169 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block))); | |
170 | |
171 /* Success. Update the CFG to match. Note that after this point | |
172 the edge variable names appear backwards; the redirection is done | |
173 this way to preserve edge profile data. */ | |
174 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge, | |
175 cbranch_dest_block); | |
176 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge, | |
177 jump_dest_block); | |
178 cbranch_jump_edge->flags |= EDGE_FALLTHRU; | |
179 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU; | |
180 update_br_prob_note (cbranch_block); | |
181 | |
182 /* Delete the block with the unconditional jump, and clean up the mess. */ | |
183 delete_basic_block (jump_block); | |
184 tidy_fallthru_edge (cbranch_jump_edge); | |
185 update_forwarder_flag (cbranch_block); | |
186 | |
187 return true; | |
188 } | |
189 | |
190 /* Attempt to prove that operation is NOOP using CSElib or mark the effect | |
191 on register. Used by jump threading. */ | |
192 | |
193 static bool | |
194 mark_effect (rtx exp, regset nonequal) | |
195 { | |
196 int regno; | |
197 rtx dest; | |
198 switch (GET_CODE (exp)) | |
199 { | |
200 /* In case we do clobber the register, mark it as equal, as we know the | |
201 value is dead so it don't have to match. */ | |
202 case CLOBBER: | |
203 if (REG_P (XEXP (exp, 0))) | |
204 { | |
205 dest = XEXP (exp, 0); | |
206 regno = REGNO (dest); | |
207 CLEAR_REGNO_REG_SET (nonequal, regno); | |
208 if (regno < FIRST_PSEUDO_REGISTER) | |
209 { | |
210 int n = hard_regno_nregs[regno][GET_MODE (dest)]; | |
211 while (--n > 0) | |
212 CLEAR_REGNO_REG_SET (nonequal, regno + n); | |
213 } | |
214 } | |
215 return false; | |
216 | |
217 case SET: | |
218 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp))) | |
219 return false; | |
220 dest = SET_DEST (exp); | |
221 if (dest == pc_rtx) | |
222 return false; | |
223 if (!REG_P (dest)) | |
224 return true; | |
225 regno = REGNO (dest); | |
226 SET_REGNO_REG_SET (nonequal, regno); | |
227 if (regno < FIRST_PSEUDO_REGISTER) | |
228 { | |
229 int n = hard_regno_nregs[regno][GET_MODE (dest)]; | |
230 while (--n > 0) | |
231 SET_REGNO_REG_SET (nonequal, regno + n); | |
232 } | |
233 return false; | |
234 | |
235 default: | |
236 return false; | |
237 } | |
238 } | |
239 | |
240 /* Return nonzero if X is a register set in regset DATA. | |
241 Called via for_each_rtx. */ | |
242 static int | |
243 mentions_nonequal_regs (rtx *x, void *data) | |
244 { | |
245 regset nonequal = (regset) data; | |
246 if (REG_P (*x)) | |
247 { | |
248 int regno; | |
249 | |
250 regno = REGNO (*x); | |
251 if (REGNO_REG_SET_P (nonequal, regno)) | |
252 return 1; | |
253 if (regno < FIRST_PSEUDO_REGISTER) | |
254 { | |
255 int n = hard_regno_nregs[regno][GET_MODE (*x)]; | |
256 while (--n > 0) | |
257 if (REGNO_REG_SET_P (nonequal, regno + n)) | |
258 return 1; | |
259 } | |
260 } | |
261 return 0; | |
262 } | |
263 /* Attempt to prove that the basic block B will have no side effects and | |
264 always continues in the same edge if reached via E. Return the edge | |
265 if exist, NULL otherwise. */ | |
266 | |
267 static edge | |
268 thread_jump (edge e, basic_block b) | |
269 { | |
270 rtx set1, set2, cond1, cond2, insn; | |
271 enum rtx_code code1, code2, reversed_code2; | |
272 bool reverse1 = false; | |
273 unsigned i; | |
274 regset nonequal; | |
275 bool failed = false; | |
276 reg_set_iterator rsi; | |
277 | |
278 if (b->flags & BB_NONTHREADABLE_BLOCK) | |
279 return NULL; | |
280 | |
281 /* At the moment, we do handle only conditional jumps, but later we may | |
282 want to extend this code to tablejumps and others. */ | |
283 if (EDGE_COUNT (e->src->succs) != 2) | |
284 return NULL; | |
285 if (EDGE_COUNT (b->succs) != 2) | |
286 { | |
287 b->flags |= BB_NONTHREADABLE_BLOCK; | |
288 return NULL; | |
289 } | |
290 | |
291 /* Second branch must end with onlyjump, as we will eliminate the jump. */ | |
292 if (!any_condjump_p (BB_END (e->src))) | |
293 return NULL; | |
294 | |
295 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b))) | |
296 { | |
297 b->flags |= BB_NONTHREADABLE_BLOCK; | |
298 return NULL; | |
299 } | |
300 | |
301 set1 = pc_set (BB_END (e->src)); | |
302 set2 = pc_set (BB_END (b)); | |
303 if (((e->flags & EDGE_FALLTHRU) != 0) | |
304 != (XEXP (SET_SRC (set1), 1) == pc_rtx)) | |
305 reverse1 = true; | |
306 | |
307 cond1 = XEXP (SET_SRC (set1), 0); | |
308 cond2 = XEXP (SET_SRC (set2), 0); | |
309 if (reverse1) | |
310 code1 = reversed_comparison_code (cond1, BB_END (e->src)); | |
311 else | |
312 code1 = GET_CODE (cond1); | |
313 | |
314 code2 = GET_CODE (cond2); | |
315 reversed_code2 = reversed_comparison_code (cond2, BB_END (b)); | |
316 | |
317 if (!comparison_dominates_p (code1, code2) | |
318 && !comparison_dominates_p (code1, reversed_code2)) | |
319 return NULL; | |
320 | |
321 /* Ensure that the comparison operators are equivalent. | |
322 ??? This is far too pessimistic. We should allow swapped operands, | |
323 different CCmodes, or for example comparisons for interval, that | |
324 dominate even when operands are not equivalent. */ | |
325 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0)) | |
326 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1))) | |
327 return NULL; | |
328 | |
329 /* Short circuit cases where block B contains some side effects, as we can't | |
330 safely bypass it. */ | |
331 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b)); | |
332 insn = NEXT_INSN (insn)) | |
333 if (INSN_P (insn) && side_effects_p (PATTERN (insn))) | |
334 { | |
335 b->flags |= BB_NONTHREADABLE_BLOCK; | |
336 return NULL; | |
337 } | |
338 | |
63
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
339 cselib_init (0); |
0 | 340 |
341 /* First process all values computed in the source basic block. */ | |
342 for (insn = NEXT_INSN (BB_HEAD (e->src)); | |
343 insn != NEXT_INSN (BB_END (e->src)); | |
344 insn = NEXT_INSN (insn)) | |
345 if (INSN_P (insn)) | |
346 cselib_process_insn (insn); | |
347 | |
348 nonequal = BITMAP_ALLOC (NULL); | |
349 CLEAR_REG_SET (nonequal); | |
350 | |
351 /* Now assume that we've continued by the edge E to B and continue | |
352 processing as if it were same basic block. | |
353 Our goal is to prove that whole block is an NOOP. */ | |
354 | |
355 for (insn = NEXT_INSN (BB_HEAD (b)); | |
356 insn != NEXT_INSN (BB_END (b)) && !failed; | |
357 insn = NEXT_INSN (insn)) | |
358 { | |
359 if (INSN_P (insn)) | |
360 { | |
361 rtx pat = PATTERN (insn); | |
362 | |
363 if (GET_CODE (pat) == PARALLEL) | |
364 { | |
365 for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++) | |
366 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal); | |
367 } | |
368 else | |
369 failed |= mark_effect (pat, nonequal); | |
370 } | |
371 | |
372 cselib_process_insn (insn); | |
373 } | |
374 | |
375 /* Later we should clear nonequal of dead registers. So far we don't | |
376 have life information in cfg_cleanup. */ | |
377 if (failed) | |
378 { | |
379 b->flags |= BB_NONTHREADABLE_BLOCK; | |
380 goto failed_exit; | |
381 } | |
382 | |
383 /* cond2 must not mention any register that is not equal to the | |
384 former block. */ | |
385 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal)) | |
386 goto failed_exit; | |
387 | |
388 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi) | |
389 goto failed_exit; | |
390 | |
391 BITMAP_FREE (nonequal); | |
392 cselib_finish (); | |
393 if ((comparison_dominates_p (code1, code2) != 0) | |
394 != (XEXP (SET_SRC (set2), 1) == pc_rtx)) | |
395 return BRANCH_EDGE (b); | |
396 else | |
397 return FALLTHRU_EDGE (b); | |
398 | |
399 failed_exit: | |
400 BITMAP_FREE (nonequal); | |
401 cselib_finish (); | |
402 return NULL; | |
403 } | |
404 | |
405 /* Attempt to forward edges leaving basic block B. | |
406 Return true if successful. */ | |
407 | |
408 static bool | |
409 try_forward_edges (int mode, basic_block b) | |
410 { | |
411 bool changed = false; | |
412 edge_iterator ei; | |
413 edge e, *threaded_edges = NULL; | |
414 | |
415 /* If we are partitioning hot/cold basic blocks, we don't want to | |
416 mess up unconditional or indirect jumps that cross between hot | |
417 and cold sections. | |
418 | |
419 Basic block partitioning may result in some jumps that appear to | |
420 be optimizable (or blocks that appear to be mergeable), but which really | |
421 must be left untouched (they are required to make it safely across | |
422 partition boundaries). See the comments at the top of | |
423 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */ | |
424 | |
425 if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)) | |
426 return false; | |
427 | |
428 for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); ) | |
429 { | |
430 basic_block target, first; | |
431 int counter, goto_locus; | |
432 bool threaded = false; | |
433 int nthreaded_edges = 0; | |
434 bool may_thread = first_pass | df_get_bb_dirty (b); | |
435 | |
436 /* Skip complex edges because we don't know how to update them. | |
437 | |
438 Still handle fallthru edges, as we can succeed to forward fallthru | |
439 edge to the same place as the branch edge of conditional branch | |
440 and turn conditional branch to an unconditional branch. */ | |
441 if (e->flags & EDGE_COMPLEX) | |
442 { | |
443 ei_next (&ei); | |
444 continue; | |
445 } | |
446 | |
447 target = first = e->dest; | |
448 counter = NUM_FIXED_BLOCKS; | |
449 goto_locus = e->goto_locus; | |
450 | |
451 /* If we are partitioning hot/cold basic_blocks, we don't want to mess | |
452 up jumps that cross between hot/cold sections. | |
453 | |
454 Basic block partitioning may result in some jumps that appear | |
455 to be optimizable (or blocks that appear to be mergeable), but which | |
456 really must be left untouched (they are required to make it safely | |
457 across partition boundaries). See the comments at the top of | |
458 bb-reorder.c:partition_hot_cold_basic_blocks for complete | |
459 details. */ | |
460 | |
461 if (first != EXIT_BLOCK_PTR | |
462 && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX)) | |
463 return false; | |
464 | |
465 while (counter < n_basic_blocks) | |
466 { | |
467 basic_block new_target = NULL; | |
468 bool new_target_threaded = false; | |
469 may_thread |= df_get_bb_dirty (target); | |
470 | |
471 if (FORWARDER_BLOCK_P (target) | |
472 && !(single_succ_edge (target)->flags & EDGE_CROSSING) | |
473 && single_succ (target) != EXIT_BLOCK_PTR) | |
474 { | |
475 /* Bypass trivial infinite loops. */ | |
476 new_target = single_succ (target); | |
477 if (target == new_target) | |
478 counter = n_basic_blocks; | |
479 else if (!optimize) | |
480 { | |
481 /* When not optimizing, ensure that edges or forwarder | |
482 blocks with different locus are not optimized out. */ | |
483 int locus = single_succ_edge (target)->goto_locus; | |
484 | |
485 if (locus && goto_locus && !locator_eq (locus, goto_locus)) | |
486 counter = n_basic_blocks; | |
487 else if (locus) | |
488 goto_locus = locus; | |
489 | |
490 if (INSN_P (BB_END (target))) | |
491 { | |
492 locus = INSN_LOCATOR (BB_END (target)); | |
493 | |
494 if (locus && goto_locus | |
495 && !locator_eq (locus, goto_locus)) | |
496 counter = n_basic_blocks; | |
497 else if (locus) | |
498 goto_locus = locus; | |
499 } | |
500 } | |
501 } | |
502 | |
503 /* Allow to thread only over one edge at time to simplify updating | |
504 of probabilities. */ | |
505 else if ((mode & CLEANUP_THREADING) && may_thread) | |
506 { | |
507 edge t = thread_jump (e, target); | |
508 if (t) | |
509 { | |
510 if (!threaded_edges) | |
511 threaded_edges = XNEWVEC (edge, n_basic_blocks); | |
512 else | |
513 { | |
514 int i; | |
515 | |
516 /* Detect an infinite loop across blocks not | |
517 including the start block. */ | |
518 for (i = 0; i < nthreaded_edges; ++i) | |
519 if (threaded_edges[i] == t) | |
520 break; | |
521 if (i < nthreaded_edges) | |
522 { | |
523 counter = n_basic_blocks; | |
524 break; | |
525 } | |
526 } | |
527 | |
528 /* Detect an infinite loop across the start block. */ | |
529 if (t->dest == b) | |
530 break; | |
531 | |
532 gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS); | |
533 threaded_edges[nthreaded_edges++] = t; | |
534 | |
535 new_target = t->dest; | |
536 new_target_threaded = true; | |
537 } | |
538 } | |
539 | |
540 if (!new_target) | |
541 break; | |
542 | |
543 counter++; | |
544 target = new_target; | |
545 threaded |= new_target_threaded; | |
546 } | |
547 | |
548 if (counter >= n_basic_blocks) | |
549 { | |
550 if (dump_file) | |
551 fprintf (dump_file, "Infinite loop in BB %i.\n", | |
552 target->index); | |
553 } | |
554 else if (target == first) | |
555 ; /* We didn't do anything. */ | |
556 else | |
557 { | |
558 /* Save the values now, as the edge may get removed. */ | |
559 gcov_type edge_count = e->count; | |
560 int edge_probability = e->probability; | |
561 int edge_frequency; | |
562 int n = 0; | |
563 | |
564 e->goto_locus = goto_locus; | |
565 | |
566 /* Don't force if target is exit block. */ | |
567 if (threaded && target != EXIT_BLOCK_PTR) | |
568 { | |
569 notice_new_block (redirect_edge_and_branch_force (e, target)); | |
570 if (dump_file) | |
571 fprintf (dump_file, "Conditionals threaded.\n"); | |
572 } | |
573 else if (!redirect_edge_and_branch (e, target)) | |
574 { | |
575 if (dump_file) | |
576 fprintf (dump_file, | |
577 "Forwarding edge %i->%i to %i failed.\n", | |
578 b->index, e->dest->index, target->index); | |
579 ei_next (&ei); | |
580 continue; | |
581 } | |
582 | |
583 /* We successfully forwarded the edge. Now update profile | |
584 data: for each edge we traversed in the chain, remove | |
585 the original edge's execution count. */ | |
586 edge_frequency = ((edge_probability * b->frequency | |
587 + REG_BR_PROB_BASE / 2) | |
588 / REG_BR_PROB_BASE); | |
589 | |
590 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b)) | |
591 b->flags |= BB_FORWARDER_BLOCK; | |
592 | |
593 do | |
594 { | |
595 edge t; | |
596 | |
597 if (!single_succ_p (first)) | |
598 { | |
599 gcc_assert (n < nthreaded_edges); | |
600 t = threaded_edges [n++]; | |
601 gcc_assert (t->src == first); | |
602 update_bb_profile_for_threading (first, edge_frequency, | |
603 edge_count, t); | |
604 update_br_prob_note (first); | |
605 } | |
606 else | |
607 { | |
608 first->count -= edge_count; | |
609 if (first->count < 0) | |
610 first->count = 0; | |
611 first->frequency -= edge_frequency; | |
612 if (first->frequency < 0) | |
613 first->frequency = 0; | |
614 /* It is possible that as the result of | |
615 threading we've removed edge as it is | |
616 threaded to the fallthru edge. Avoid | |
617 getting out of sync. */ | |
618 if (n < nthreaded_edges | |
619 && first == threaded_edges [n]->src) | |
620 n++; | |
621 t = single_succ_edge (first); | |
622 } | |
623 | |
624 t->count -= edge_count; | |
625 if (t->count < 0) | |
626 t->count = 0; | |
627 first = t->dest; | |
628 } | |
629 while (first != target); | |
630 | |
631 changed = true; | |
632 continue; | |
633 } | |
634 ei_next (&ei); | |
635 } | |
636 | |
637 if (threaded_edges) | |
638 free (threaded_edges); | |
639 return changed; | |
640 } | |
641 | |
642 | |
643 /* Blocks A and B are to be merged into a single block. A has no incoming | |
644 fallthru edge, so it can be moved before B without adding or modifying | |
645 any jumps (aside from the jump from A to B). */ | |
646 | |
647 static void | |
648 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b) | |
649 { | |
650 rtx barrier; | |
651 | |
652 /* If we are partitioning hot/cold basic blocks, we don't want to | |
653 mess up unconditional or indirect jumps that cross between hot | |
654 and cold sections. | |
655 | |
656 Basic block partitioning may result in some jumps that appear to | |
657 be optimizable (or blocks that appear to be mergeable), but which really | |
658 must be left untouched (they are required to make it safely across | |
659 partition boundaries). See the comments at the top of | |
660 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */ | |
661 | |
662 if (BB_PARTITION (a) != BB_PARTITION (b)) | |
663 return; | |
664 | |
665 barrier = next_nonnote_insn (BB_END (a)); | |
666 gcc_assert (BARRIER_P (barrier)); | |
667 delete_insn (barrier); | |
668 | |
669 /* Scramble the insn chain. */ | |
670 if (BB_END (a) != PREV_INSN (BB_HEAD (b))) | |
671 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b))); | |
672 df_set_bb_dirty (a); | |
673 | |
674 if (dump_file) | |
675 fprintf (dump_file, "Moved block %d before %d and merged.\n", | |
676 a->index, b->index); | |
677 | |
678 /* Swap the records for the two blocks around. */ | |
679 | |
680 unlink_block (a); | |
681 link_block (a, b->prev_bb); | |
682 | |
683 /* Now blocks A and B are contiguous. Merge them. */ | |
684 merge_blocks (a, b); | |
685 } | |
686 | |
687 /* Blocks A and B are to be merged into a single block. B has no outgoing | |
688 fallthru edge, so it can be moved after A without adding or modifying | |
689 any jumps (aside from the jump from A to B). */ | |
690 | |
691 static void | |
692 merge_blocks_move_successor_nojumps (basic_block a, basic_block b) | |
693 { | |
694 rtx barrier, real_b_end; | |
695 rtx label, table; | |
696 | |
697 /* If we are partitioning hot/cold basic blocks, we don't want to | |
698 mess up unconditional or indirect jumps that cross between hot | |
699 and cold sections. | |
700 | |
701 Basic block partitioning may result in some jumps that appear to | |
702 be optimizable (or blocks that appear to be mergeable), but which really | |
703 must be left untouched (they are required to make it safely across | |
704 partition boundaries). See the comments at the top of | |
705 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */ | |
706 | |
707 if (BB_PARTITION (a) != BB_PARTITION (b)) | |
708 return; | |
709 | |
710 real_b_end = BB_END (b); | |
711 | |
712 /* If there is a jump table following block B temporarily add the jump table | |
713 to block B so that it will also be moved to the correct location. */ | |
714 if (tablejump_p (BB_END (b), &label, &table) | |
715 && prev_active_insn (label) == BB_END (b)) | |
716 { | |
717 BB_END (b) = table; | |
718 } | |
719 | |
720 /* There had better have been a barrier there. Delete it. */ | |
721 barrier = NEXT_INSN (BB_END (b)); | |
722 if (barrier && BARRIER_P (barrier)) | |
723 delete_insn (barrier); | |
724 | |
725 | |
726 /* Scramble the insn chain. */ | |
727 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a)); | |
728 | |
729 /* Restore the real end of b. */ | |
730 BB_END (b) = real_b_end; | |
731 | |
732 if (dump_file) | |
733 fprintf (dump_file, "Moved block %d after %d and merged.\n", | |
734 b->index, a->index); | |
735 | |
736 /* Now blocks A and B are contiguous. Merge them. */ | |
737 merge_blocks (a, b); | |
738 } | |
739 | |
740 /* Attempt to merge basic blocks that are potentially non-adjacent. | |
741 Return NULL iff the attempt failed, otherwise return basic block | |
742 where cleanup_cfg should continue. Because the merging commonly | |
743 moves basic block away or introduces another optimization | |
744 possibility, return basic block just before B so cleanup_cfg don't | |
745 need to iterate. | |
746 | |
747 It may be good idea to return basic block before C in the case | |
748 C has been moved after B and originally appeared earlier in the | |
749 insn sequence, but we have no information available about the | |
750 relative ordering of these two. Hopefully it is not too common. */ | |
751 | |
752 static basic_block | |
753 merge_blocks_move (edge e, basic_block b, basic_block c, int mode) | |
754 { | |
755 basic_block next; | |
756 | |
757 /* If we are partitioning hot/cold basic blocks, we don't want to | |
758 mess up unconditional or indirect jumps that cross between hot | |
759 and cold sections. | |
760 | |
761 Basic block partitioning may result in some jumps that appear to | |
762 be optimizable (or blocks that appear to be mergeable), but which really | |
763 must be left untouched (they are required to make it safely across | |
764 partition boundaries). See the comments at the top of | |
765 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */ | |
766 | |
767 if (BB_PARTITION (b) != BB_PARTITION (c)) | |
768 return NULL; | |
769 | |
770 /* If B has a fallthru edge to C, no need to move anything. */ | |
771 if (e->flags & EDGE_FALLTHRU) | |
772 { | |
773 int b_index = b->index, c_index = c->index; | |
774 merge_blocks (b, c); | |
775 update_forwarder_flag (b); | |
776 | |
777 if (dump_file) | |
778 fprintf (dump_file, "Merged %d and %d without moving.\n", | |
779 b_index, c_index); | |
780 | |
781 return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb; | |
782 } | |
783 | |
784 /* Otherwise we will need to move code around. Do that only if expensive | |
785 transformations are allowed. */ | |
786 else if (mode & CLEANUP_EXPENSIVE) | |
787 { | |
788 edge tmp_edge, b_fallthru_edge; | |
789 bool c_has_outgoing_fallthru; | |
790 bool b_has_incoming_fallthru; | |
791 edge_iterator ei; | |
792 | |
793 /* Avoid overactive code motion, as the forwarder blocks should be | |
794 eliminated by edge redirection instead. One exception might have | |
795 been if B is a forwarder block and C has no fallthru edge, but | |
796 that should be cleaned up by bb-reorder instead. */ | |
797 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c)) | |
798 return NULL; | |
799 | |
800 /* We must make sure to not munge nesting of lexical blocks, | |
801 and loop notes. This is done by squeezing out all the notes | |
802 and leaving them there to lie. Not ideal, but functional. */ | |
803 | |
804 FOR_EACH_EDGE (tmp_edge, ei, c->succs) | |
805 if (tmp_edge->flags & EDGE_FALLTHRU) | |
806 break; | |
807 | |
808 c_has_outgoing_fallthru = (tmp_edge != NULL); | |
809 | |
810 FOR_EACH_EDGE (tmp_edge, ei, b->preds) | |
811 if (tmp_edge->flags & EDGE_FALLTHRU) | |
812 break; | |
813 | |
814 b_has_incoming_fallthru = (tmp_edge != NULL); | |
815 b_fallthru_edge = tmp_edge; | |
816 next = b->prev_bb; | |
817 if (next == c) | |
818 next = next->prev_bb; | |
819 | |
820 /* Otherwise, we're going to try to move C after B. If C does | |
821 not have an outgoing fallthru, then it can be moved | |
822 immediately after B without introducing or modifying jumps. */ | |
823 if (! c_has_outgoing_fallthru) | |
824 { | |
825 merge_blocks_move_successor_nojumps (b, c); | |
826 return next == ENTRY_BLOCK_PTR ? next->next_bb : next; | |
827 } | |
828 | |
829 /* If B does not have an incoming fallthru, then it can be moved | |
830 immediately before C without introducing or modifying jumps. | |
831 C cannot be the first block, so we do not have to worry about | |
832 accessing a non-existent block. */ | |
833 | |
834 if (b_has_incoming_fallthru) | |
835 { | |
836 basic_block bb; | |
837 | |
838 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR) | |
839 return NULL; | |
840 bb = force_nonfallthru (b_fallthru_edge); | |
841 if (bb) | |
842 notice_new_block (bb); | |
843 } | |
844 | |
845 merge_blocks_move_predecessor_nojumps (b, c); | |
846 return next == ENTRY_BLOCK_PTR ? next->next_bb : next; | |
847 } | |
848 | |
849 return NULL; | |
850 } | |
851 | |
852 | |
/* Removes the memory attributes of MEM expression
   if they are not equal.

   X and Y are walked in lockstep; whenever matching MEM subexpressions
   are found, the attributes of both are weakened to the most
   conservative set valid for either access, so that the two insns can
   safely be merged into one by cross jumping.  */

void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  /* Identical (or absent) expressions need no merging.  */
  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  /* Structurally different rtx'es cannot correspond; leave them alone.  */
  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  /* For a pair of MEMs whose attribute sets differ, merge the
     attributes of both sides down to what is valid for either one.  */
  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      /* If either side carries no attributes at all, the conservative
	 merge is no attributes on both.  */
      if (! MEM_ATTRS (x))
	MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
	MEM_ATTRS (x) = 0;
      else
	{
	  rtx mem_size;

	  /* Differing alias sets degrade to alias set 0.  */
	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
	    {
	      set_mem_alias_set (x, 0);
	      set_mem_alias_set (y, 0);
	    }

	  /* If the underlying exprs differ, drop the expr on both
	     sides, and also the offset, which is only meaningful
	     relative to an expr.  */
	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
	    {
	      set_mem_expr (x, 0);
	      set_mem_expr (y, 0);
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }
	  else if (MEM_OFFSET (x) != MEM_OFFSET (y))
	    {
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }

	  /* Merged size is unknown if either side's size is unknown,
	     otherwise the larger of the two.  */
	  if (!MEM_SIZE (x))
	    mem_size = NULL_RTX;
	  else if (!MEM_SIZE (y))
	    mem_size = NULL_RTX;
	  else
	    mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
				     INTVAL (MEM_SIZE (y))));
	  set_mem_size (x, mem_size);
	  set_mem_size (y, mem_size);

	  /* Alignment degrades to the weaker guarantee.  X is updated
	     first, so copying its (new) alignment to Y gives both
	     sides the common minimum.  */
	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
	  set_mem_align (y, MEM_ALIGN (x));
	}
    }

  /* Recurse into the corresponding sub-rtx'es and sub-vectors.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

	  break;

	case 'e':
	  merge_memattrs (XEXP (x, i), XEXP (y, i));
	}
    }
  return;
}
942 | |
943 | |
944 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */ | |
945 | |
946 static bool | |
947 old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2) | |
948 { | |
949 rtx p1, p2; | |
950 | |
951 /* Verify that I1 and I2 are equivalent. */ | |
952 if (GET_CODE (i1) != GET_CODE (i2)) | |
953 return false; | |
954 | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
955 /* __builtin_unreachable() may lead to empty blocks (ending with |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
956 NOTE_INSN_BASIC_BLOCK). They may be crossjumped. */ |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
957 if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2)) |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
958 return true; |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
959 |
0 | 960 p1 = PATTERN (i1); |
961 p2 = PATTERN (i2); | |
962 | |
963 if (GET_CODE (p1) != GET_CODE (p2)) | |
964 return false; | |
965 | |
966 /* If this is a CALL_INSN, compare register usage information. | |
967 If we don't check this on stack register machines, the two | |
968 CALL_INSNs might be merged leaving reg-stack.c with mismatching | |
969 numbers of stack registers in the same basic block. | |
970 If we don't check this on machines with delay slots, a delay slot may | |
971 be filled that clobbers a parameter expected by the subroutine. | |
972 | |
973 ??? We take the simple route for now and assume that if they're | |
63
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
974 equal, they were constructed identically. |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
975 |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
976 Also check for identical exception regions. */ |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
977 |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
978 if (CALL_P (i1)) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
979 { |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
980 /* Ensure the same EH region. */ |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
981 rtx n1 = find_reg_note (i1, REG_EH_REGION, 0); |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
982 rtx n2 = find_reg_note (i2, REG_EH_REGION, 0); |
0 | 983 |
63
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
984 if (!n1 && n2) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
985 return false; |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
986 |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
987 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0))) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
988 return false; |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
989 |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
990 if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1), |
0 | 991 CALL_INSN_FUNCTION_USAGE (i2)) |
63
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
992 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
993 return false; |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
994 } |
0 | 995 |
996 #ifdef STACK_REGS | |
997 /* If cross_jump_death_matters is not 0, the insn's mode | |
998 indicates whether or not the insn contains any stack-like | |
999 regs. */ | |
1000 | |
1001 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1)) | |
1002 { | |
1003 /* If register stack conversion has already been done, then | |
1004 death notes must also be compared before it is certain that | |
1005 the two instruction streams match. */ | |
1006 | |
1007 rtx note; | |
1008 HARD_REG_SET i1_regset, i2_regset; | |
1009 | |
1010 CLEAR_HARD_REG_SET (i1_regset); | |
1011 CLEAR_HARD_REG_SET (i2_regset); | |
1012 | |
1013 for (note = REG_NOTES (i1); note; note = XEXP (note, 1)) | |
1014 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0))) | |
1015 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0))); | |
1016 | |
1017 for (note = REG_NOTES (i2); note; note = XEXP (note, 1)) | |
1018 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0))) | |
1019 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0))); | |
1020 | |
1021 if (!hard_reg_set_equal_p (i1_regset, i2_regset)) | |
1022 return false; | |
1023 } | |
1024 #endif | |
1025 | |
1026 if (reload_completed | |
1027 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2)) | |
1028 return true; | |
1029 | |
1030 return false; | |
1031 } | |
1032 | |
63
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1033 /* When comparing insns I1 and I2 in flow_find_cross_jump or |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1034 flow_find_head_matching_sequence, ensure the notes match. */ |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1035 |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1036 static void |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1037 merge_notes (rtx i1, rtx i2) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1038 { |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1039 /* If the merged insns have different REG_EQUAL notes, then |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1040 remove them. */ |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1041 rtx equiv1 = find_reg_equal_equiv_note (i1); |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1042 rtx equiv2 = find_reg_equal_equiv_note (i2); |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1043 |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1044 if (equiv1 && !equiv2) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1045 remove_note (i1, equiv1); |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1046 else if (!equiv1 && equiv2) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1047 remove_note (i2, equiv2); |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1048 else if (equiv1 && equiv2 |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1049 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0))) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1050 { |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1051 remove_note (i1, equiv1); |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1052 remove_note (i2, equiv2); |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1053 } |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1054 } |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1055 |
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence that are equivalent.  Store the first insns for that sequence
   in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */

int
flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx *f1, rtx *f2)
{
  /* LAST1/LAST2 are the earliest matched insns found so far (the start
     of the common tail); AFTERLAST1/AFTERLAST2 lag one matched insn
     behind so the cc0 back-off below can drop the last match.  */
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for unconditional jump as insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
	ninsns++;
      i2 = PREV_INSN (i2);
    }

  /* Walk both blocks backwards in lockstep while the insns match.  */
  while (true)
    {
      /* Ignore notes.  */
      while (!NONDEBUG_INSN_P (i1) && i1 != BB_HEAD (bb1))
	i1 = PREV_INSN (i1);

      while (!NONDEBUG_INSN_P (i2) && i2 != BB_HEAD (bb2))
	i2 = PREV_INSN (i2);

      /* Reaching the head of either block ends the scan.  */
      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
	break;

      if (!old_insns_match_p (0, i1, i2))
	break;

      /* The pair matches; make their MEM attributes and REG_EQUAL
	 notes compatible so either copy can survive the merge.  */
      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  merge_notes (i1, i2);

	  afterlast1 = last1, afterlast2 = last2;
	  last1 = i1, last2 = i2;
	  ninsns++;
	}

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
1153 | |
/* Like flow_find_cross_jump, except start looking for a matching sequence from
   the head of the two blocks.  Do not include jumps at the end.
   If STOP_AFTER is nonzero, stop after finding that many matching
   instructions.  */

int
flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx *f1,
				  rtx *f2, int stop_after)
{
  /* LAST1/LAST2 are the latest matched insns found so far (the end of
     the common head); BEFORELAST1/BEFORELAST2 lag one matched insn
     behind so the cc0 back-off below can drop the last match.  */
  rtx i1, i2, last1, last2, beforelast1, beforelast2;
  int ninsns = 0;
  edge e;
  edge_iterator ei;
  int nehedges1 = 0, nehedges2 = 0;

  /* Count the EH successor edges of each block for the sanity check
     inside the loop.  */
  FOR_EACH_EDGE (e, ei, bb1->succs)
    if (e->flags & EDGE_EH)
      nehedges1++;
  FOR_EACH_EDGE (e, ei, bb2->succs)
    if (e->flags & EDGE_EH)
      nehedges2++;

  i1 = BB_HEAD (bb1);
  i2 = BB_HEAD (bb2);
  last1 = beforelast1 = last2 = beforelast2 = NULL_RTX;

  /* Walk both blocks forward in lockstep while the insns match.  */
  while (true)
    {

      /* Ignore notes.  */
      while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
	i1 = NEXT_INSN (i1);

      while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
	i2 = NEXT_INSN (i2);

      /* Stop at trailing notes (an empty block) and never match past
	 a jump, as the header comment promises.  */
      if (NOTE_P (i1) || NOTE_P (i2)
	  || JUMP_P (i1) || JUMP_P (i2))
	break;

      /* A sanity check to make sure we're not merging insns with different
	 effects on EH.  If only one of them ends a basic block, it shouldn't
	 have an EH edge; if both end a basic block, there should be the same
	 number of EH edges.  */
      if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
	   && nehedges1 > 0)
	  || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
	      && nehedges2 > 0)
	  || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
	      && nehedges1 != nehedges2))
	break;

      if (!old_insns_match_p (0, i1, i2))
	break;

      /* The pair matches; make their MEM attributes and REG_EQUAL
	 notes compatible so either copy can survive the merge.  */
      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  merge_notes (i1, i2);

	  beforelast1 = last1, beforelast2 = last2;
	  last1 = i1, last2 = i2;
	  ninsns++;
	}

      /* Stop at the block ends, or once the caller's quota of matched
	 insns has been reached.  */
      if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
	  || (stop_after > 0 && ninsns == stop_after))
	break;

      i1 = NEXT_INSN (i1);
      i2 = NEXT_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow a compare to be shared by cross-jumping unless the insn
     after the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && sets_cc0_p (last1))
    last1 = beforelast1, last2 = beforelast2, ninsns--;
#endif

  /* *F1 and *F2 are only written when something matched; callers must
     check the returned count before using them.  */
  if (ninsns)
    {
      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
1244 |
0 | 1245 /* Return true iff outgoing edges of BB1 and BB2 match, together with |
1246 the branch instruction. This means that if we commonize the control | |
1247 flow before end of the basic block, the semantic remains unchanged. | |
1248 | |
1249 We may assume that there exists one edge with a common destination. */ | |
1250 | |
1251 static bool | |
1252 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2) | |
1253 { | |
1254 int nehedges1 = 0, nehedges2 = 0; | |
1255 edge fallthru1 = 0, fallthru2 = 0; | |
1256 edge e1, e2; | |
1257 edge_iterator ei; | |
1258 | |
1259 /* If BB1 has only one successor, we may be looking at either an | |
1260 unconditional jump, or a fake edge to exit. */ | |
1261 if (single_succ_p (bb1) | |
1262 && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0 | |
1263 && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1)))) | |
1264 return (single_succ_p (bb2) | |
1265 && (single_succ_edge (bb2)->flags | |
1266 & (EDGE_COMPLEX | EDGE_FAKE)) == 0 | |
1267 && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2)))); | |
1268 | |
1269 /* Match conditional jumps - this may get tricky when fallthru and branch | |
1270 edges are crossed. */ | |
1271 if (EDGE_COUNT (bb1->succs) == 2 | |
1272 && any_condjump_p (BB_END (bb1)) | |
1273 && onlyjump_p (BB_END (bb1))) | |
1274 { | |
1275 edge b1, f1, b2, f2; | |
1276 bool reverse, match; | |
1277 rtx set1, set2, cond1, cond2; | |
1278 enum rtx_code code1, code2; | |
1279 | |
1280 if (EDGE_COUNT (bb2->succs) != 2 | |
1281 || !any_condjump_p (BB_END (bb2)) | |
1282 || !onlyjump_p (BB_END (bb2))) | |
1283 return false; | |
1284 | |
1285 b1 = BRANCH_EDGE (bb1); | |
1286 b2 = BRANCH_EDGE (bb2); | |
1287 f1 = FALLTHRU_EDGE (bb1); | |
1288 f2 = FALLTHRU_EDGE (bb2); | |
1289 | |
1290 /* Get around possible forwarders on fallthru edges. Other cases | |
1291 should be optimized out already. */ | |
1292 if (FORWARDER_BLOCK_P (f1->dest)) | |
1293 f1 = single_succ_edge (f1->dest); | |
1294 | |
1295 if (FORWARDER_BLOCK_P (f2->dest)) | |
1296 f2 = single_succ_edge (f2->dest); | |
1297 | |
1298 /* To simplify use of this function, return false if there are | |
1299 unneeded forwarder blocks. These will get eliminated later | |
1300 during cleanup_cfg. */ | |
1301 if (FORWARDER_BLOCK_P (f1->dest) | |
1302 || FORWARDER_BLOCK_P (f2->dest) | |
1303 || FORWARDER_BLOCK_P (b1->dest) | |
1304 || FORWARDER_BLOCK_P (b2->dest)) | |
1305 return false; | |
1306 | |
1307 if (f1->dest == f2->dest && b1->dest == b2->dest) | |
1308 reverse = false; | |
1309 else if (f1->dest == b2->dest && b1->dest == f2->dest) | |
1310 reverse = true; | |
1311 else | |
1312 return false; | |
1313 | |
1314 set1 = pc_set (BB_END (bb1)); | |
1315 set2 = pc_set (BB_END (bb2)); | |
1316 if ((XEXP (SET_SRC (set1), 1) == pc_rtx) | |
1317 != (XEXP (SET_SRC (set2), 1) == pc_rtx)) | |
1318 reverse = !reverse; | |
1319 | |
1320 cond1 = XEXP (SET_SRC (set1), 0); | |
1321 cond2 = XEXP (SET_SRC (set2), 0); | |
1322 code1 = GET_CODE (cond1); | |
1323 if (reverse) | |
1324 code2 = reversed_comparison_code (cond2, BB_END (bb2)); | |
1325 else | |
1326 code2 = GET_CODE (cond2); | |
1327 | |
1328 if (code2 == UNKNOWN) | |
1329 return false; | |
1330 | |
1331 /* Verify codes and operands match. */ | |
1332 match = ((code1 == code2 | |
1333 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0)) | |
1334 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1))) | |
1335 || (code1 == swap_condition (code2) | |
1336 && rtx_renumbered_equal_p (XEXP (cond1, 1), | |
1337 XEXP (cond2, 0)) | |
1338 && rtx_renumbered_equal_p (XEXP (cond1, 0), | |
1339 XEXP (cond2, 1)))); | |
1340 | |
1341 /* If we return true, we will join the blocks. Which means that | |
1342 we will only have one branch prediction bit to work with. Thus | |
1343 we require the existing branches to have probabilities that are | |
1344 roughly similar. */ | |
1345 if (match | |
1346 && optimize_bb_for_speed_p (bb1) | |
1347 && optimize_bb_for_speed_p (bb2)) | |
1348 { | |
1349 int prob2; | |
1350 | |
1351 if (b1->dest == b2->dest) | |
1352 prob2 = b2->probability; | |
1353 else | |
1354 /* Do not use f2 probability as f2 may be forwarded. */ | |
1355 prob2 = REG_BR_PROB_BASE - b2->probability; | |
1356 | |
1357 /* Fail if the difference in probabilities is greater than 50%. | |
1358 This rules out two well-predicted branches with opposite | |
1359 outcomes. */ | |
1360 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2) | |
1361 { | |
1362 if (dump_file) | |
1363 fprintf (dump_file, | |
1364 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n", | |
1365 bb1->index, bb2->index, b1->probability, prob2); | |
1366 | |
1367 return false; | |
1368 } | |
1369 } | |
1370 | |
1371 if (dump_file && match) | |
1372 fprintf (dump_file, "Conditionals in bb %i and %i match.\n", | |
1373 bb1->index, bb2->index); | |
1374 | |
1375 return match; | |
1376 } | |
1377 | |
1378 /* Generic case - we are seeing a computed jump, table jump or trapping | |
1379 instruction. */ | |
1380 | |
1381 /* Check whether there are tablejumps in the end of BB1 and BB2. | |
1382 Return true if they are identical. */ | |
1383 { | |
1384 rtx label1, label2; | |
1385 rtx table1, table2; | |
1386 | |
1387 if (tablejump_p (BB_END (bb1), &label1, &table1) | |
1388 && tablejump_p (BB_END (bb2), &label2, &table2) | |
1389 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2))) | |
1390 { | |
1391 /* The labels should never be the same rtx. If they really are same | |
1392 the jump tables are same too. So disable crossjumping of blocks BB1 | |
1393 and BB2 because when deleting the common insns in the end of BB1 | |
1394 by delete_basic_block () the jump table would be deleted too. */ | |
1395 /* If LABEL2 is referenced in BB1->END do not do anything | |
1396 because we would loose information when replacing | |
1397 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */ | |
1398 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1))) | |
1399 { | |
1400 /* Set IDENTICAL to true when the tables are identical. */ | |
1401 bool identical = false; | |
1402 rtx p1, p2; | |
1403 | |
1404 p1 = PATTERN (table1); | |
1405 p2 = PATTERN (table2); | |
1406 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2)) | |
1407 { | |
1408 identical = true; | |
1409 } | |
1410 else if (GET_CODE (p1) == ADDR_DIFF_VEC | |
1411 && (XVECLEN (p1, 1) == XVECLEN (p2, 1)) | |
1412 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2)) | |
1413 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3))) | |
1414 { | |
1415 int i; | |
1416 | |
1417 identical = true; | |
1418 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--) | |
1419 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i))) | |
1420 identical = false; | |
1421 } | |
1422 | |
1423 if (identical) | |
1424 { | |
1425 replace_label_data rr; | |
1426 bool match; | |
1427 | |
1428 /* Temporarily replace references to LABEL1 with LABEL2 | |
1429 in BB1->END so that we could compare the instructions. */ | |
1430 rr.r1 = label1; | |
1431 rr.r2 = label2; | |
1432 rr.update_label_nuses = false; | |
1433 for_each_rtx (&BB_END (bb1), replace_label, &rr); | |
1434 | |
1435 match = old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)); | |
1436 if (dump_file && match) | |
1437 fprintf (dump_file, | |
1438 "Tablejumps in bb %i and %i match.\n", | |
1439 bb1->index, bb2->index); | |
1440 | |
1441 /* Set the original label in BB1->END because when deleting | |
1442 a block whose end is a tablejump, the tablejump referenced | |
1443 from the instruction is deleted too. */ | |
1444 rr.r1 = label2; | |
1445 rr.r2 = label1; | |
1446 for_each_rtx (&BB_END (bb1), replace_label, &rr); | |
1447 | |
1448 return match; | |
1449 } | |
1450 } | |
1451 return false; | |
1452 } | |
1453 } | |
1454 | |
1455 /* First ensure that the instructions match. There may be many outgoing | |
1456 edges so this test is generally cheaper. */ | |
1457 if (!old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))) | |
1458 return false; | |
1459 | |
1460 /* Search the outgoing edges, ensure that the counts do match, find possible | |
1461 fallthru and exception handling edges since these needs more | |
1462 validation. */ | |
1463 if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs)) | |
1464 return false; | |
1465 | |
1466 FOR_EACH_EDGE (e1, ei, bb1->succs) | |
1467 { | |
1468 e2 = EDGE_SUCC (bb2, ei.index); | |
1469 | |
1470 if (e1->flags & EDGE_EH) | |
1471 nehedges1++; | |
1472 | |
1473 if (e2->flags & EDGE_EH) | |
1474 nehedges2++; | |
1475 | |
1476 if (e1->flags & EDGE_FALLTHRU) | |
1477 fallthru1 = e1; | |
1478 if (e2->flags & EDGE_FALLTHRU) | |
1479 fallthru2 = e2; | |
1480 } | |
1481 | |
1482 /* If number of edges of various types does not match, fail. */ | |
1483 if (nehedges1 != nehedges2 | |
1484 || (fallthru1 != 0) != (fallthru2 != 0)) | |
1485 return false; | |
1486 | |
1487 /* fallthru edges must be forwarded to the same destination. */ | |
1488 if (fallthru1) | |
1489 { | |
1490 basic_block d1 = (forwarder_block_p (fallthru1->dest) | |
1491 ? single_succ (fallthru1->dest): fallthru1->dest); | |
1492 basic_block d2 = (forwarder_block_p (fallthru2->dest) | |
1493 ? single_succ (fallthru2->dest): fallthru2->dest); | |
1494 | |
1495 if (d1 != d2) | |
1496 return false; | |
1497 } | |
1498 | |
1499 /* Ensure the same EH region. */ | |
1500 { | |
1501 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0); | |
1502 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0); | |
1503 | |
1504 if (!n1 && n2) | |
1505 return false; | |
1506 | |
1507 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0))) | |
1508 return false; | |
1509 } | |
1510 | |
1511 /* The same checks as in try_crossjump_to_edge. It is required for RTL | |
1512 version of sequence abstraction. */ | |
1513 FOR_EACH_EDGE (e1, ei, bb2->succs) | |
1514 { | |
1515 edge e2; | |
1516 edge_iterator ei; | |
1517 basic_block d1 = e1->dest; | |
1518 | |
1519 if (FORWARDER_BLOCK_P (d1)) | |
1520 d1 = EDGE_SUCC (d1, 0)->dest; | |
1521 | |
1522 FOR_EACH_EDGE (e2, ei, bb1->succs) | |
1523 { | |
1524 basic_block d2 = e2->dest; | |
1525 if (FORWARDER_BLOCK_P (d2)) | |
1526 d2 = EDGE_SUCC (d2, 0)->dest; | |
1527 if (d1 == d2) | |
1528 break; | |
1529 } | |
1530 | |
1531 if (!e2) | |
1532 return false; | |
1533 } | |
1534 | |
1535 return true; | |
1536 } | |
1537 | |
1538 /* Returns true if BB basic block has a preserve label. */ | |
1539 | |
1540 static bool | |
1541 block_has_preserve_label (basic_block bb) | |
1542 { | |
1543 return (bb | |
1544 && block_label (bb) | |
1545 && LABEL_PRESERVE_P (block_label (bb))); | |
1546 } | |
1547 | |
/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.
   Return true if any redirection was done.  */

static bool
try_crossjump_to_edge (int mode, edge e1, edge e2)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  rtx newpos1, newpos2;
  edge s;
  edge_iterator ei;

  /* NEWPOS1/NEWPOS2 receive the first insn of the common tail in each
     block from flow_find_cross_jump below.  */
  newpos1 = newpos2 = NULL_RTX;

  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition && reload_completed)
    return false;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (single_pred_p (src1)
      && FORWARDER_BLOCK_P (src1))
    e1 = single_pred_edge (src1), src1 = e1->src;

  if (single_pred_p (src2)
      && FORWARDER_BLOCK_P (src2))
    e2 = single_pred_edge (src2), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than 1 forwarder blocks would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
    return false;

  /* Likewise with dead code (possibly newly created by the other optimizations
     of cfg_cleanup).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2);

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (newpos1 != BB_HEAD (src1)))
    return false;

  /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
  if (block_has_preserve_label (e1->dest)
      && (e1->flags & EDGE_ABNORMAL))
    return false;

  /* Here we know that the insns in the end of SRC1 which are common with SRC2
     will be deleted.
     If we have tablejumps in the end of SRC1 and SRC2
     they have been already compared for equivalence in outgoing_edges_match ()
     so replace the references to TABLE1 by references to TABLE2.  */
  {
     rtx label1, label2;
     rtx table1, table2;

     if (tablejump_p (BB_END (src1), &label1, &table1)
	 && tablejump_p (BB_END (src2), &label2, &table2)
	 && label1 != label2)
       {
	 replace_label_data rr;
	 rtx insn;

	 /* Replace references to LABEL1 with LABEL2.  Note this walks
	    the entire insn chain, since references to the jump table
	    label may live far from the tablejump itself.  */
	 rr.r1 = label1;
	 rr.r2 = label2;
	 rr.update_label_nuses = true;
	 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	   {
	     /* Do not replace the label in SRC1->END because when deleting
		a block whose end is a tablejump, the tablejump referenced
		from the instruction is deleted too.  */
	     if (insn != BB_END (src1))
	       for_each_rtx (&insn, replace_label, &rr);
	   }
       }
  }

  /* Avoid splitting if possible.  We must always split when SRC2 has
     EH predecessor edges, or we may end up with basic blocks with both
     normal and EH predecessor edges.  */
  if (newpos2 == BB_HEAD (src2)
      && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
    redirect_to = src2;
  else
    {
      if (newpos2 == BB_HEAD (src2))
	{
	  /* Skip possible basic block header (label, debug insns and
	     the basic block note) so that the split point lands on the
	     first real insn of the common tail.  */
	  if (LABEL_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  while (DEBUG_INSN_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  if (NOTE_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  while (DEBUG_INSN_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	}

      if (dump_file)
	fprintf (dump_file, "Splitting bb %i before %i insns\n",
		 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
	     "Cross jumping from bb %i to bb %i; %i common insns\n",
	     src1->index, src2->index, nmatch);

  /* We may have some registers visible through the block.  */
  df_set_bb_dirty (redirect_to);

  /* Recompute the frequencies and counts of outgoing edges.  */
  FOR_EACH_EDGE (s, ei, redirect_to->succs)
    {
      edge s2;
      edge_iterator ei;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
	d = single_succ (d);

      /* Find the edge S2 out of SRC1 reaching the same (forwarded)
	 destination as S.  NOTE(review): S2 is used unconditionally
	 below, so this loop is assumed to always find a match --
	 presumably guaranteed by outgoing_edges_match; verify.  */
      FOR_EACH_EDGE (s2, ei, src1->succs)
	{
	  basic_block d2 = s2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = single_succ (d2);
	  if (d == d2)
	    break;
	}

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
	 that there is no more than one in the chain, so we can't run
	 into infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
	{
	  single_succ_edge (s->dest)->count += s2->count;
	  s->dest->count += s2->count;
	  s->dest->frequency += EDGE_FREQUENCY (s);
	}

      if (FORWARDER_BLOCK_P (s2->dest))
	{
	  /* Clamp at zero: profile data may already be inconsistent,
	     so the subtractions can underflow.  */
	  single_succ_edge (s2->dest)->count -= s2->count;
	  if (single_succ_edge (s2->dest)->count < 0)
	    single_succ_edge (s2->dest)->count = 0;
	  s2->dest->count -= s2->count;
	  s2->dest->frequency -= EDGE_FREQUENCY (s);
	  if (s2->dest->frequency < 0)
	    s2->dest->frequency = 0;
	  if (s2->dest->count < 0)
	    s2->dest->count = 0;
	}

      /* Merge the branch probabilities, weighting by block frequency
	 when any frequency information is available.  */
      if (!redirect_to->frequency && !src1->frequency)
	s->probability = (s->probability + s2->probability) / 2;
      else
	s->probability
	  = ((s->probability * redirect_to->frequency +
	      s2->probability * src1->frequency)
	     / (redirect_to->frequency + src1->frequency));
    }

  /* Adjust count and frequency for the block.  An earlier jump
     threading pass may have left the profile in an inconsistent
     state (see update_bb_profile_for_threading) so we must be
     prepared for overflows.  */
  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;
  if (redirect_to->frequency > BB_FREQ_MAX)
    redirect_to->frequency = BB_FREQ_MAX;
  update_br_prob_note (redirect_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header (label, debug insns, bb note),
     mirroring the NEWPOS2 adjustment above.  */
  if (LABEL_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  while (DEBUG_INSN_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  while (DEBUG_INSN_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  /* Split SRC1 just before the common tail; the tail half (TO_REMOVE)
     becomes redundant once its sole successor edge is forced to
     REDIRECT_TO, so delete it.  */
  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = single_succ (redirect_from);

  redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);
  if (redirect_to != src2)
    update_forwarder_flag (src2);

  return true;
}
1784 | |
/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes made.  */

static bool
try_crossjump_bb (int mode, basic_block bb)
{
  edge e, e2, fallthru;
  bool changed;
  unsigned max, ix, ix2;
  basic_block ev, ev2;
  edge_iterator ei;

  /* Nothing to do if there is not at least two incoming edges.  */
  if (EDGE_COUNT (bb->preds) < 2)
    return false;

  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.
     NOTE(review): the test below actually *refuses* computed jumps
     when optimize_bb_for_size_p is true, which reads as the opposite
     of this comment -- verify which polarity is intended.  */
  if (optimize_bb_for_size_p (bb)
      && bb != EXIT_BLOCK_PTR
      && computed_jump_p (BB_END (bb)))
    return false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
      BB_PARTITION (EDGE_PRED (bb, 1)->src)
      || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  fallthru = NULL;
  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);

  /* Give up on blocks with very many predecessors; the pairwise
     comparison below is quadratic in the edge count.  */
  if (EDGE_COUNT (bb->preds) > max)
    return false;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->flags & EDGE_FALLTHRU)
	{
	  fallthru = e;
	  break;
	}
    }

  /* Iterate by explicit index rather than an edge iterator:
     try_crossjump_to_edge mutates BB's predecessor vector, and on any
     change we restart the scan from index 0.  */
  changed = false;
  for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
    {
      e = EDGE_PRED (ev, ix);
      ix++;

      /* As noted above, first try with the fallthru predecessor (or, a
	 fallthru predecessor if we are in cfglayout mode).  */
      if (fallthru)
	{
	  /* Don't combine the fallthru edge into anything else.
	     If there is a match, we'll do it the other way around.  */
	  if (e == fallthru)
	    continue;
	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && (!(df_get_bb_dirty (e->src))
		  && !(df_get_bb_dirty (fallthru->src))))
	    continue;

	  if (try_crossjump_to_edge (mode, e, fallthru))
	    {
	      changed = true;
	      ix = 0;
	      ev = bb;
	      continue;
	    }
	}

      /* Non-obvious work limiting check: Recognize that we're going
	 to call try_crossjump_bb on every basic block.  So if we have
	 two blocks with lots of outgoing edges (a switch) and they
	 share lots of common destinations, then we would do the
	 cross-jump check once for each common destination.

	 Now, if the blocks actually are cross-jump candidates, then
	 all of their destinations will be shared.  Which means that
	 we only need check them for cross-jump candidacy once.  We
	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
	 choosing to do the check from the block for which the edge
	 in question is the first successor of A.  */
      if (EDGE_SUCC (e->src, 0) != e)
	continue;

      for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
	{
	  e2 = EDGE_PRED (ev2, ix2);
	  ix2++;

	  if (e2 == e)
	    continue;

	  /* We've already checked the fallthru edge above.  */
	  if (e2 == fallthru)
	    continue;

	  /* The "first successor" check above only prevents multiple
	     checks of crossjump(A,B).  In order to prevent redundant
	     checks of crossjump(B,A), require that A be the block
	     with the lowest index.  */
	  if (e->src->index > e2->src->index)
	    continue;

	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && (!(df_get_bb_dirty (e->src))
		  && !(df_get_bb_dirty (e2->src))))
	    continue;

	  if (try_crossjump_to_edge (mode, e, e2))
	    {
	      changed = true;
	      ev2 = bb;
	      ix = 0;
	      break;
	    }
	}
    }

  /* Record globally (for the enclosing cleanup pass) that some
     crossjumping happened.  */
  if (changed)
    crossjumps_occured = true;

  return changed;
}
1928 | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1929 /* Return true if BB contains just bb note, or bb note followed |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1930 by only DEBUG_INSNs. */ |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1931 |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1932 static bool |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1933 trivially_empty_bb_p (basic_block bb) |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1934 { |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1935 rtx insn = BB_END (bb); |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1936 |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1937 while (1) |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1938 { |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1939 if (insn == BB_HEAD (bb)) |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1940 return true; |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1941 if (!DEBUG_INSN_P (insn)) |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1942 return false; |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1943 insn = PREV_INSN (insn); |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1944 } |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1945 } |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1946 |
0 | 1947 /* Do simple CFG optimizations - basic block merging, simplifying of jump |
1948 instructions etc. Return nonzero if changes were made. */ | |
1949 | |
1950 static bool | |
1951 try_optimize_cfg (int mode) | |
1952 { | |
1953 bool changed_overall = false; | |
1954 bool changed; | |
1955 int iterations = 0; | |
1956 basic_block bb, b, next; | |
1957 | |
1958 if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING)) | |
1959 clear_bb_flags (); | |
1960 | |
1961 crossjumps_occured = false; | |
1962 | |
1963 FOR_EACH_BB (bb) | |
1964 update_forwarder_flag (bb); | |
1965 | |
1966 if (! targetm.cannot_modify_jumps_p ()) | |
1967 { | |
1968 first_pass = true; | |
1969 /* Attempt to merge blocks as made possible by edge removal. If | |
1970 a block has only one successor, and the successor has only | |
1971 one predecessor, they may be combined. */ | |
1972 do | |
1973 { | |
1974 changed = false; | |
1975 iterations++; | |
1976 | |
1977 if (dump_file) | |
1978 fprintf (dump_file, | |
1979 "\n\ntry_optimize_cfg iteration %i\n\n", | |
1980 iterations); | |
1981 | |
1982 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;) | |
1983 { | |
1984 basic_block c; | |
1985 edge s; | |
1986 bool changed_here = false; | |
1987 | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1988 /* Delete trivially dead basic blocks. This is either |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1989 blocks with no predecessors, or empty blocks with no |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1990 successors. However if the empty block with no |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1991 successors is the successor of the ENTRY_BLOCK, it is |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1992 kept. This ensures that the ENTRY_BLOCK will have a |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1993 successor which is a precondition for many RTL |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1994 passes. Empty blocks may result from expanding |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1995 __builtin_unreachable (). */ |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1996 if (EDGE_COUNT (b->preds) == 0 |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1997 || (EDGE_COUNT (b->succs) == 0 |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1998 && trivially_empty_bb_p (b) |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1999 && single_succ_edge (ENTRY_BLOCK_PTR)->dest != b)) |
0 | 2000 { |
2001 c = b->prev_bb; | |
63
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2002 if (EDGE_COUNT (b->preds) > 0) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2003 { |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2004 edge e; |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2005 edge_iterator ei; |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2006 |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2007 if (current_ir_type () == IR_RTL_CFGLAYOUT) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2008 { |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2009 if (b->il.rtl->footer |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2010 && BARRIER_P (b->il.rtl->footer)) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2011 FOR_EACH_EDGE (e, ei, b->preds) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2012 if ((e->flags & EDGE_FALLTHRU) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2013 && e->src->il.rtl->footer == NULL) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2014 { |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2015 if (b->il.rtl->footer) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2016 { |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2017 e->src->il.rtl->footer = b->il.rtl->footer; |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2018 b->il.rtl->footer = NULL; |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2019 } |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2020 else |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2021 { |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2022 start_sequence (); |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2023 e->src->il.rtl->footer = emit_barrier (); |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2024 end_sequence (); |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2025 } |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2026 } |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2027 } |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2028 else |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2029 { |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2030 rtx last = get_last_bb_insn (b); |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2031 if (last && BARRIER_P (last)) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2032 FOR_EACH_EDGE (e, ei, b->preds) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2033 if ((e->flags & EDGE_FALLTHRU)) |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2034 emit_barrier_after (BB_END (e->src)); |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2035 } |
b7f97abdc517
update gcc from gcc-4.5.0 to gcc-4.6
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2036 } |
0 | 2037 delete_basic_block (b); |
2038 if (!(mode & CLEANUP_CFGLAYOUT)) | |
2039 changed = true; | |
2040 /* Avoid trying to remove ENTRY_BLOCK_PTR. */ | |
2041 b = (c == ENTRY_BLOCK_PTR ? c->next_bb : c); | |
2042 continue; | |
2043 } | |
2044 | |
2045 /* Remove code labels no longer used. */ | |
2046 if (single_pred_p (b) | |
2047 && (single_pred_edge (b)->flags & EDGE_FALLTHRU) | |
2048 && !(single_pred_edge (b)->flags & EDGE_COMPLEX) | |
2049 && LABEL_P (BB_HEAD (b)) | |
2050 /* If the previous block ends with a branch to this | |
2051 block, we can't delete the label. Normally this | |
2052 is a condjump that is yet to be simplified, but | |
2053 if CASE_DROPS_THRU, this can be a tablejump with | |
2054 some element going to the same place as the | |
2055 default (fallthru). */ | |
2056 && (single_pred (b) == ENTRY_BLOCK_PTR | |
2057 || !JUMP_P (BB_END (single_pred (b))) | |
2058 || ! label_is_jump_target_p (BB_HEAD (b), | |
2059 BB_END (single_pred (b))))) | |
2060 { | |
2061 rtx label = BB_HEAD (b); | |
2062 | |
2063 delete_insn_chain (label, label, false); | |
2064 /* If the case label is undeletable, move it after the | |
2065 BASIC_BLOCK note. */ | |
2066 if (NOTE_KIND (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL) | |
2067 { | |
2068 rtx bb_note = NEXT_INSN (BB_HEAD (b)); | |
2069 | |
2070 reorder_insns_nobb (label, label, bb_note); | |
2071 BB_HEAD (b) = bb_note; | |
2072 if (BB_END (b) == bb_note) | |
2073 BB_END (b) = label; | |
2074 } | |
2075 if (dump_file) | |
2076 fprintf (dump_file, "Deleted label in block %i.\n", | |
2077 b->index); | |
2078 } | |
2079 | |
2080 /* If we fall through an empty block, we can remove it. */ | |
2081 if (!(mode & CLEANUP_CFGLAYOUT) | |
2082 && single_pred_p (b) | |
2083 && (single_pred_edge (b)->flags & EDGE_FALLTHRU) | |
2084 && !LABEL_P (BB_HEAD (b)) | |
2085 && FORWARDER_BLOCK_P (b) | |
2086 /* Note that forwarder_block_p true ensures that | |
2087 there is a successor for this block. */ | |
2088 && (single_succ_edge (b)->flags & EDGE_FALLTHRU) | |
2089 && n_basic_blocks > NUM_FIXED_BLOCKS + 1) | |
2090 { | |
2091 if (dump_file) | |
2092 fprintf (dump_file, | |
2093 "Deleting fallthru block %i.\n", | |
2094 b->index); | |
2095 | |
2096 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb; | |
2097 redirect_edge_succ_nodup (single_pred_edge (b), | |
2098 single_succ (b)); | |
2099 delete_basic_block (b); | |
2100 changed = true; | |
2101 b = c; | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2102 continue; |
0 | 2103 } |
2104 | |
2105 if (single_succ_p (b) | |
2106 && (s = single_succ_edge (b)) | |
2107 && !(s->flags & EDGE_COMPLEX) | |
2108 && (c = s->dest) != EXIT_BLOCK_PTR | |
2109 && single_pred_p (c) | |
2110 && b != c) | |
2111 { | |
2112 /* When not in cfg_layout mode use code aware of reordering | |
2113 INSN. This code possibly creates new basic blocks so it | |
2114 does not fit merge_blocks interface and is kept here in | |
2115 hope that it will become useless once more of compiler | |
2116 is transformed to use cfg_layout mode. */ | |
2117 | |
2118 if ((mode & CLEANUP_CFGLAYOUT) | |
2119 && can_merge_blocks_p (b, c)) | |
2120 { | |
2121 merge_blocks (b, c); | |
2122 update_forwarder_flag (b); | |
2123 changed_here = true; | |
2124 } | |
2125 else if (!(mode & CLEANUP_CFGLAYOUT) | |
2126 /* If the jump insn has side effects, | |
2127 we can't kill the edge. */ | |
2128 && (!JUMP_P (BB_END (b)) | |
2129 || (reload_completed | |
2130 ? simplejump_p (BB_END (b)) | |
2131 : (onlyjump_p (BB_END (b)) | |
2132 && !tablejump_p (BB_END (b), | |
2133 NULL, NULL)))) | |
2134 && (next = merge_blocks_move (s, b, c, mode))) | |
2135 { | |
2136 b = next; | |
2137 changed_here = true; | |
2138 } | |
2139 } | |
2140 | |
2141 /* Simplify branch over branch. */ | |
2142 if ((mode & CLEANUP_EXPENSIVE) | |
2143 && !(mode & CLEANUP_CFGLAYOUT) | |
2144 && try_simplify_condjump (b)) | |
2145 changed_here = true; | |
2146 | |
2147 /* If B has a single outgoing edge, but uses a | |
2148 non-trivial jump instruction without side-effects, we | |
2149 can either delete the jump entirely, or replace it | |
2150 with a simple unconditional jump. */ | |
2151 if (single_succ_p (b) | |
2152 && single_succ (b) != EXIT_BLOCK_PTR | |
2153 && onlyjump_p (BB_END (b)) | |
2154 && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX) | |
2155 && try_redirect_by_replacing_jump (single_succ_edge (b), | |
2156 single_succ (b), | |
2157 (mode & CLEANUP_CFGLAYOUT) != 0)) | |
2158 { | |
2159 update_forwarder_flag (b); | |
2160 changed_here = true; | |
2161 } | |
2162 | |
2163 /* Simplify branch to branch. */ | |
2164 if (try_forward_edges (mode, b)) | |
2165 changed_here = true; | |
2166 | |
2167 /* Look for shared code between blocks. */ | |
2168 if ((mode & CLEANUP_CROSSJUMP) | |
2169 && try_crossjump_bb (mode, b)) | |
2170 changed_here = true; | |
2171 | |
2172 /* Don't get confused by the index shift caused by | |
2173 deleting blocks. */ | |
2174 if (!changed_here) | |
2175 b = b->next_bb; | |
2176 else | |
2177 changed = true; | |
2178 } | |
2179 | |
2180 if ((mode & CLEANUP_CROSSJUMP) | |
2181 && try_crossjump_bb (mode, EXIT_BLOCK_PTR)) | |
2182 changed = true; | |
2183 | |
2184 #ifdef ENABLE_CHECKING | |
2185 if (changed) | |
2186 verify_flow_info (); | |
2187 #endif | |
2188 | |
2189 changed_overall |= changed; | |
2190 first_pass = false; | |
2191 } | |
2192 while (changed); | |
2193 } | |
2194 | |
2195 FOR_ALL_BB (b) | |
2196 b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK); | |
2197 | |
2198 return changed_overall; | |
2199 } | |
2200 | |
2201 /* Delete all unreachable basic blocks. */ | |
2202 | |
2203 bool | |
2204 delete_unreachable_blocks (void) | |
2205 { | |
2206 bool changed = false; | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2207 basic_block b, prev_bb; |
0 | 2208 |
2209 find_unreachable_blocks (); | |
2210 | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2211 /* When we're in GIMPLE mode and there may be debug insns, we should |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2212 delete blocks in reverse dominator order, so as to get a chance |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2213 to substitute all released DEFs into debug stmts. If we don't |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2214 have dominators information, walking blocks backward gets us a |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2215 better chance of retaining most debug information than |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2216 otherwise. */ |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2217 if (MAY_HAVE_DEBUG_STMTS && current_ir_type () == IR_GIMPLE |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2218 && dom_info_available_p (CDI_DOMINATORS)) |
0 | 2219 { |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2220 for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb) |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2221 { |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2222 prev_bb = b->prev_bb; |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2223 |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2224 if (!(b->flags & BB_REACHABLE)) |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2225 { |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2226 /* Speed up the removal of blocks that don't dominate |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2227 others. Walking backwards, this should be the common |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2228 case. */ |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2229 if (!first_dom_son (CDI_DOMINATORS, b)) |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2230 delete_basic_block (b); |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2231 else |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2232 { |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2233 VEC (basic_block, heap) *h |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2234 = get_all_dominated_blocks (CDI_DOMINATORS, b); |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2235 |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2236 while (VEC_length (basic_block, h)) |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2237 { |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2238 b = VEC_pop (basic_block, h); |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2239 |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2240 prev_bb = b->prev_bb; |
0 | 2241 |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2242 gcc_assert (!(b->flags & BB_REACHABLE)); |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2243 |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2244 delete_basic_block (b); |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2245 } |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2246 |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2247 VEC_free (basic_block, heap, h); |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2248 } |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2249 |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2250 changed = true; |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2251 } |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2252 } |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2253 } |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2254 else |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2255 { |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2256 for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb) |
0 | 2257 { |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2258 prev_bb = b->prev_bb; |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2259 |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2260 if (!(b->flags & BB_REACHABLE)) |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2261 { |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2262 delete_basic_block (b); |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2263 changed = true; |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2264 } |
0 | 2265 } |
2266 } | |
2267 | |
2268 if (changed) | |
2269 tidy_fallthru_edges (); | |
2270 return changed; | |
2271 } | |
2272 | |
2273 /* Delete any jump tables never referenced. We can't delete them at the | |
2274 time of removing tablejump insn as they are referenced by the preceding | |
2275 insns computing the destination, so we delay deleting and garbagecollect | |
2276 them once life information is computed. */ | |
2277 void | |
2278 delete_dead_jumptables (void) | |
2279 { | |
2280 basic_block bb; | |
2281 | |
2282 /* A dead jump table does not belong to any basic block. Scan insns | |
2283 between two adjacent basic blocks. */ | |
2284 FOR_EACH_BB (bb) | |
2285 { | |
2286 rtx insn, next; | |
2287 | |
2288 for (insn = NEXT_INSN (BB_END (bb)); | |
2289 insn && !NOTE_INSN_BASIC_BLOCK_P (insn); | |
2290 insn = next) | |
2291 { | |
2292 next = NEXT_INSN (insn); | |
2293 if (LABEL_P (insn) | |
2294 && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn) | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2295 && JUMP_TABLE_DATA_P (next)) |
0 | 2296 { |
2297 rtx label = insn, jump = next; | |
2298 | |
2299 if (dump_file) | |
2300 fprintf (dump_file, "Dead jumptable %i removed\n", | |
2301 INSN_UID (insn)); | |
2302 | |
2303 next = NEXT_INSN (next); | |
2304 delete_insn (jump); | |
2305 delete_insn (label); | |
2306 } | |
2307 } | |
2308 } | |
2309 } | |
2310 | |
2311 | |
2312 /* Tidy the CFG by deleting unreachable code and whatnot. */ | |
2313 | |
2314 bool | |
2315 cleanup_cfg (int mode) | |
2316 { | |
2317 bool changed = false; | |
2318 | |
2319 /* Set the cfglayout mode flag here. We could update all the callers | |
2320 but that is just inconvenient, especially given that we eventually | |
2321 want to have cfglayout mode as the default. */ | |
2322 if (current_ir_type () == IR_RTL_CFGLAYOUT) | |
2323 mode |= CLEANUP_CFGLAYOUT; | |
2324 | |
2325 timevar_push (TV_CLEANUP_CFG); | |
2326 if (delete_unreachable_blocks ()) | |
2327 { | |
2328 changed = true; | |
2329 /* We've possibly created trivially dead code. Cleanup it right | |
2330 now to introduce more opportunities for try_optimize_cfg. */ | |
2331 if (!(mode & (CLEANUP_NO_INSN_DEL)) | |
2332 && !reload_completed) | |
2333 delete_trivially_dead_insns (get_insns (), max_reg_num ()); | |
2334 } | |
2335 | |
2336 compact_blocks (); | |
2337 | |
2338 /* To tail-merge blocks ending in the same noreturn function (e.g. | |
2339 a call to abort) we have to insert fake edges to exit. Do this | |
2340 here once. The fake edges do not interfere with any other CFG | |
2341 cleanups. */ | |
2342 if (mode & CLEANUP_CROSSJUMP) | |
2343 add_noreturn_fake_exit_edges (); | |
2344 | |
2345 if (!dbg_cnt (cfg_cleanup)) | |
2346 return changed; | |
2347 | |
2348 while (try_optimize_cfg (mode)) | |
2349 { | |
2350 delete_unreachable_blocks (), changed = true; | |
2351 if (!(mode & CLEANUP_NO_INSN_DEL)) | |
2352 { | |
2353 /* Try to remove some trivially dead insns when doing an expensive | |
2354 cleanup. But delete_trivially_dead_insns doesn't work after | |
2355 reload (it only handles pseudos) and run_fast_dce is too costly | |
2356 to run in every iteration. | |
2357 | |
2358 For effective cross jumping, we really want to run a fast DCE to | |
2359 clean up any dead conditions, or they get in the way of performing | |
2360 useful tail merges. | |
2361 | |
2362 Other transformations in cleanup_cfg are not so sensitive to dead | |
2363 code, so delete_trivially_dead_insns or even doing nothing at all | |
2364 is good enough. */ | |
2365 if ((mode & CLEANUP_EXPENSIVE) && !reload_completed | |
2366 && !delete_trivially_dead_insns (get_insns (), max_reg_num ())) | |
2367 break; | |
2368 else if ((mode & CLEANUP_CROSSJUMP) | |
2369 && crossjumps_occured) | |
2370 run_fast_dce (); | |
2371 } | |
2372 else | |
2373 break; | |
2374 } | |
2375 | |
2376 if (mode & CLEANUP_CROSSJUMP) | |
2377 remove_fake_exit_edges (); | |
2378 | |
2379 /* Don't call delete_dead_jumptables in cfglayout mode, because | |
2380 that function assumes that jump tables are in the insns stream. | |
2381 But we also don't _have_ to delete dead jumptables in cfglayout | |
2382 mode because we shouldn't even be looking at things that are | |
2383 not in a basic block. Dead jumptables are cleaned up when | |
2384 going out of cfglayout mode. */ | |
2385 if (!(mode & CLEANUP_CFGLAYOUT)) | |
2386 delete_dead_jumptables (); | |
2387 | |
2388 timevar_pop (TV_CLEANUP_CFG); | |
2389 | |
2390 return changed; | |
2391 } | |
2392 | |
2393 static unsigned int | |
2394 rest_of_handle_jump (void) | |
2395 { | |
2396 if (crtl->tail_call_emit) | |
2397 fixup_tail_calls (); | |
2398 return 0; | |
2399 } | |
2400 | |
2401 struct rtl_opt_pass pass_jump = | |
2402 { | |
2403 { | |
2404 RTL_PASS, | |
2405 "sibling", /* name */ | |
2406 NULL, /* gate */ | |
2407 rest_of_handle_jump, /* execute */ | |
2408 NULL, /* sub */ | |
2409 NULL, /* next */ | |
2410 0, /* static_pass_number */ | |
2411 TV_JUMP, /* tv_id */ | |
2412 0, /* properties_required */ | |
2413 0, /* properties_provided */ | |
2414 0, /* properties_destroyed */ | |
2415 TODO_ggc_collect, /* todo_flags_start */ | |
2416 TODO_verify_flow, /* todo_flags_finish */ | |
2417 } | |
2418 }; | |
2419 | |
2420 | |
2421 static unsigned int | |
2422 rest_of_handle_jump2 (void) | |
2423 { | |
2424 delete_trivially_dead_insns (get_insns (), max_reg_num ()); | |
2425 if (dump_file) | |
2426 dump_flow_info (dump_file, dump_flags); | |
2427 cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0) | |
2428 | (flag_thread_jumps ? CLEANUP_THREADING : 0)); | |
2429 return 0; | |
2430 } | |
2431 | |
2432 | |
2433 struct rtl_opt_pass pass_jump2 = | |
2434 { | |
2435 { | |
2436 RTL_PASS, | |
2437 "jump", /* name */ | |
2438 NULL, /* gate */ | |
2439 rest_of_handle_jump2, /* execute */ | |
2440 NULL, /* sub */ | |
2441 NULL, /* next */ | |
2442 0, /* static_pass_number */ | |
2443 TV_JUMP, /* tv_id */ | |
2444 0, /* properties_required */ | |
2445 0, /* properties_provided */ | |
2446 0, /* properties_destroyed */ | |
2447 TODO_ggc_collect, /* todo_flags_start */ | |
2448 TODO_dump_func | TODO_verify_rtl_sharing,/* todo_flags_finish */ | |
2449 } | |
2450 }; | |
2451 | |
2452 |