CbC_gcc: gcc/tree-cfgcleanup.c @ changeset 47:3bfb6c00c1e0
commit:   update it from 4.4.2 to 4.4.3.
author:   kent <kent@cr.ie.u-ryukyu.ac.jp>
date:     Sun, 07 Feb 2010 17:44:34 +0900
parents:  a06113de4d67
children: 77e2b8dfacca
/* CFG cleanup for trees.
   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "output.h"
#include "toplev.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "ggc.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "timevar.h"
#include "tree-dump.h"
#include "tree-pass.h"
#include "except.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "hashtab.h"
#include "tree-ssa-propagate.h"
#include "tree-scalar-evolution.h"

/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
       what cleanups it makes sense to try on the block.  */
bitmap cfgcleanup_altered_bbs;

/* Remove any fallthru edge from EV.  Return true if an edge was removed.  */

static bool
remove_fallthru_edge (VEC(edge,gc) *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
        remove_edge_and_dominated_blocks (e);
        return true;
      }
  return false;
}


/* Disconnect an unreachable block in the control expression starting
   at block BB.  */

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple stmt = gsi_stmt (gsi);
  tree val;

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;

      fold_defer_overflow_warnings ();
      val = gimple_fold (stmt);
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return false;
        }

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e != taken_edge)
            {
              if (!warned)
                {
                  fold_undefer_overflow_warnings
                    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
                  warned = true;
                }

              taken_edge->probability += e->probability;
              taken_edge->count += e->count;
              remove_edge_and_dominated_blocks (e);
              retval = true;
            }
          else
            ei_next (&ei);
        }
      if (!warned)
        fold_undefer_and_ignore_overflow_warnings ();
      if (taken_edge->probability > REG_BR_PROB_BASE)
        taken_edge->probability = REG_BR_PROB_BASE;
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}
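
/* For illustration, a sketch of the transformation performed by
   cleanup_control_expr_graph (hypothetical GIMPLE, not taken from a
   real compilation).  If constant propagation has reduced a condition
   to a constant:

     <bb 2>:
       if (0 != 0) goto <bb 3>; else goto <bb 4>;

   then gimple_fold yields the constant, find_taken_edge returns the
   edge to <bb 4>, the edge to <bb 3> (and any blocks reachable only
   through it) is removed with its probability and count folded into
   the surviving edge, the GIMPLE_COND is deleted, and the surviving
   edge is marked EDGE_FALLTHRU, so <bb 2> simply falls through into
   <bb 4>.  */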

/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune the CFG.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    retval |= cleanup_control_expr_graph (bb, gsi);
  else if (gimple_code (stmt) == GIMPLE_GOTO
           && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
           && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
               == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
         destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      /* First look at all the outgoing edges.  Delete any outgoing
         edges which do not go to the right block.  For the one
         edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      target_block = label_to_block (label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e->dest != target_block)
            remove_edge_and_dominated_blocks (e);
          else
            {
              /* Turn off the EDGE_ABNORMAL flag.  */
              e->flags &= ~EDGE_ABNORMAL;

              /* And set EDGE_FALLTHRU.  */
              e->flags |= EDGE_FALLTHRU;
              ei_next (&ei);
            }
        }

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
         relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
           && gimple_call_noreturn_p (stmt)
           && remove_fallthru_edge (bb->succs))
    retval = true;

  return retval;
}
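
/* For illustration, a sketch of the computed-goto case handled above
   (hypothetical code, not from a real compilation).  Given the GNU C

     void *dest = &&out;
     ...
     goto *dest;

   constant propagation can rewrite the jump as "goto *&&out;", i.e. a
   GIMPLE_GOTO whose destination is an ADDR_EXPR of a LABEL_DECL.  The
   target block is then known at compile time, so every edge to some
   other potential label target is removed, the one remaining edge
   loses EDGE_ABNORMAL and gains EDGE_FALLTHRU, and the goto statement
   itself is deleted.  */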

/* Return true if basic block BB does nothing except pass control
   flow to another block and we can safely insert a label at the
   start of the successor block.

   As a precondition, we require that BB not be equal to
   ENTRY_BLOCK_PTR.  */

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  edge_iterator ei;
  edge e, succ;
  basic_block dest;

  /* BB must have a single outgoing edge.  */
  if (!single_succ_p (bb)
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
         Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of EXIT_BLOCK_PTR.  */
      || single_succ (bb) == EXIT_BLOCK_PTR
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

#if ENABLE_CHECKING
  gcc_assert (bb != ENTRY_BLOCK_PTR);
#endif

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
        {
        case GIMPLE_LABEL:
          if (DECL_NONLOCAL (gimple_label_label (stmt)))
            return false;
          break;

        default:
          return false;
        }
    }

  if (find_edge (ENTRY_BLOCK_PTR, bb))
    return false;

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop latches, headers and preheaders.  */
      if (bb->loop_father->header == bb)
        return false;
      dest = EDGE_SUCC (bb, 0)->dest;

      if (dest->loop_father->header == dest)
        return false;
    }

  /* If we have an EH edge leaving this block, make sure that the
     destination of this block has only one predecessor.  This ensures
     that we don't get into the situation where we try to remove two
     forwarders that go to the same basic block but are handlers for
     different EH regions.  */
  succ = single_succ_edge (bb);
  dest = succ->dest;
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->flags & EDGE_EH)
        {
          if (!single_pred_p (dest))
            return false;
        }
    }

  return true;
}
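
/* For illustration, a hypothetical forwarder block in the sense
   accepted above (with PHI_WANTED false): a block containing at most
   ordinary labels, whose single, normal out-edge is its only effect:

     <bb 5>:
     L3:
       (no statements; one successor edge to <bb 7>)

   tree_forwarder_block_p returns true for <bb 5>, so a later cleanup
   may retarget all of <bb 5>'s incoming edges directly to <bb 7>.  */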

/* Return true if BB has at least one abnormal incoming edge.  */

static inline bool
has_abnormal_incoming_edge_p (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->flags & EDGE_ABNORMAL)
      return true;

  return false;
}

/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.  */

static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
        return false;
    }

  return true;
}
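
/* For illustration, a hypothetical PHI node in DEST:

     # x_1 = PHI <a_2(3), a_2(5), b_4(6)>

   The alternatives for the edges from <bb 3> and <bb 5> are both a_2,
   so this PHI does not block phi_alternatives_equal for that pair of
   edges; the pair <bb 3>/<bb 6> compares a_2 with b_4 and makes the
   function return false.  All PHI nodes in DEST must agree for the
   result to be true.  */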

/* Removes forwarder block BB.  Returns false if this failed.  */

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple label;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool seen_abnormal_edge = false;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block begins with a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label
      && gimple_code (label) == GIMPLE_LABEL
      && DECL_NONLOCAL (gimple_label_label (label)))
    return false;

  /* If there is an abnormal edge to basic block BB, but not into
     DEST, problems might occur during the out-of-SSA removal of the
     PHI node, due to overlapping live ranges of registers.

     If there is an abnormal edge into DEST, the problems would occur
     anyway, since cleanup_dead_labels would then merge the labels for
     two different EH regions, and the rest of the exception handling
     code does not like that.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (has_abnormal_incoming_edge_p (bb))
    {
      seen_abnormal_edge = true;

      if (has_abnormal_incoming_edge_p (dest)
          || !gimple_seq_empty_p (phi_nodes (dest)))
        return false;
    }

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          s = find_edge (e->src, dest);
          if (!s)
            continue;

          if (!phi_alternatives_equal (dest, succ, s))
            return false;
        }
    }

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
        {
          /* If there is an abnormal edge, redirect it anyway, and
             move the labels to the new block to make it legal.  */
          s = redirect_edge_succ_nodup (e, dest);
        }
      else
        s = redirect_edge_and_branch (e, dest);

      if (s == e)
        {
          /* Create arguments for the phi nodes, since the edge was not
             here before.  */
          for (gsi = gsi_start_phis (dest);
               !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gimple phi = gsi_stmt (gsi);
              add_phi_arg (phi, gimple_phi_arg_def (phi, succ->dest_idx), s);
            }
        }
    }

  if (seen_abnormal_edge)
    {
      /* Move the labels to the new block, so that the redirection of
         the abnormal edges works.  */
      gsi_to = gsi_start_bb (dest);
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
        {
          label = gsi_stmt (gsi);
          gcc_assert (gimple_code (label) == GIMPLE_LABEL);
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, label, GSI_CONTINUE_LINKING);
        }
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
        {
          /* Shortcut to avoid calling (relatively expensive)
             nearest_common_dominator unless necessary.  */
          dom = dombb;
        }
      else
        dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}
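
/* For illustration, a before/after sketch of remove_forwarder_block
   on a hypothetical CFG:

     before:  <bb 2> --> <bb 5> --> <bb 7>
              <bb 3> --> <bb 5>

     after:   <bb 2> -------------> <bb 7>
              <bb 3> -------------> <bb 7>

   Each predecessor edge of the forwarder <bb 5> is redirected to
   <bb 7>; for every newly created edge, each PHI node in <bb 7> gets
   a copy of the argument it previously carried for the edge
   <bb 5> -> <bb 7>; then <bb 5> is deleted and the immediate
   dominator of <bb 7> is recomputed.  */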

/* Split basic blocks on calls in the middle of a basic block that are now
   known not to return, and remove the unreachable code.  */

static bool
split_bbs_on_noreturn_calls (void)
{
  bool changed = false;
  gimple stmt;
  basic_block bb;

  /* Detect cases where a mid-block call is now known not to return.  */
  if (cfun->gimple_df)
    while (VEC_length (gimple, MODIFIED_NORETURN_CALLS (cfun)))
      {
        stmt = VEC_pop (gimple, MODIFIED_NORETURN_CALLS (cfun));
        bb = gimple_bb (stmt);
        /* BB might be deleted at this point, so first verify that
           BB is still present in the cfg.  */
        if (bb == NULL
            || bb->index < NUM_FIXED_BLOCKS
            || bb->index >= n_basic_blocks
            || BASIC_BLOCK (bb->index) != bb
            || last_stmt (bb) == stmt
            || !gimple_call_noreturn_p (stmt))
          continue;

        changed = true;
        split_block (bb, stmt);
        remove_fallthru_edge (bb->succs);
      }

  return changed;
}
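
/* For illustration, a hypothetical mid-block call that has become
   noreturn (the function name is made up):

     <bb 4>:
       x_3 = bar ();
       my_abort_wrapper ();    [now known not to return]
       y_5 = x_3 + 1;          [unreachable]

   split_block cuts <bb 4> immediately after the call, and
   remove_fallthru_edge deletes the fallthru edge to the new block;
   the trailing statements become unreachable and a later cleanup
   deletes them.  */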

/* If GIMPLE_OMP_RETURN in basic block BB is unreachable, remove it.  */

static bool
cleanup_omp_return (basic_block bb)
{
  gimple stmt = last_stmt (bb);
  basic_block control_bb;

  if (stmt == NULL
      || gimple_code (stmt) != GIMPLE_OMP_RETURN
      || !single_pred_p (bb))
    return false;

  control_bb = single_pred (bb);
  stmt = last_stmt (control_bb);

  if (stmt == NULL || gimple_code (stmt) != GIMPLE_OMP_SECTIONS_SWITCH)
    return false;

  /* The block with the control statement normally has two entry edges -- one
     from entry, one from continue.  If continue is removed, return is
     unreachable, so we remove it here as well.  */
  if (EDGE_COUNT (control_bb->preds) == 2)
    return false;

  gcc_assert (EDGE_COUNT (control_bb->preds) == 1);
  remove_edge_and_dominated_blocks (single_pred_edge (bb));
  return true;
}

/* Tries to clean up the CFG in basic block BB.  Returns true if anything
   changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  bool retval = false;

  if (cleanup_omp_return (bb))
    return true;

  retval = cleanup_control_flow_bb (bb);

  /* Forwarder blocks can carry line number information which is
     useful when debugging, so we only clean them up when
     optimizing.  */
  if (optimize > 0
      && tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  if (single_succ_p (bb)
      && can_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return retval;
}

/* Iterate the cfg cleanups, while anything changes.  */

static bool
cleanup_tree_cfg_1 (void)
{
  bool retval = false;
  basic_block bb;
  unsigned i, n;

  retval |= split_bbs_on_noreturn_calls ();

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edges to CASE_LABEL_EXPRs.  */
  start_recording_case_labels ();

  /* Start by iterating over all basic blocks.  We cannot use FOR_EACH_BB,
     since the basic blocks may get removed.  */
  n = last_basic_block;
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK (i);
      if (bb)
        retval |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
        continue;

      bb = BASIC_BLOCK (i);
      if (!bb)
        continue;

      retval |= cleanup_tree_cfg_bb (bb);

      /* Rerun split_bbs_on_noreturn_calls, in case we have altered any
         noreturn calls.  */
      retval |= split_bbs_on_noreturn_calls ();
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);
  return retval;
}
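
/* For illustration, why the worklist above must iterate: one cleanup
   typically enables another.  Folding a constant GIMPLE_COND can
   leave the surviving successor with a single predecessor, which
   makes the two blocks mergeable; merging can collapse a
   single-valued PHI, which may let the next GIMPLE_COND fold as
   well.  Each transformation records the indices of the affected
   blocks in cfgcleanup_altered_bbs, so only blocks that might still
   profit are revisited.  */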


/* Remove unreachable blocks and other miscellaneous clean up work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  bool changed;

  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Iterate until there are no more cleanups left to do.  If any
     iteration changed the flowgraph, set CHANGED to true.

     If dominance information is available, there cannot be any unreachable
     blocks.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      changed = delete_unreachable_blocks ();
      calculate_dominance_info (CDI_DOMINATORS);
    }
  else
    {
#ifdef ENABLE_CHECKING
      verify_dominators (CDI_DOMINATORS);
#endif
      changed = false;
    }

  changed |= cleanup_tree_cfg_1 ();

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));
  compact_blocks ();

#ifdef ENABLE_CHECKING
  verify_flow_info ();
#endif

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return changed;
}

/* Repairs loop structures.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs = BITMAP_ALLOC (NULL);
  fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of the cfg that
     originally were inside a loop end up outside it due to edge removal
     (since they become unreachable by back edges from the latch).  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (changed_bbs, TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

#ifdef ENABLE_CHECKING
  verify_loop_structure ();
#endif
  scev_reset ();

  loops_state_clear (LOOPS_NEED_FIXUP);
}

/* Clean up the CFG and repair loop structures.  */

bool
cleanup_tree_cfg (void)
{
  bool changed = cleanup_tree_cfg_noloop ();

  if (current_loops != NULL
      && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    repair_loop_structures ();

  return changed;
}
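
/* For illustration, a hypothetical caller (the function below is made
   up; it only sketches the calling convention):

     static void
     after_my_transform (void)
     {
       ... mutate statements and edges ...
       if (cleanup_tree_cfg ())
         ;  (flowgraph changed; dominator info stayed valid)
     }

   In practice, most passes request this cleanup indirectly by
   returning TODO_cleanup_cfg in their todo_flags_finish.  */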

/* Merge the PHI nodes at BB into those at BB's sole successor.  */

static void
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return;

  /* If the destination block begins with a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label
      && gimple_code (label) == GIMPLE_LABEL
      && DECL_NONLOCAL (gimple_label_label (label)))
    return;

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gimple_stmt_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
        {
          /* We already have an edge S from E->src to DEST.  If S and
             E->dest's sole successor edge have the same PHI arguments
             at DEST, redirect E to DEST.  */
          if (phi_alternatives_equal (dest, s, succ))
            {
              e = redirect_edge_and_branch (e, dest);
              redirect_edge_var_map_clear (e);
              continue;
            }

          /* PHI arguments are different.  Create a forwarder block by
             splitting E so that we can merge PHI arguments on E to
             DEST.  */
          e = single_succ_edge (split_edge (e));
        }

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
         destination of E.  */
      for (gsi = gsi_start_phis (dest);
           !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gimple phi = gsi_stmt (gsi);
          tree def = gimple_phi_arg_def (phi, succ->dest_idx);

          if (TREE_CODE (def) == SSA_NAME)
            {
              edge_var_map_vector head;
              edge_var_map *vm;
              size_t i;

              /* If DEF is one of the results of PHI nodes removed during
                 redirection, replace it with the PHI argument that used
                 to be on E.  */
              head = redirect_edge_var_map_vector (e);
              for (i = 0; VEC_iterate (edge_var_map, head, i, vm); ++i)
                {
                  tree old_arg = redirect_edge_var_map_result (vm);
                  tree new_arg = redirect_edge_var_map_def (vm);

                  if (def == old_arg)
                    {
                      def = new_arg;
                      break;
                    }
                }
            }

          add_phi_arg (phi, def, s);
        }

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
         nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);
}

/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  goto <bb 9> (<L9>);

<L8>:;
  tem_17 = foo ();

  # tem_6 = PHI <tem_17(8), tem_23(7)>;
<L9>:;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;
<L10>:;

   Then we merge the first PHI node into the second one like so:

  goto <bb 9> (<L10>);

<L8>:;
  tem_17 = foo ();

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
<L10>:;  */

static unsigned int
merge_phi_nodes (void)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks);
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB (bb)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
        continue;

      dest = single_succ (bb);

      /* BB must feed into another basic block that has PHI
         nodes.  */
      if (!phi_nodes (dest)
          /* We don't want to deal with a basic block with
             abnormal edges.  */
          || has_abnormal_incoming_edge_p (bb))
        continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
        {
          /* If BB does not dominate DEST, then the PHI nodes at
             DEST must be the only users of the results of the PHI
             nodes at BB.  */
          *current++ = bb;
        }
      else
        {
          gimple_stmt_iterator gsi;
          unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

          /* BB dominates DEST.  There may be many users of the PHI
             nodes in BB.  However, there is still a trivial case we
             can handle.  If the result of every PHI in BB is used
             only by a PHI in DEST, then we can trivially merge the
             PHI nodes from BB into DEST.  */
          for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gimple phi = gsi_stmt (gsi);
              tree result = gimple_phi_result (phi);
              use_operand_p imm_use;
              gimple use_stmt;

              /* If the PHI's result is never used, then we can just
                 ignore it.  */
              if (has_zero_uses (result))
                continue;

              /* Get the single use of the result of this PHI node.  */
              if (!single_imm_use (result, &imm_use, &use_stmt)
                  || gimple_code (use_stmt) != GIMPLE_PHI
                  || gimple_bb (use_stmt) != dest
                  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
                break;
            }

          /* If the loop above iterated through all the PHI nodes
             in BB, then we can merge the PHIs from BB into DEST.  */
          if (gsi_end_p (gsi))
            *current++ = bb;
        }
    }

  /* Now let's drain WORKLIST.  */
  while (current != worklist)
    {
      bb = *--current;
      remove_forwarder_block_with_phi (bb);
    }

  free (worklist);
  return 0;
}

static bool
gate_merge_phi (void)
{
  return true;
}

struct gimple_opt_pass pass_merge_phi =
{
 {
  GIMPLE_PASS,
  "mergephi",                           /* name */
  gate_merge_phi,                       /* gate */
  merge_phi_nodes,                      /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_TREE_MERGE_PHI,                    /* tv_id */
  PROP_cfg | PROP_ssa,                  /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func | TODO_ggc_collect     /* todo_flags_finish */
    | TODO_verify_ssa
 }
};