Mercurial > hg > CbC > CbC_gcc
annotate gcc/tree-cfgcleanup.c @ 59:5b5b9ea5b220
fix
author | Shinji KONO <kono@ie.u-ryukyu.ac.jp> |
---|---|
date | Mon, 15 Feb 2010 17:22:24 +0900 |
parents | 77e2b8dfacca |
children | b7f97abdc517 |
rev | line source |
---|---|
0 | 1 /* CFG cleanup for trees. |
47
3bfb6c00c1e0
update it from 4.4.2 to 4.4.3.
kent <kent@cr.ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
2 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 |
0 | 3 Free Software Foundation, Inc. |
4 | |
5 This file is part of GCC. | |
6 | |
7 GCC is free software; you can redistribute it and/or modify | |
8 it under the terms of the GNU General Public License as published by | |
9 the Free Software Foundation; either version 3, or (at your option) | |
10 any later version. | |
11 | |
12 GCC is distributed in the hope that it will be useful, | |
13 but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 GNU General Public License for more details. | |
16 | |
17 You should have received a copy of the GNU General Public License | |
18 along with GCC; see the file COPYING3. If not see | |
19 <http://www.gnu.org/licenses/>. */ | |
20 | |
21 #include "config.h" | |
22 #include "system.h" | |
23 #include "coretypes.h" | |
24 #include "tm.h" | |
25 #include "tree.h" | |
26 #include "rtl.h" | |
27 #include "tm_p.h" | |
28 #include "hard-reg-set.h" | |
29 #include "basic-block.h" | |
30 #include "output.h" | |
31 #include "toplev.h" | |
32 #include "flags.h" | |
33 #include "function.h" | |
34 #include "expr.h" | |
35 #include "ggc.h" | |
36 #include "langhooks.h" | |
37 #include "diagnostic.h" | |
38 #include "tree-flow.h" | |
39 #include "timevar.h" | |
40 #include "tree-dump.h" | |
41 #include "tree-pass.h" | |
42 #include "toplev.h" | |
43 #include "except.h" | |
44 #include "cfgloop.h" | |
45 #include "cfglayout.h" | |
46 #include "hashtab.h" | |
47 #include "tree-ssa-propagate.h" | |
48 #include "tree-scalar-evolution.h" | |
49 | |
50 /* The set of blocks in that at least one of the following changes happened: | |
51 -- the statement at the end of the block was changed | |
52 -- the block was newly created | |
53 -- the set of the predecessors of the block changed | |
54 -- the set of the successors of the block changed | |
55 ??? Maybe we could track these changes separately, since they determine | |
56 what cleanups it makes sense to try on the block. */ | |
57 bitmap cfgcleanup_altered_bbs; | |
58 | |
59 /* Remove any fallthru edge from EV. Return true if an edge was removed. */ | |
60 | |
61 static bool | |
62 remove_fallthru_edge (VEC(edge,gc) *ev) | |
63 { | |
64 edge_iterator ei; | |
65 edge e; | |
66 | |
67 FOR_EACH_EDGE (e, ei, ev) | |
68 if ((e->flags & EDGE_FALLTHRU) != 0) | |
69 { | |
70 remove_edge_and_dominated_blocks (e); | |
71 return true; | |
72 } | |
73 return false; | |
74 } | |
75 | |
76 | |
/* Disconnect an unreachable block in the control expression starting
   at block BB.  GSI points at the control statement (a GIMPLE_COND or
   GIMPLE_SWITCH) that ends BB.  Returns true if any edge was removed.  */

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple stmt = gsi_stmt (gsi);
  tree val;

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;

      /* Defer overflow warnings: we only want to emit them if folding
	 really lets us prune edges below.  */
      fold_defer_overflow_warnings ();
      val = gimple_fold (stmt);
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
	{
	  /* The condition did not fold to a constant; nothing to prune.  */
	  fold_undefer_and_ignore_overflow_warnings ();
	  return false;
	}

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
	{
	  if (e != taken_edge)
	    {
	      if (!warned)
		{
		  /* First removed edge: flush the deferred warnings now
		     that we know the fold had an effect.  */
		  fold_undefer_overflow_warnings
		    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
		  warned = true;
		}

	      /* Fold the dead edge's profile counts into the survivor.  */
	      taken_edge->probability += e->probability;
	      taken_edge->count += e->count;
	      remove_edge_and_dominated_blocks (e);
	      retval = true;
	    }
	  else
	    ei_next (&ei);
	}
      if (!warned)
	fold_undefer_and_ignore_overflow_warnings ();
      /* Accumulated probabilities may exceed the base; clamp.  */
      if (taken_edge->probability > REG_BR_PROB_BASE)
	taken_edge->probability = REG_BR_PROB_BASE;
    }
  else
    taken_edge = single_succ_edge (bb);

  /* Drop the control statement; the sole remaining successor edge
     becomes a plain fallthru.  */
  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}
138 | |
/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune cfg.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    retval |= cleanup_control_expr_graph (bb, gsi);
  else if (gimple_code (stmt) == GIMPLE_GOTO
	   && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
	   && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
	       == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
	 destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      /* First look at all the outgoing edges.  Delete any outgoing
	 edges which do not go to the right block.  For the one
	 edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      target_block = label_to_block (label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
	{
	  if (e->dest != target_block)
	    remove_edge_and_dominated_blocks (e);
	  else
	    {
	      /* Turn off the EDGE_ABNORMAL flag.  */
	      e->flags &= ~EDGE_ABNORMAL;

	      /* And set EDGE_FALLTHRU.  */
	      e->flags |= EDGE_FALLTHRU;
	      ei_next (&ei);
	    }
	}

      /* Both ends of the surviving edge changed; queue them for
	 further cleanup iterations.  */
      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
	 relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
	   && gimple_call_noreturn_p (stmt)
	   && remove_fallthru_edge (bb->succs))
    retval = true;

  return retval;
}
212 | |
213 /* Return true if basic block BB does nothing except pass control | |
214 flow to another block and that we can safely insert a label at | |
215 the start of the successor block. | |
216 | |
217 As a precondition, we require that BB be not equal to | |
218 ENTRY_BLOCK_PTR. */ | |
219 | |
220 static bool | |
221 tree_forwarder_block_p (basic_block bb, bool phi_wanted) | |
222 { | |
223 gimple_stmt_iterator gsi; | |
224 | |
225 /* BB must have a single outgoing edge. */ | |
226 if (single_succ_p (bb) != 1 | |
227 /* If PHI_WANTED is false, BB must not have any PHI nodes. | |
228 Otherwise, BB must have PHI nodes. */ | |
229 || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted | |
230 /* BB may not be a predecessor of EXIT_BLOCK_PTR. */ | |
231 || single_succ (bb) == EXIT_BLOCK_PTR | |
232 /* Nor should this be an infinite loop. */ | |
233 || single_succ (bb) == bb | |
234 /* BB may not have an abnormal outgoing edge. */ | |
235 || (single_succ_edge (bb)->flags & EDGE_ABNORMAL)) | |
236 return false; | |
237 | |
238 #if ENABLE_CHECKING | |
239 gcc_assert (bb != ENTRY_BLOCK_PTR); | |
240 #endif | |
241 | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
47
diff
changeset
|
242 /* There should not be an edge coming from entry, or an EH edge. */ |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
47
diff
changeset
|
243 { |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
47
diff
changeset
|
244 edge_iterator ei; |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
47
diff
changeset
|
245 edge e; |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
47
diff
changeset
|
246 |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
47
diff
changeset
|
247 FOR_EACH_EDGE (e, ei, bb->preds) |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
47
diff
changeset
|
248 if (e->src == ENTRY_BLOCK_PTR || (e->flags & EDGE_EH)) |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
47
diff
changeset
|
249 return false; |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
47
diff
changeset
|
250 } |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
47
diff
changeset
|
251 |
0 | 252 /* Now walk through the statements backward. We can ignore labels, |
253 anything else means this is not a forwarder block. */ | |
254 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi)) | |
255 { | |
256 gimple stmt = gsi_stmt (gsi); | |
257 | |
258 switch (gimple_code (stmt)) | |
259 { | |
260 case GIMPLE_LABEL: | |
261 if (DECL_NONLOCAL (gimple_label_label (stmt))) | |
262 return false; | |
263 break; | |
264 | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
47
diff
changeset
|
265 /* ??? For now, hope there's a corresponding debug |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
47
diff
changeset
|
266 assignment at the destination. */ |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
47
diff
changeset
|
267 case GIMPLE_DEBUG: |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
47
diff
changeset
|
268 break; |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
47
diff
changeset
|
269 |
0 | 270 default: |
271 return false; | |
272 } | |
273 } | |
274 | |
275 if (current_loops) | |
276 { | |
277 basic_block dest; | |
278 /* Protect loop latches, headers and preheaders. */ | |
279 if (bb->loop_father->header == bb) | |
280 return false; | |
281 dest = EDGE_SUCC (bb, 0)->dest; | |
282 | |
283 if (dest->loop_father->header == dest) | |
284 return false; | |
285 } | |
286 return true; | |
287 } | |
288 | |
289 /* Return true if BB has at least one abnormal incoming edge. */ | |
290 | |
291 static inline bool | |
292 has_abnormal_incoming_edge_p (basic_block bb) | |
293 { | |
294 edge e; | |
295 edge_iterator ei; | |
296 | |
297 FOR_EACH_EDGE (e, ei, bb->preds) | |
298 if (e->flags & EDGE_ABNORMAL) | |
299 return true; | |
300 | |
301 return false; | |
302 } | |
303 | |
304 /* If all the PHI nodes in DEST have alternatives for E1 and E2 and | |
305 those alternatives are equal in each of the PHI nodes, then return | |
306 true, else return false. */ | |
307 | |
308 static bool | |
309 phi_alternatives_equal (basic_block dest, edge e1, edge e2) | |
310 { | |
311 int n1 = e1->dest_idx; | |
312 int n2 = e2->dest_idx; | |
313 gimple_stmt_iterator gsi; | |
314 | |
315 for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi)) | |
316 { | |
317 gimple phi = gsi_stmt (gsi); | |
318 tree val1 = gimple_phi_arg_def (phi, n1); | |
319 tree val2 = gimple_phi_arg_def (phi, n2); | |
320 | |
321 gcc_assert (val1 != NULL_TREE); | |
322 gcc_assert (val2 != NULL_TREE); | |
323 | |
324 if (!operand_equal_for_phi_arg_p (val1, val2)) | |
325 return false; | |
326 } | |
327 | |
328 return true; | |
329 } | |
330 | |
/* Removes forwarder block BB.  Returns false if this failed.  */

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple label;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool seen_abnormal_edge = false;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label, do not merge
     it.  */
  label = first_stmt (dest);
  if (label
      && gimple_code (label) == GIMPLE_LABEL
      && DECL_NONLOCAL (gimple_label_label (label)))
    return false;

  /* If there is an abnormal edge to basic block BB, but not into
     dest, problems might occur during removal of the phi node at out
     of ssa due to overlapping live ranges of registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and rest of exception handling code
     does not like it.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (has_abnormal_incoming_edge_p (bb))
    {
      seen_abnormal_edge = true;

      if (has_abnormal_incoming_edge_p (dest)
	  || !gimple_seq_empty_p (phi_nodes (dest)))
	return false;
    }

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  s = find_edge (e->src, dest);
	  if (!s)
	    continue;

	  if (!phi_alternatives_equal (dest, succ, s))
	    return false;
	}
    }

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
	{
	  /* If there is an abnormal edge, redirect it anyway, and
	     move the labels to the new block to make it legal.  */
	  s = redirect_edge_succ_nodup (e, dest);
	}
      else
	s = redirect_edge_and_branch (e, dest);

      if (s == e)
	{
	  /* Create arguments for the phi nodes, since the edge was not
	     here before.  Copy BB's argument (and its location) for
	     each PHI in DEST.  */
	  for (gsi = gsi_start_phis (dest);
	       !gsi_end_p (gsi);
	       gsi_next (&gsi))
	    {
	      gimple phi = gsi_stmt (gsi);
	      source_location l = gimple_phi_arg_location_from_edge (phi, succ);
	      add_phi_arg (phi, gimple_phi_arg_def (phi, succ->dest_idx), s, l);
	    }
	}
    }

  if (seen_abnormal_edge)
    {
      /* Move the labels to the new block, so that the redirection of
	 the abnormal edges works.  */
      gsi_to = gsi_start_bb (dest);
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
	{
	  label = gsi_stmt (gsi);
	  /* Only labels (and debug stmts) can remain -- see
	     tree_forwarder_block_p.  */
	  gcc_assert (gimple_code (label) == GIMPLE_LABEL
		      || is_gimple_debug (label));
	  gsi_remove (&gsi, false);
	  gsi_insert_before (&gsi_to, label, GSI_SAME_STMT);
	}
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
	{
	  /* Shortcut to avoid calling (relatively expensive)
	     nearest_common_dominator unless necessary.  */
	  dom = dombb;
	}
      else
	dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}
463 | |
/* Split basic blocks on calls in the middle of a basic block that are now
   known not to return, and remove the unreachable code.  */

static bool
split_bbs_on_noreturn_calls (void)
{
  bool changed = false;
  gimple stmt;
  basic_block bb;

  /* Detect cases where a mid-block call is now known not to return.
     MODIFIED_NORETURN_CALLS is a worklist filled in elsewhere when a
     call's noreturn status changes.  */
  if (cfun->gimple_df)
    while (VEC_length (gimple, MODIFIED_NORETURN_CALLS (cfun)))
      {
	stmt = VEC_pop (gimple, MODIFIED_NORETURN_CALLS (cfun));
	bb = gimple_bb (stmt);
	/* BB might be deleted at this point, so verify first
	   BB is present in the cfg.  Also skip if the call already
	   ends its block, or is not noreturn after all.  */
	if (bb == NULL
	    || bb->index < NUM_FIXED_BLOCKS
	    || bb->index >= n_basic_blocks
	    || BASIC_BLOCK (bb->index) != bb
	    || last_stmt (bb) == stmt
	    || !gimple_call_noreturn_p (stmt))
	  continue;

	changed = true;
	/* Cut the block after the noreturn call and discard the
	   fallthru edge: the code after it is unreachable.  */
	split_block (bb, stmt);
	remove_fallthru_edge (bb->succs);
      }

  return changed;
}
497 | |
/* If GIMPLE_OMP_RETURN in basic block BB is unreachable, remove it.
   Returns true if the edge feeding BB was removed.  */

static bool
cleanup_omp_return (basic_block bb)
{
  gimple stmt = last_stmt (bb);
  basic_block control_bb;

  /* Only applies to a single-predecessor block ending in OMP_RETURN.  */
  if (stmt == NULL
      || gimple_code (stmt) != GIMPLE_OMP_RETURN
      || !single_pred_p (bb))
    return false;

  control_bb = single_pred (bb);
  stmt = last_stmt (control_bb);

  if (stmt == NULL || gimple_code (stmt) != GIMPLE_OMP_SECTIONS_SWITCH)
    return false;

  /* The block with the control statement normally has two entry edges -- one
     from entry, one from continue.  If continue is removed, return is
     unreachable, so we remove it here as well.  */
  if (EDGE_COUNT (control_bb->preds) == 2)
    return false;

  gcc_assert (EDGE_COUNT (control_bb->preds) == 1);
  remove_edge_and_dominated_blocks (single_pred_edge (bb));
  return true;
}
527 | |
528 /* Tries to cleanup cfg in basic block BB. Returns true if anything | |
529 changes. */ | |
530 | |
531 static bool | |
532 cleanup_tree_cfg_bb (basic_block bb) | |
533 { | |
534 bool retval = false; | |
535 | |
536 if (cleanup_omp_return (bb)) | |
537 return true; | |
538 | |
539 retval = cleanup_control_flow_bb (bb); | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
47
diff
changeset
|
540 |
0 | 541 /* Forwarder blocks can carry line number information which is |
542 useful when debugging, so we only clean them up when | |
543 optimizing. */ | |
544 if (optimize > 0 | |
545 && tree_forwarder_block_p (bb, false) | |
546 && remove_forwarder_block (bb)) | |
547 return true; | |
548 | |
549 /* Merging the blocks may create new opportunities for folding | |
550 conditional branches (due to the elimination of single-valued PHI | |
551 nodes). */ | |
552 if (single_succ_p (bb) | |
553 && can_merge_blocks_p (bb, single_succ (bb))) | |
554 { | |
555 merge_blocks (bb, single_succ (bb)); | |
556 return true; | |
557 } | |
558 | |
559 return retval; | |
560 } | |
561 | |
/* Iterate the cfg cleanups, while anything changes.  Returns true if
   any iteration changed the flowgraph.  */

static bool
cleanup_tree_cfg_1 (void)
{
  bool retval = false;
  basic_block bb;
  unsigned i, n;

  retval |= split_bbs_on_noreturn_calls ();

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edge to CASE_LABEL_EXPR.  */
  start_recording_case_labels ();

  /* Start by iterating over all basic blocks.  We cannot use FOR_EACH_BB,
     since the basic blocks may get removed.  */
  n = last_basic_block;
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK (i);
      if (bb)
	retval |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
	continue;

      /* The block may have been deleted since it was queued.  */
      bb = BASIC_BLOCK (i);
      if (!bb)
	continue;

      retval |= cleanup_tree_cfg_bb (bb);

      /* Rerun split_bbs_on_noreturn_calls, in case we have altered any noreturn
	 calls.  */
      retval |= split_bbs_on_noreturn_calls ();
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);
  return retval;
}
614 | |
615 | |
/* Remove unreachable blocks and other miscellaneous clean up work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  bool changed;

  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Iterate until there are no more cleanups left to do.  If any
     iteration changed the flowgraph, set CHANGED to true.

     If dominance information is available, there cannot be any unreachable
     blocks.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      changed = delete_unreachable_blocks ();
      calculate_dominance_info (CDI_DOMINATORS);
    }
  else
    {
#ifdef ENABLE_CHECKING
      verify_dominators (CDI_DOMINATORS);
#endif
      changed = false;
    }

  changed |= cleanup_tree_cfg_1 ();

  /* Dominance info was either verified or recomputed above and must
     have been kept up to date by the cleanups.  */
  gcc_assert (dom_info_available_p (CDI_DOMINATORS));
  compact_blocks ();

#ifdef ENABLE_CHECKING
  verify_flow_info ();
#endif

  timevar_pop (TV_TREE_CLEANUP_CFG);

  /* Edge/block removal may have invalidated the loop tree; request
     a fixup (done by cleanup_tree_cfg).  */
  if (changed && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return changed;
}
660 | |
661 /* Repairs loop structures. */ | |
662 | |
663 static void | |
664 repair_loop_structures (void) | |
665 { | |
666 bitmap changed_bbs = BITMAP_ALLOC (NULL); | |
667 fix_loop_structure (changed_bbs); | |
668 | |
669 /* This usually does nothing. But sometimes parts of cfg that originally | |
670 were inside a loop get out of it due to edge removal (since they | |
671 become unreachable by back edges from latch). */ | |
672 if (loops_state_satisfies_p (LOOP_CLOSED_SSA)) | |
673 rewrite_into_loop_closed_ssa (changed_bbs, TODO_update_ssa); | |
674 | |
675 BITMAP_FREE (changed_bbs); | |
676 | |
677 #ifdef ENABLE_CHECKING | |
678 verify_loop_structure (); | |
679 #endif | |
680 scev_reset (); | |
681 | |
682 loops_state_clear (LOOPS_NEED_FIXUP); | |
683 } | |
684 | |
685 /* Cleanup cfg and repair loop structures. */ | |
686 | |
687 bool | |
688 cleanup_tree_cfg (void) | |
689 { | |
690 bool changed = cleanup_tree_cfg_noloop (); | |
691 | |
692 if (current_loops != NULL | |
693 && loops_state_satisfies_p (LOOPS_NEED_FIXUP)) | |
694 repair_loop_structures (); | |
695 | |
696 return changed; | |
697 } | |
698 | |
/* Merge the PHI nodes at BB into those at BB's sole successor.  */

static void
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label
      && gimple_code (label) == GIMPLE_LABEL
      && DECL_NONLOCAL (gimple_label_label (label)))
    return;

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gimple_stmt_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
	{
	  /* We already have an edge S from E->src to DEST.  If S and
	     E->dest's sole successor edge have the same PHI arguments
	     at DEST, redirect S to DEST.  */
	  if (phi_alternatives_equal (dest, s, succ))
	    {
	      e = redirect_edge_and_branch (e, dest);
	      redirect_edge_var_map_clear (e);
	      continue;
	    }

	  /* PHI arguments are different.  Create a forwarder block by
	     splitting E so that we can merge PHI arguments on E to
	     DEST.  */
	  e = single_succ_edge (split_edge (e));
	}

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
	 destination of E.  */
      for (gsi = gsi_start_phis (dest);
	   !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple phi = gsi_stmt (gsi);
	  tree def = gimple_phi_arg_def (phi, succ->dest_idx);
	  source_location locus = gimple_phi_arg_location_from_edge (phi, succ);

	  if (TREE_CODE (def) == SSA_NAME)
	    {
	      edge_var_map_vector head;
	      edge_var_map *vm;
	      size_t i;

	      /* If DEF is one of the results of PHI nodes removed during
		 redirection, replace it with the PHI argument that used
		 to be on E.  */
	      head = redirect_edge_var_map_vector (e);
	      for (i = 0; VEC_iterate (edge_var_map, head, i, vm); ++i)
		{
		  tree old_arg = redirect_edge_var_map_result (vm);
		  tree new_arg = redirect_edge_var_map_def (vm);

		  if (def == old_arg)
		    {
		      def = new_arg;
		      /* Take the location along with the replacement.  */
		      locus = redirect_edge_var_map_location (vm);
		      break;
		    }
		}
	    }

	  add_phi_arg (phi, def, s, locus);
	}

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
	 nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);
}
811 | |
/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  goto <bb 9> (<L9>);

<L8>:;
  tem_17 = foo ();

  # tem_6 = PHI <tem_17(8), tem_23(7)>;
<L9>:;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;
<L10>:;

  Then we merge the first PHI node into the second one like so:

  goto <bb 9> (<L10>);

<L8>:;
  tem_17 = foo ();

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
<L10>:;
*/

static unsigned int
merge_phi_nodes (void)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks);
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB (bb)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
	continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
	 nodes.  */
      if (!phi_nodes (dest)
	  /* We don't want to deal with a basic block with
	     abnormal edges.  */
	  || has_abnormal_incoming_edge_p (bb))
	continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
	{
	  /* If BB does not dominate DEST, then the PHI nodes at
	     DEST must be the only users of the results of the PHI
	     nodes at BB.  */
	  *current++ = bb;
	}
      else
	{
	  gimple_stmt_iterator gsi;
	  unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

	  /* BB dominates DEST.  There may be many users of the PHI
	     nodes in BB.  However, there is still a trivial case we
	     can handle.  If the result of every PHI in BB is used
	     only by a PHI in DEST, then we can trivially merge the
	     PHI nodes from BB into DEST.  */
	  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	       gsi_next (&gsi))
	    {
	      gimple phi = gsi_stmt (gsi);
	      tree result = gimple_phi_result (phi);
	      use_operand_p imm_use;
	      gimple use_stmt;

	      /* If the PHI's result is never used, then we can just
		 ignore it.  */
	      if (has_zero_uses (result))
		continue;

	      /* Get the single use of the result of this PHI node.  */
	      if (!single_imm_use (result, &imm_use, &use_stmt)
		  || gimple_code (use_stmt) != GIMPLE_PHI
		  || gimple_bb (use_stmt) != dest
		  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
		break;
	    }

	  /* If the loop above iterated through all the PHI nodes
	     in BB, then we can merge the PHIs from BB into DEST.  */
	  if (gsi_end_p (gsi))
	    *current++ = bb;
	}
    }

  /* Now let's drain WORKLIST.  LIFO order keeps dominance info
     consistent as blocks are deleted.  */
  while (current != worklist)
    {
      bb = *--current;
      remove_forwarder_block_with_phi (bb);
    }

  free (worklist);
  return 0;
}
920 | |
/* Gate function for the PHI-merging pass: unconditionally enabled.  */

static bool
gate_merge_phi (void)
{
  return true;
}
926 | |
/* Pass descriptor for the "mergephi" GIMPLE pass (see merge_phi_nodes
   above).  The gate always fires; the pass requires a CFG in SSA form
   and verifies SSA when it finishes.  */

struct gimple_opt_pass pass_merge_phi =
{
 {
  GIMPLE_PASS,
  "mergephi",			/* name */
  gate_merge_phi,		/* gate */
  merge_phi_nodes,		/* execute */
  NULL,				/* sub */
  NULL,				/* next */
  0,				/* static_pass_number */
  TV_TREE_MERGE_PHI,		/* tv_id */
  PROP_cfg | PROP_ssa,		/* properties_required */
  0,				/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  TODO_dump_func | TODO_ggc_collect	/* todo_flags_finish */
  | TODO_verify_ssa
 }
};