Mercurial > hg > CbC > CbC_gcc
annotate gcc/tree-cfgcleanup.c @ 143:76e1cf5455ef
add cbc_gc test
author | Shinji KONO <kono@ie.u-ryukyu.ac.jp> |
---|---|
date | Sun, 23 Dec 2018 19:24:05 +0900 |
parents | 84e7813d76e9 |
children | 1830386684a0 |
rev | line source |
---|---|
0 | 1 /* CFG cleanup for trees. |
131 | 2 Copyright (C) 2001-2018 Free Software Foundation, Inc. |
0 | 3 |
4 This file is part of GCC. | |
5 | |
6 GCC is free software; you can redistribute it and/or modify | |
7 it under the terms of the GNU General Public License as published by | |
8 the Free Software Foundation; either version 3, or (at your option) | |
9 any later version. | |
10 | |
11 GCC is distributed in the hope that it will be useful, | |
12 but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 GNU General Public License for more details. | |
15 | |
16 You should have received a copy of the GNU General Public License | |
17 along with GCC; see the file COPYING3. If not see | |
18 <http://www.gnu.org/licenses/>. */ | |
19 | |
20 #include "config.h" | |
21 #include "system.h" | |
22 #include "coretypes.h" | |
111 | 23 #include "backend.h" |
24 #include "rtl.h" | |
0 | 25 #include "tree.h" |
111 | 26 #include "gimple.h" |
27 #include "cfghooks.h" | |
28 #include "tree-pass.h" | |
29 #include "ssa.h" | |
67
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
30 #include "diagnostic-core.h" |
111 | 31 #include "fold-const.h" |
32 #include "cfganal.h" | |
33 #include "cfgcleanup.h" | |
34 #include "tree-eh.h" | |
35 #include "gimplify.h" | |
36 #include "gimple-iterator.h" | |
37 #include "tree-cfg.h" | |
38 #include "tree-ssa-loop-manip.h" | |
39 #include "tree-dfa.h" | |
40 #include "tree-ssa.h" | |
0 | 41 #include "cfgloop.h" |
42 #include "tree-scalar-evolution.h" | |
111 | 43 #include "gimple-match.h" |
44 #include "gimple-fold.h" | |
45 #include "tree-ssa-loop-niter.h" | |
46 | |
0 | 47 |
48 /* The set of blocks in that at least one of the following changes happened: | |
49 -- the statement at the end of the block was changed | |
50 -- the block was newly created | |
51 -- the set of the predecessors of the block changed | |
52 -- the set of the successors of the block changed | |
53 ??? Maybe we could track these changes separately, since they determine | |
54 what cleanups it makes sense to try on the block. */ | |
55 bitmap cfgcleanup_altered_bbs; | |
56 | |
/* Remove any fallthru edge from EV.  Return true if an edge was removed.

   EV is the successor-edge vector of a basic block.  At most one edge in
   EV can carry EDGE_FALLTHRU, so we stop at the first match.  For edges
   that are also EDGE_COMPLEX (abnormal/EH-related edges that must not be
   deleted outright) we only clear the fallthru flag; otherwise the edge
   and everything dominated solely by it is removed from the CFG.  */

static bool
remove_fallthru_edge (vec<edge, va_gc> *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
        if (e->flags & EDGE_COMPLEX)
          e->flags &= ~EDGE_FALLTHRU;
        else
          remove_edge_and_dominated_blocks (e);
        return true;
      }
  return false;
}
76 | |
/* Convert a SWTCH with single non-default case to gcond and replace it
   at GSI.

   A switch with exactly two labels (the default plus one real case) is
   equivalent to a conditional branch: an equality test for a single case
   value, or a range test when the case has both CASE_LOW and CASE_HIGH.
   Returns true if the replacement was done, false if SWTCH has more than
   one non-default case.  Edge flags are updated so the case edge becomes
   the true edge and the default edge the false edge.  */

static bool
convert_single_case_switch (gswitch *swtch, gimple_stmt_iterator &gsi)
{
  if (gimple_switch_num_labels (swtch) != 2)
    return false;

  tree index = gimple_switch_index (swtch);
  tree label = gimple_switch_label (swtch, 1);
  tree low = CASE_LOW (label);
  tree high = CASE_HIGH (label);

  basic_block default_bb = gimple_switch_default_bb (cfun, swtch);
  basic_block case_bb = label_to_block (cfun, CASE_LABEL (label));

  basic_block bb = gimple_bb (swtch);
  gcond *cond;

  /* Replace switch statement with condition statement.  */
  if (high)
    {
      /* Case covers a range: build "low <= index && index <= high" as a
         single LE_EXPR via generate_range_test.  */
      tree lhs, rhs;
      generate_range_test (bb, index, low, high, &lhs, &rhs);
      cond = gimple_build_cond (LE_EXPR, lhs, rhs, NULL_TREE, NULL_TREE);
    }
  else
    cond = gimple_build_cond (EQ_EXPR, index,
                              fold_convert (TREE_TYPE (index), low),
                              NULL_TREE, NULL_TREE);

  gsi_replace (&gsi, cond, true);

  /* Update edges.  */
  edge case_edge = find_edge (bb, case_bb);
  edge default_edge = find_edge (bb, default_bb);

  case_edge->flags |= EDGE_TRUE_VALUE;
  default_edge->flags |= EDGE_FALSE_VALUE;
  return true;
}
0 | 119 |
/* Disconnect an unreachable block in the control expression starting
   at block BB.

   GSI points at the control statement (GIMPLE_COND or GIMPLE_SWITCH)
   ending BB.  If the controlling value can be determined at compile
   time, all outgoing edges except the taken one are removed together
   with the blocks they solely dominate; the control statement itself is
   then deleted and the surviving edge becomes a plain fallthru.
   Returns true if any edge was removed.  */

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple *stmt = gsi_stmt (gsi);

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      tree val = NULL_TREE;

      /* Try to convert a switch with just a single non-default case to
         GIMPLE condition.  */
      if (gimple_code (stmt) == GIMPLE_SWITCH
          && convert_single_case_switch (as_a<gswitch *> (stmt), gsi))
        stmt = gsi_stmt (gsi);

      /* Defer overflow warnings: we only want to emit them if we really
         delete an edge based on a folded comparison.  */
      fold_defer_overflow_warnings ();
      switch (gimple_code (stmt))
        {
        case GIMPLE_COND:
          {
            /* Fold the condition without following SSA edges; only a
               constant result lets us pick the taken edge.  */
            gimple_match_op res_op;
            if (gimple_simplify (stmt, &res_op, NULL, no_follow_ssa_edges,
                                 no_follow_ssa_edges)
                && res_op.code == INTEGER_CST)
              val = res_op.ops[0];
          }
          break;

        case GIMPLE_SWITCH:
          val = gimple_switch_index (as_a <gswitch *> (stmt));
          break;

        default:
          ;
        }
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return false;
        }

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e != taken_edge)
            {
              if (!warned)
                {
                  fold_undefer_overflow_warnings
                    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
                  warned = true;
                }

              /* The removed edge's execution count flows into the
                 surviving edge.  */
              taken_edge->probability += e->probability;
              remove_edge_and_dominated_blocks (e);
              retval = true;
            }
          else
            ei_next (&ei);
        }
      if (!warned)
        fold_undefer_and_ignore_overflow_warnings ();
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}
202 | |
/* Cleanup the GF_CALL_CTRL_ALTERING flag according to
   to updated gimple_call_flags.

   BB_END is the statement ending a basic block.  If it is a call that
   was marked control-altering, but its (possibly updated) flags show it
   is const/pure without looping, or a leaf call, it can no longer alter
   control flow, so clear the flag.  Non-calls and calls without the
   flag are left untouched.  */

static void
cleanup_call_ctrl_altering_flag (gimple *bb_end)
{
  if (!is_gimple_call (bb_end)
      || !gimple_call_ctrl_altering_p (bb_end))
    return;

  int flags = gimple_call_flags (bb_end);
  if (((flags & (ECF_CONST | ECF_PURE))
       && !(flags & ECF_LOOPING_CONST_OR_PURE))
      || (flags & ECF_LEAF))
    gimple_call_set_ctrl_altering (bb_end, false);
}
219 | |
/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.

   Handles four cases: stale EH edges after the last statement can no
   longer throw; a constant-foldable GIMPLE_COND/GIMPLE_SWITCH (delegated
   to cleanup_control_expr_graph); a computed goto whose destination is a
   compile-time-known label; and an indirect call discovered noreturn,
   whose fallthru edge is dead.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple *stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune cfg.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_nondebug_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  /* Try to cleanup ctrl altering flag for call which ends bb.  */
  cleanup_call_ctrl_altering_flag (stmt);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      /* Control statements must be the very last statement (no trailing
         debug stmts).  */
      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      retval |= cleanup_control_expr_graph (bb, gsi);
    }
  else if (gimple_code (stmt) == GIMPLE_GOTO
           && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
           && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
               == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
         destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      /* First look at all the outgoing edges.  Delete any outgoing
         edges which do not go to the right block.  For the one
         edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      /* Labels of another function (nested functions) cannot be resolved
         here; leave the goto alone.  */
      if (DECL_CONTEXT (label) != cfun->decl)
        return retval;
      target_block = label_to_block (cfun, label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e->dest != target_block)
            remove_edge_and_dominated_blocks (e);
          else
            {
              /* Turn off the EDGE_ABNORMAL flag.  */
              e->flags &= ~EDGE_ABNORMAL;

              /* And set EDGE_FALLTHRU.  */
              e->flags |= EDGE_FALLTHRU;
              ei_next (&ei);
            }
        }

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
         relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
           && gimple_call_noreturn_p (stmt))
    {
      /* If there are debug stmts after the noreturn call, remove them
         now, they should be all unreachable anyway.  */
      for (gsi_next (&gsi); !gsi_end_p (gsi); )
        gsi_remove (&gsi, true);
      if (remove_fallthru_edge (bb->succs))
        retval = true;
    }

  return retval;
}
308 | |
/* Return true if basic block BB does nothing except pass control
   flow to another block and that we can safely insert a label at
   the start of the successor block.

   When PHI_WANTED is false, BB must contain no PHI nodes; when true,
   BB must contain PHI nodes (used by callers that merge PHIs rather
   than remove the block).

   As a precondition, we require that BB be not equal to
   the entry block.  */

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (single_succ_p (bb) != 1
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
         Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of the exit block.  */
      || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
        return false;
      /* If goto_locus of any of the edges differs, prevent removing
         the forwarder block when not optimizing.  */
      else if (!optimize
               && (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
                   || LOCATION_LOCUS (locus) != UNKNOWN_LOCATION)
               && e->goto_locus != locus)
        return false;
  }

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
        {
        case GIMPLE_LABEL:
          if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
            return false;
          /* Without optimization, keep the block if removing it would
             merge statements with differing source locations (debug
             experience).  */
          if (!optimize
              && (gimple_has_location (stmt)
                  || LOCATION_LOCUS (locus) != UNKNOWN_LOCATION)
              && gimple_location (stmt) != locus)
            return false;
          break;

          /* ??? For now, hope there's a corresponding debug
             assignment at the destination.  */
        case GIMPLE_DEBUG:
          break;

        default:
          return false;
        }
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop headers.  */
      if (bb_loop_header_p (bb))
        return false;

      dest = EDGE_SUCC (bb, 0)->dest;
      /* Protect loop preheaders and latches if requested.  */
      if (dest->loop_father->header == dest)
        {
          if (bb->loop_father == dest->loop_father)
            {
              if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
                return false;
              /* If bb doesn't have a single predecessor we'd make this
                 loop have multiple latches.  Don't do that if that
                 would in turn require disambiguating them.  */
              return (single_pred_p (bb)
                      || loops_state_satisfies_p
                           (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
            }
          else if (bb->loop_father == loop_outer (dest->loop_father))
            return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
          /* Always preserve other edges into loop headers that are
             not simple latches or preheaders.  */
          return false;
        }
    }

  return true;
}
416 | |
/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.

   E1 and E2 are two incoming edges of DEST; used to decide whether an
   edge can be redirected to DEST without changing any PHI result.  */

static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
        return false;
    }

  return true;
}
443 | |
/* Removes forwarder block BB.  Returns false if this failed.

   BB must satisfy tree_forwarder_block_p.  All predecessors of BB are
   redirected to BB's single successor DEST; labels that must stay
   addressable (nonlocal, forced, EH landing pads, user labels) are moved
   into DEST, debug stmts are moved when DEST has a single predecessor,
   dominator and loop-latch information is fixed up, and BB is deleted.  */

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple *stmt;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is a
     EH landing pad, do not merge it.  */
  stmt = first_stmt (dest);
  if (stmt)
    if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt))
          || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
        return false;

  /* If there is an abnormal edge to basic block BB, but not into
     dest, problems might occur during removal of the phi node at out
     of ssa due to overlapping live ranges of registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and rest of exception handling code
     does not like it.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
          || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          s = find_edge (e->src, dest);
          if (!s)
            continue;

          if (!phi_alternatives_equal (dest, succ, s))
            return false;
        }
    }

  can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);

  /* Remember BB's single predecessor (if any) for the loop-latch fixup
     below, before the edges are redirected away.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
        {
          /* If there is an abnormal edge, redirect it anyway, and
             move the labels to the new block to make it legal.  */
          s = redirect_edge_succ_nodup (e, dest);
        }
      else
        s = redirect_edge_and_branch (e, dest);

      if (s == e)
        {
          /* Create arguments for the phi nodes, since the edge was not
             here before.  */
          for (gphi_iterator psi = gsi_start_phis (dest);
               !gsi_end_p (psi);
               gsi_next (&psi))
            {
              gphi *phi = psi.phi ();
              source_location l = gimple_phi_arg_location_from_edge (phi, succ);
              tree def = gimple_phi_arg_def (phi, succ->dest_idx);
              add_phi_arg (phi, unshare_expr (def), s, l);
            }
        }
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
        break;

      /* Forwarder blocks can only contain labels and debug stmts, and
         labels must come first, so if we get to this point, we know
         we're looking at a label.  */
      tree decl = gimple_label_label (as_a <glabel *> (stmt));
      if (EH_LANDING_PAD_NR (decl) != 0
          || DECL_NONLOCAL (decl)
          || FORCED_LABEL (decl)
          || !DECL_ARTIFICIAL (decl))
        gsi_move_before (&gsi, &gsi_to);
      else
        gsi_next (&gsi);
    }

  /* Move debug statements if the destination has a single predecessor.  */
  if (can_move_debug_stmts && !gsi_end_p (gsi))
    {
      gsi_to = gsi_after_labels (dest);
      do
        {
          gimple *debug = gsi_stmt (gsi);
          gcc_assert (is_gimple_debug (debug));
          gsi_move_before (&gsi, &gsi_to);
        }
      while (!gsi_end_p (gsi));
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
        {
          /* Shortcut to avoid calling (relatively expensive)
             nearest_common_dominator unless necessary.  */
          dom = dombb;
        }
      else
        dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not to kill the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}
608 | |
/* STMT is a call that has been discovered noreturn.  Split the
   block to prepare fixing up the CFG and remove LHS.
   Return true if cleanup-cfg needs to run.

   Exported (non-static): callers elsewhere invoke this when a call's
   noreturn status is discovered late.  */

bool
fixup_noreturn_call (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  /* __builtin_return really does return; nothing to fix up.  */
  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split basic block if stmt is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    {
      if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
        {
          /* Don't split if there are only debug stmts
             after stmt, that can result in -fcompare-debug
             failures.  Remove the debug stmts instead,
             they should be all unreachable anyway.  */
          gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
          for (gsi_next (&gsi); !gsi_end_p (gsi); )
            gsi_remove (&gsi, true);
        }
      else
        {
          split_block (bb, stmt);
          changed = true;
        }
    }

  /* If there is an LHS, remove it, but only if its type has fixed size.
     The LHS will need to be recreated during RTL expansion and creating
     temporaries of variable-sized types is not supported.  Also don't
     do this with TREE_ADDRESSABLE types, as assign_temp will abort.
     Drop LHS regardless of TREE_ADDRESSABLE, if the function call
     has been changed into a call that does not return a value, like
     __builtin_unreachable or __cxa_pure_virtual.  */
  tree lhs = gimple_call_lhs (stmt);
  if (lhs
      && (should_remove_lhs_p (lhs)
          || VOID_TYPE_P (TREE_TYPE (gimple_call_fntype (stmt)))))
    {
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to fix up the SSA name to avoid checking errors.  */
      if (TREE_CODE (lhs) == SSA_NAME)
        {
          /* Turn the orphaned SSA name into a default definition of a
             fresh temporary so the SSA verifier stays happy.  */
          tree new_var = create_tmp_reg (TREE_TYPE (lhs));
          SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
          SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
          set_ssa_default_def (cfun, new_var, lhs);
        }

      update_stmt (stmt);
    }

  /* Mark the call as altering control flow.  */
  if (!gimple_call_ctrl_altering_p (stmt))
    {
      gimple_call_set_ctrl_altering (stmt, true);
      changed = true;
    }

  return changed;
}
677 | |
111 | 678 /* Return true if we want to merge BB1 and BB2 into a single block. */ |
0 | 679 |
680 static bool | |
111 | 681 want_merge_blocks_p (basic_block bb1, basic_block bb2) |
0 | 682 { |
111 | 683 if (!can_merge_blocks_p (bb1, bb2)) |
0 | 684 return false; |
111 | 685 gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb1); |
686 if (gsi_end_p (gsi) || !stmt_can_terminate_bb_p (gsi_stmt (gsi))) | |
687 return true; | |
688 return bb1->count.ok_for_merging (bb2->count); | |
689 } | |
0 | 690 |
691 | |
/* Tries to cleanup cfg in basic block BB by merging blocks.  Returns
   true if anything changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  /* First try to get rid of BB itself if it is a trivial forwarder.  */
  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* If there is a merge opportunity with the predecessor
     do nothing now but wait until we process the predecessor.
     This happens when we visit BBs in a non-optimal order and
     avoids quadratic behavior with adjusting stmts BB pointer.  */
  if (single_pred_p (bb)
      && want_merge_blocks_p (single_pred (bb), bb))
    /* But make sure we _do_ visit it.  When we remove unreachable paths
       ending in a backedge we fail to mark the destinations predecessors
       as changed.  */
    bitmap_set_bit (cfgcleanup_altered_bbs, single_pred (bb)->index);

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  else if (single_succ_p (bb)
	   && want_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return false;
}
725 | |
/* Do cleanup_control_flow_bb in PRE order.  Returns true if anything
   changed; as a side effect deletes all blocks not reached from the
   entry block during the walk.  */

static bool
cleanup_control_flow_pre ()
{
  bool retval = false;

  /* We want remove_edge_and_dominated_blocks to only remove edges,
     not dominated blocks which it does when dom info isn't available.
     Pretend so.  */
  dom_state saved_state = dom_info_state (CDI_DOMINATORS);
  set_dom_info_availability (CDI_DOMINATORS, DOM_NONE);

  /* Iterative DFS from the entry block; the stack holds the edge
     iterator position within each block's successor vector.  */
  auto_vec<edge_iterator, 20> stack (n_basic_blocks_for_fn (cfun) + 1);
  auto_sbitmap visited (last_basic_block_for_fn (cfun));
  bitmap_clear (visited);

  stack.quick_push (ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs));

  while (! stack.is_empty ())
    {
      /* Look at the edge on the top of the stack.  */
      edge_iterator ei = stack.last ();
      basic_block dest = ei_edge (ei)->dest;

      if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	  && ! bitmap_bit_p (visited, dest->index))
	{
	  bitmap_set_bit (visited, dest->index);
	  /* We only possibly remove edges from DEST here, leaving
	     possibly unreachable code in the IL.  */
	  retval |= cleanup_control_flow_bb (dest);
	  if (EDGE_COUNT (dest->succs) > 0)
	    stack.quick_push (ei_start (dest->succs));
	}
      else
	{
	  if (!ei_one_before_end_p (ei))
	    ei_next (&stack.last ());
	  else
	    stack.pop ();
	}
    }

  set_dom_info_availability (CDI_DOMINATORS, saved_state);

  /* We are deleting BBs in non-reverse dominator order, make sure
     insert_debug_temps_for_defs is prepared for that.  */
  if (retval)
    free_dominance_info (CDI_DOMINATORS);

  /* Remove all now (and previously) unreachable blocks, i.e. those the
     DFS above never marked as visited.  */
  for (int i = NUM_FIXED_BLOCKS; i < last_basic_block_for_fn (cfun); ++i)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb && !bitmap_bit_p (visited, bb->index))
	{
	  if (!retval)
	    free_dominance_info (CDI_DOMINATORS);
	  delete_basic_block (bb);
	  retval = true;
	}
    }

  return retval;
}
792 | |
111 | 793 static bool |
794 mfb_keep_latches (edge e) | |
795 { | |
131 | 796 return !((dom_info_available_p (CDI_DOMINATORS) |
797 && dominated_by_p (CDI_DOMINATORS, e->src, e->dest)) | |
798 || (e->flags & EDGE_DFS_BACK)); | |
111 | 799 } |
0 | 800 |
/* Remove unreachable blocks and other miscellaneous clean up work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Ensure that we have single entries into loop headers.  Otherwise
     if one of the entries is becoming a latch due to CFG cleanup
     (from formerly being part of an irreducible region) then we mess
     up loop fixup and associate the old loop with a different region
     which makes niter upper bounds invalid.  See for example PR80549.
     This needs to be done before we remove trivially dead edges as
     we need to capture the dominance state before the pending transform.  */
  if (current_loops)
    {
      /* This needs backedges or dominators.  */
      if (!dom_info_available_p (CDI_DOMINATORS))
	mark_dfs_back_edges ();

      loop_p loop;
      unsigned i;
      FOR_EACH_VEC_ELT (*get_loops (cfun), i, loop)
	if (loop && loop->header)
	  {
	    basic_block bb = loop->header;
	    edge_iterator ei;
	    edge e;
	    bool found_latch = false;
	    bool any_abnormal = false;
	    unsigned n = 0;
	    /* We are only interested in preserving existing loops, but
	       we need to check whether they are still real and of course
	       if we need to add a preheader at all.  */
	    FOR_EACH_EDGE (e, ei, bb->preds)
	      {
		if (e->flags & EDGE_ABNORMAL)
		  {
		    any_abnormal = true;
		    break;
		  }
		/* Latch edges are those coming from inside the loop:
		   dominated by the header, or marked as back edges.  */
		if ((dom_info_available_p (CDI_DOMINATORS)
		     && dominated_by_p (CDI_DOMINATORS, e->src, bb))
		    || (e->flags & EDGE_DFS_BACK))
		  {
		    found_latch = true;
		    continue;
		  }
		n++;
	      }
	    /* If we have more than one entry to the loop header
	       create a forwarder.  */
	    if (found_latch && ! any_abnormal && n > 1)
	      {
		edge fallthru = make_forwarder_block (bb, mfb_keep_latches,
						      NULL);
		loop->header = fallthru->dest;
		if (! loops_state_satisfies_p (LOOPS_NEED_FIXUP))
		  {
		    /* The loop updating from the CFG hook is incomplete
		       when we have multiple latches, fixup manually.  */
		    remove_bb_from_loops (fallthru->src);
		    loop_p cloop = loop;
		    FOR_EACH_EDGE (e, ei, fallthru->src->preds)
		      cloop = find_common_loop (cloop, e->src->loop_father);
		    add_bb_to_loop (fallthru->src, cloop);
		  }
	      }
	  }
    }

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* Start by iterating over all basic blocks in PRE order looking for
     edge removal opportunities.  Do this first because incoming SSA form
     may be invalid and we want to avoid performing SSA related tasks such
     as propagating out a PHI node during BB merging in that state.  This
     also gets rid of unreachable blocks.  */
  bool changed = cleanup_control_flow_pre ();

  /* After doing the above SSA form should be valid (or an update SSA
     should be required).  */

  /* Compute dominator info which we need for the iterative process below.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    calculate_dominance_info (CDI_DOMINATORS);
  else
    checking_verify_dominators (CDI_DOMINATORS);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edge to CASE_LABEL_EXPR.  */
  start_recording_case_labels ();

  /* Continue by iterating over all basic blocks looking for BB merging
     opportunities.  We cannot use FOR_EACH_BB_FN for the BB iteration
     since the basic blocks may get removed.  */
  unsigned n = last_basic_block_for_fn (cfun);
  for (unsigned i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
	changed |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      unsigned i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
	continue;

      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (!bb)
	continue;

      /* BB merging done by cleanup_tree_cfg_bb can end up propagating
	 out single-argument PHIs which in turn can expose
	 cleanup_control_flow_bb opportunities so we have to repeat
	 that here.  */
      changed |= cleanup_control_flow_bb (bb);
      changed |= cleanup_tree_cfg_bb (bb);
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));

  /* Do not renumber blocks if the SCEV cache is active, it is indexed by
     basic-block numbers.  */
  if (! scev_initialized_p ())
    compact_blocks ();

  checking_verify_flow_info ();

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    {
      /* Removing edges and/or blocks may make recorded bounds refer
	 to stale GIMPLE stmts now, so clear them.  */
      free_numbers_of_iterations_estimates (cfun);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  return changed;
}
952 | |
/* Repairs loop structures.  Recomputes the loop tree after CFG cleanup
   and, when loop-closed SSA is in effect, rewrites affected blocks back
   into loop-closed SSA form.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs;
  unsigned n_new_loops;

  calculate_dominance_info (CDI_DOMINATORS);

  timevar_push (TV_REPAIR_LOOPS);
  changed_bbs = BITMAP_ALLOC (NULL);
  n_new_loops = fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of cfg that originally
     were inside a loop get out of it due to edge removal (since they
     become unreachable by back edges from latch).  Also a former
     irreducible loop can become reducible - in this case force a full
     rewrite into loop-closed SSA form.  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    /* A NULL bitmap argument means "rewrite everything"; use it when
       fix_loop_structure discovered new loops.  */
    rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
				  TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

  checking_verify_loop_structure ();
  scev_reset ();

  timevar_pop (TV_REPAIR_LOOPS);
}
983 | |
984 /* Cleanup cfg and repair loop structures. */ | |
985 | |
986 bool | |
987 cleanup_tree_cfg (void) | |
988 { | |
989 bool changed = cleanup_tree_cfg_noloop (); | |
990 | |
991 if (current_loops != NULL | |
992 && loops_state_satisfies_p (LOOPS_NEED_FIXUP)) | |
993 repair_loop_structures (); | |
994 | |
995 return changed; | |
996 } | |
997 | |
/* Tries to merge the PHI nodes at BB into those at BB's sole successor.
   Returns true if successful.  */

static bool
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple *label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* Removal of forwarders may expose new natural loops and thus
     a block may turn into a loop header.  */
  if (current_loops && bb_loop_header_p (bb))
    return false;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
	return false;

  /* Record BB's single pred in case we need to update the father
     loop's latch information later.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gphi_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
	{
	  /* We already have an edge S from E->src to DEST.  If S and
	     E->dest's sole successor edge have the same PHI arguments
	     at DEST, redirect S to DEST.  */
	  if (phi_alternatives_equal (dest, s, succ))
	    {
	      e = redirect_edge_and_branch (e, dest);
	      redirect_edge_var_map_clear (e);
	      continue;
	    }

	  /* PHI arguments are different.  Create a forwarder block by
	     splitting E so that we can merge PHI arguments on E to
	     DEST.  */
	  e = single_succ_edge (split_edge (e));
	}
      else
	{
	  /* If we merge the forwarder into a loop header verify if we
	     are creating another loop latch edge.  If so, reset
	     number of iteration information of the loop.  */
	  if (dest->loop_father->header == dest
	      && dominated_by_p (CDI_DOMINATORS, e->src, dest))
	    {
	      dest->loop_father->any_upper_bound = false;
	      dest->loop_father->any_likely_upper_bound = false;
	      free_numbers_of_iterations_estimates (dest->loop_father);
	    }
	}

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
	 destination of E.  */
      for (gsi = gsi_start_phis (dest);
	   !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  tree def = gimple_phi_arg_def (phi, succ->dest_idx);
	  source_location locus = gimple_phi_arg_location_from_edge (phi, succ);

	  if (TREE_CODE (def) == SSA_NAME)
	    {
	      /* If DEF is one of the results of PHI nodes removed during
		 redirection, replace it with the PHI argument that used
		 to be on E.  */
	      vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
	      size_t length = head ? head->length () : 0;
	      for (size_t i = 0; i < length; i++)
		{
		  edge_var_map *vm = &(*head)[i];
		  tree old_arg = redirect_edge_var_map_result (vm);
		  tree new_arg = redirect_edge_var_map_def (vm);

		  if (def == old_arg)
		    {
		      def = new_arg;
		      locus = redirect_edge_var_map_location (vm);
		      break;
		    }
		}
	    }

	  add_phi_arg (phi, def, s, locus);
	}

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
	 nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not to kill the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);

  return true;
}
1140 | |
1141 /* This pass merges PHI nodes if one feeds into another. For example, | |
1142 suppose we have the following: | |
1143 | |
1144 goto <bb 9> (<L9>); | |
1145 | |
1146 <L8>:; | |
1147 tem_17 = foo (); | |
1148 | |
1149 # tem_6 = PHI <tem_17(8), tem_23(7)>; | |
1150 <L9>:; | |
1151 | |
1152 # tem_3 = PHI <tem_6(9), tem_2(5)>; | |
1153 <L10>:; | |
1154 | |
1155 Then we merge the first PHI node into the second one like so: | |
1156 | |
1157 goto <bb 9> (<L10>); | |
1158 | |
1159 <L8>:; | |
1160 tem_17 = foo (); | |
1161 | |
1162 # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>; | |
1163 <L10>:; | |
1164 */ | |
1165 | |
namespace {

const pass_data pass_data_merge_phi =
{
  GIMPLE_PASS, /* type */
  "mergephi", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_MERGE_PHI, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_merge_phi : public gimple_opt_pass
{
public:
  pass_merge_phi (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_merge_phi, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_merge_phi

/* Collect forwarder blocks whose PHIs can be merged into their sole
   successor and then remove them, most recently found first.  */

unsigned int
pass_merge_phi::execute (function *fun)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
	continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
	 nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
	  /* We don't want to deal with a basic block with
	     abnormal edges.  */
	  || bb_has_abnormal_pred (bb))
	continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
	{
	  /* If BB does not dominate DEST, then the PHI nodes at
	     DEST must be the only users of the results of the PHI
	     nodes at BB.  */
	  *current++ = bb;
	}
      else
	{
	  gphi_iterator gsi;
	  unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

	  /* BB dominates DEST.  There may be many users of the PHI
	     nodes in BB.  However, there is still a trivial case we
	     can handle.  If the result of every PHI in BB is used
	     only by a PHI in DEST, then we can trivially merge the
	     PHI nodes from BB into DEST.  */
	  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	       gsi_next (&gsi))
	    {
	      gphi *phi = gsi.phi ();
	      tree result = gimple_phi_result (phi);
	      use_operand_p imm_use;
	      gimple *use_stmt;

	      /* If the PHI's result is never used, then we can just
		 ignore it.  */
	      if (has_zero_uses (result))
		continue;

	      /* Get the single use of the result of this PHI node.  */
	      if (!single_imm_use (result, &imm_use, &use_stmt)
		  || gimple_code (use_stmt) != GIMPLE_PHI
		  || gimple_bb (use_stmt) != dest
		  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
		break;
	    }

	  /* If the loop above iterated through all the PHI nodes
	     in BB, then we can merge the PHIs from BB into DEST.  */
	  if (gsi_end_p (gsi))
	    *current++ = bb;
	}
    }

  /* Now let's drain WORKLIST.  */
  bool changed = false;
  while (current != worklist)
    {
      bb = *--current;
      changed |= remove_forwarder_block_with_phi (bb);
    }
  free (worklist);

  /* Removing forwarder blocks can cause formerly irreducible loops
     to become reducible if we merged two entry blocks.  */
  if (changed
      && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return 0;
}

} // anon namespace
1286 | |
/* Pass factory: create a new instance of the mergephi pass.  */

gimple_opt_pass *
make_pass_merge_phi (gcc::context *ctxt)
{
  return new pass_merge_phi (ctxt);
}
1292 | |
/* Pass: cleanup the CFG just before expanding trees to RTL.
   This is just a round of label cleanups and case node grouping
   because after the tree optimizers have run such cleanups may
   be necessary.  */

static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
  unsigned int todo = execute_fixup_cfg ();
  if (cleanup_tree_cfg ())
    {
      todo &= ~TODO_cleanup_cfg;
      todo |= TODO_update_ssa;
    }
  maybe_remove_unreachable_handlers ();
  cleanup_dead_labels ();
  if (group_case_labels ())
    todo |= TODO_cleanup_cfg;
  /* For -fcompare-debug, append an enumerated decl dump to the final
     insns dump file so debug/non-debug compilations can be compared.  */
  if ((flag_compare_debug_opt || flag_compare_debug)
      && flag_dump_final_insns)
    {
      FILE *final_output = fopen (flag_dump_final_insns, "a");

      if (!final_output)
	{
	  error ("could not open final insn dump file %qs: %m",
		 flag_dump_final_insns);
	  flag_dump_final_insns = NULL;
	}
      else
	{
	  int save_unnumbered = flag_dump_unnumbered;
	  int save_noaddr = flag_dump_noaddr;

	  /* Suppress instruction numbers and addresses so the dump is
	     stable across the compared compilations.  */
	  flag_dump_noaddr = flag_dump_unnumbered = 1;
	  fprintf (final_output, "\n");
	  dump_enumerated_decls (final_output,
				 dump_flags | TDF_SLIM | TDF_NOUID);
	  flag_dump_noaddr = save_noaddr;
	  flag_dump_unnumbered = save_unnumbered;
	  if (fclose (final_output))
	    {
	      error ("could not close final insn dump file %qs: %m",
		     flag_dump_final_insns);
	      flag_dump_final_insns = NULL;
	    }
	}
    }
  return todo;
}
1343 | |
namespace {

const pass_data pass_data_cleanup_cfg_post_optimizing =
{
  GIMPLE_PASS, /* type */
  "optimized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CLEANUP_CFG, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_remove_unused_locals, /* todo_flags_finish */
};

/* Final CFG cleanup pass, run after the tree optimizers; delegates the
   work to execute_cleanup_cfg_post_optimizing.  */

class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
{
public:
  pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
  {
    return execute_cleanup_cfg_post_optimizing ();
  }

}; // class pass_cleanup_cfg_post_optimizing

} // anon namespace
1375 | |
/* Pass factory: create a new instance of the "optimized" cleanup pass.  */

gimple_opt_pass *
make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
{
  return new pass_cleanup_cfg_post_optimizing (ctxt);
}
1381 | |
1382 |