comparison gcc/tree-cfgcleanup.c @ 16:04ced10e8804

gcc 7
author kono
date Fri, 27 Oct 2017 22:46:09 +0900
parents f6334be47118
children 84e7813d76e9
comparison of 15:561a7518be6b (left) with 16:04ced10e8804 (right)
1 /* CFG cleanup for trees. 1 /* CFG cleanup for trees.
2 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 2 Copyright (C) 2001-2017 Free Software Foundation, Inc.
3 Free Software Foundation, Inc.
4 3
5 This file is part of GCC. 4 This file is part of GCC.
6 5
7 GCC is free software; you can redistribute it and/or modify 6 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by 7 it under the terms of the GNU General Public License as published by
19 <http://www.gnu.org/licenses/>. */ 18 <http://www.gnu.org/licenses/>. */
20 19
21 #include "config.h" 20 #include "config.h"
22 #include "system.h" 21 #include "system.h"
23 #include "coretypes.h" 22 #include "coretypes.h"
24 #include "tm.h" 23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h" 25 #include "tree.h"
26 #include "tm_p.h" 26 #include "gimple.h"
27 #include "basic-block.h" 27 #include "cfghooks.h"
28 #include "output.h" 28 #include "tree-pass.h"
29 #include "ssa.h"
29 #include "diagnostic-core.h" 30 #include "diagnostic-core.h"
30 #include "flags.h" 31 #include "fold-const.h"
31 #include "function.h" 32 #include "cfganal.h"
32 #include "ggc.h" 33 #include "cfgcleanup.h"
33 #include "langhooks.h" 34 #include "tree-eh.h"
34 #include "tree-flow.h" 35 #include "gimplify.h"
35 #include "timevar.h" 36 #include "gimple-iterator.h"
36 #include "tree-dump.h" 37 #include "tree-cfg.h"
37 #include "tree-pass.h" 38 #include "tree-ssa-loop-manip.h"
38 #include "except.h" 39 #include "tree-dfa.h"
40 #include "tree-ssa.h"
39 #include "cfgloop.h" 41 #include "cfgloop.h"
40 #include "cfglayout.h"
41 #include "hashtab.h"
42 #include "tree-ssa-propagate.h"
43 #include "tree-scalar-evolution.h" 42 #include "tree-scalar-evolution.h"
43 #include "gimple-match.h"
44 #include "gimple-fold.h"
45 #include "tree-ssa-loop-niter.h"
46
44 47
45 /* The set of blocks in which at least one of the following changes happened: 48 /* The set of blocks in which at least one of the following changes happened:
46 -- the statement at the end of the block was changed 49 -- the statement at the end of the block was changed
47 -- the block was newly created 50 -- the block was newly created
48 -- the set of the predecessors of the block changed 51 -- the set of the predecessors of the block changed
52 bitmap cfgcleanup_altered_bbs; 55 bitmap cfgcleanup_altered_bbs;
53 56
54 /* Remove any fallthru edge from EV. Return true if an edge was removed. */ 57 /* Remove any fallthru edge from EV. Return true if an edge was removed. */
55 58
56 static bool 59 static bool
57 remove_fallthru_edge (VEC(edge,gc) *ev) 60 remove_fallthru_edge (vec<edge, va_gc> *ev)
58 { 61 {
59 edge_iterator ei; 62 edge_iterator ei;
60 edge e; 63 edge e;
61 64
62 FOR_EACH_EDGE (e, ei, ev) 65 FOR_EACH_EDGE (e, ei, ev)
63 if ((e->flags & EDGE_FALLTHRU) != 0) 66 if ((e->flags & EDGE_FALLTHRU) != 0)
64 { 67 {
65 remove_edge_and_dominated_blocks (e); 68 if (e->flags & EDGE_COMPLEX)
69 e->flags &= ~EDGE_FALLTHRU;
70 else
71 remove_edge_and_dominated_blocks (e);
66 return true; 72 return true;
67 } 73 }
68 return false; 74 return false;
69 } 75 }
70 76
77 /* Convert a SWTCH with a single non-default case to a gcond and replace it
78 at GSI. */
79
80 static bool
81 convert_single_case_switch (gswitch *swtch, gimple_stmt_iterator &gsi)
82 {
83 if (gimple_switch_num_labels (swtch) != 2)
84 return false;
85
86 tree index = gimple_switch_index (swtch);
87 tree default_label = CASE_LABEL (gimple_switch_default_label (swtch));
88 tree label = gimple_switch_label (swtch, 1);
89 tree low = CASE_LOW (label);
90 tree high = CASE_HIGH (label);
91
92 basic_block default_bb = label_to_block_fn (cfun, default_label);
93 basic_block case_bb = label_to_block_fn (cfun, CASE_LABEL (label));
94
95 basic_block bb = gimple_bb (swtch);
96 gcond *cond;
97
98 /* Replace switch statement with condition statement. */
99 if (high)
100 {
101 tree lhs, rhs;
102 generate_range_test (bb, index, low, high, &lhs, &rhs);
103 cond = gimple_build_cond (LE_EXPR, lhs, rhs, NULL_TREE, NULL_TREE);
104 }
105 else
106 cond = gimple_build_cond (EQ_EXPR, index,
107 fold_convert (TREE_TYPE (index), low),
108 NULL_TREE, NULL_TREE);
109
110 gsi_replace (&gsi, cond, true);
111
112 /* Update edges. */
113 edge case_edge = find_edge (bb, case_bb);
114 edge default_edge = find_edge (bb, default_bb);
115
116 case_edge->flags |= EDGE_TRUE_VALUE;
117 default_edge->flags |= EDGE_FALSE_VALUE;
118 return true;
119 }
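At the source level, a switch whose only labels are the default and one case (gimple_switch_num_labels counts the default, hence the == 2 test) is just a two-way branch. A minimal C sketch of the shape this function targets; the names are illustrative only:

    extern void f (void), g (void);

    void
    dispatch (int x)
    {
      /* Single non-default case: CASE_LOW is 4, CASE_HIGH is NULL.  */
      switch (x)
        {
        case 4:  f (); break;
        default: g (); break;
        }

      /* Equivalent gcond form built by convert_single_case_switch.  */
      if (x == 4)
        f ();
      else
        g ();
    }

For a GNU case range such as `case 4 ... 7:', CASE_HIGH is set and generate_range_test builds the LE_EXPR-based range check instead.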
71 120
72 /* Disconnect an unreachable block in the control expression starting 121 /* Disconnect an unreachable block in the control expression starting
73 at block BB. */ 122 at block BB. */
74 123
75 static bool 124 static bool
76 cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi) 125 cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi,
126 bool first_p)
77 { 127 {
78 edge taken_edge; 128 edge taken_edge;
79 bool retval = false; 129 bool retval = false;
80 gimple stmt = gsi_stmt (gsi); 130 gimple *stmt = gsi_stmt (gsi);
81 tree val;
82 131
83 if (!single_succ_p (bb)) 132 if (!single_succ_p (bb))
84 { 133 {
85 edge e; 134 edge e;
86 edge_iterator ei; 135 edge_iterator ei;
87 bool warned; 136 bool warned;
88 location_t loc; 137 tree val = NULL_TREE;
138
139 /* Try to convert a switch with just a single non-default case to
140 a GIMPLE condition. */
141 if (gimple_code (stmt) == GIMPLE_SWITCH
142 && convert_single_case_switch (as_a<gswitch *> (stmt), gsi))
143 stmt = gsi_stmt (gsi);
89 144
90 fold_defer_overflow_warnings (); 145 fold_defer_overflow_warnings ();
91 loc = gimple_location (stmt);
92 switch (gimple_code (stmt)) 146 switch (gimple_code (stmt))
93 { 147 {
94 case GIMPLE_COND: 148 case GIMPLE_COND:
95 { 149 /* During a first iteration on the CFG only remove trivially
96 tree lhs = gimple_cond_lhs (stmt); 150 dead edges but mark other conditions for re-evaluation. */
97 tree rhs = gimple_cond_rhs (stmt); 151 if (first_p)
98 /* For conditions try harder and lookup single-argument 152 {
99 PHI nodes. Only do so from the same basic-block though 153 val = const_binop (gimple_cond_code (stmt), boolean_type_node,
100 as other basic-blocks may be dead already. */ 154 gimple_cond_lhs (stmt),
101 if (TREE_CODE (lhs) == SSA_NAME 155 gimple_cond_rhs (stmt));
102 && !name_registered_for_update_p (lhs)) 156 if (! val)
103 { 157 bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
104 gimple def_stmt = SSA_NAME_DEF_STMT (lhs); 158 }
105 if (gimple_code (def_stmt) == GIMPLE_PHI 159 else
106 && gimple_phi_num_args (def_stmt) == 1 160 {
107 && gimple_bb (def_stmt) == gimple_bb (stmt) 161 code_helper rcode;
108 && (TREE_CODE (PHI_ARG_DEF (def_stmt, 0)) != SSA_NAME 162 tree ops[3] = {};
109 || !name_registered_for_update_p (PHI_ARG_DEF (def_stmt, 163 if (gimple_simplify (stmt, &rcode, ops, NULL, no_follow_ssa_edges,
110 0)))) 164 no_follow_ssa_edges)
111 lhs = PHI_ARG_DEF (def_stmt, 0); 165 && rcode == INTEGER_CST)
112 } 166 val = ops[0];
113 if (TREE_CODE (rhs) == SSA_NAME 167 }
114 && !name_registered_for_update_p (rhs)) 168 break;
115 {
116 gimple def_stmt = SSA_NAME_DEF_STMT (rhs);
117 if (gimple_code (def_stmt) == GIMPLE_PHI
118 && gimple_phi_num_args (def_stmt) == 1
119 && gimple_bb (def_stmt) == gimple_bb (stmt)
120 && (TREE_CODE (PHI_ARG_DEF (def_stmt, 0)) != SSA_NAME
121 || !name_registered_for_update_p (PHI_ARG_DEF (def_stmt,
122 0))))
123 rhs = PHI_ARG_DEF (def_stmt, 0);
124 }
125 val = fold_binary_loc (loc, gimple_cond_code (stmt),
126 boolean_type_node, lhs, rhs);
127 break;
128 }
129 169
130 case GIMPLE_SWITCH: 170 case GIMPLE_SWITCH:
131 val = gimple_switch_index (stmt); 171 val = gimple_switch_index (as_a <gswitch *> (stmt));
132 break; 172 break;
133 173
134 default: 174 default:
135 val = NULL_TREE; 175 ;
136 } 176 }
137 taken_edge = find_taken_edge (bb, val); 177 taken_edge = find_taken_edge (bb, val);
138 if (!taken_edge) 178 if (!taken_edge)
139 { 179 {
140 fold_undefer_and_ignore_overflow_warnings (); 180 fold_undefer_and_ignore_overflow_warnings ();
153 (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL); 193 (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
154 warned = true; 194 warned = true;
155 } 195 }
156 196
157 taken_edge->probability += e->probability; 197 taken_edge->probability += e->probability;
158 taken_edge->count += e->count;
159 remove_edge_and_dominated_blocks (e); 198 remove_edge_and_dominated_blocks (e);
160 retval = true; 199 retval = true;
161 } 200 }
162 else 201 else
163 ei_next (&ei); 202 ei_next (&ei);
164 } 203 }
165 if (!warned) 204 if (!warned)
166 fold_undefer_and_ignore_overflow_warnings (); 205 fold_undefer_and_ignore_overflow_warnings ();
167 if (taken_edge->probability > REG_BR_PROB_BASE)
168 taken_edge->probability = REG_BR_PROB_BASE;
169 } 206 }
170 else 207 else
171 taken_edge = single_succ_edge (bb); 208 taken_edge = single_succ_edge (bb);
172 209
173 bitmap_set_bit (cfgcleanup_altered_bbs, bb->index); 210 bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
175 taken_edge->flags = EDGE_FALLTHRU; 212 taken_edge->flags = EDGE_FALLTHRU;
176 213
177 return retval; 214 return retval;
178 } 215 }
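The folding here is deliberately conservative: on the first CFG iteration only conditions whose operands are already constants fold (const_binop), while later iterations use gimple_simplify without following SSA edges. A trivially dead edge at the C level, as an illustrative sketch:

    extern void dead (void), live (void);

    void
    f (void)
    {
      if (1 > 2)   /* folds to false; find_taken_edge picks the false edge   */
        dead ();   /* and remove_edge_and_dominated_blocks deletes this arm  */
      live ();
    }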
179 216
217 /* Cleanup the GF_CALL_CTRL_ALTERING flag according to
218 updated gimple_call_flags. */
219
220 static void
221 cleanup_call_ctrl_altering_flag (gimple *bb_end)
222 {
223 if (!is_gimple_call (bb_end)
224 || !gimple_call_ctrl_altering_p (bb_end))
225 return;
226
227 int flags = gimple_call_flags (bb_end);
228 if (((flags & (ECF_CONST | ECF_PURE))
229 && !(flags & ECF_LOOPING_CONST_OR_PURE))
230 || (flags & ECF_LEAF))
231 gimple_call_set_ctrl_altering (bb_end, false);
232 }
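GF_CALL_CTRL_ALTERING marks calls that may transfer control abnormally; per the test above, a call that is const or pure (and not ECF_LOOPING_CONST_OR_PURE) or leaf no longer needs the mark, e.g. after inlining or attribute discovery. Mapping the ECF flags back to source attributes below is my assumption, but it is the conventional one:

    /* const (~ ECF_CONST) and leaf (~ ECF_LEAF): under the condition in
       cleanup_call_ctrl_altering_flag, a call to this callee gets its
       earlier GF_CALL_CTRL_ALTERING flag cleared again.  */
    extern int clamp255 (int) __attribute__ ((const, leaf));

    int
    use (int x)
    {
      return clamp255 (x) + 1;
    }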
233
180 /* Try to remove superfluous control structures in basic block BB. Returns 234 /* Try to remove superfluous control structures in basic block BB. Returns
181 true if anything changes. */ 235 true if anything changes. */
182 236
183 static bool 237 static bool
184 cleanup_control_flow_bb (basic_block bb) 238 cleanup_control_flow_bb (basic_block bb, bool first_p)
185 { 239 {
186 gimple_stmt_iterator gsi; 240 gimple_stmt_iterator gsi;
187 bool retval = false; 241 bool retval = false;
188 gimple stmt; 242 gimple *stmt;
189 243
190 /* If the last statement of the block could throw and now cannot, 244 /* If the last statement of the block could throw and now cannot,
191 we need to prune cfg. */ 245 we need to prune cfg. */
192 retval |= gimple_purge_dead_eh_edges (bb); 246 retval |= gimple_purge_dead_eh_edges (bb);
193 247
194 gsi = gsi_last_bb (bb); 248 gsi = gsi_last_nondebug_bb (bb);
195 if (gsi_end_p (gsi)) 249 if (gsi_end_p (gsi))
196 return retval; 250 return retval;
197 251
198 stmt = gsi_stmt (gsi); 252 stmt = gsi_stmt (gsi);
199 253
254 /* Try to cleanup ctrl altering flag for call which ends bb. */
255 cleanup_call_ctrl_altering_flag (stmt);
256
200 if (gimple_code (stmt) == GIMPLE_COND 257 if (gimple_code (stmt) == GIMPLE_COND
201 || gimple_code (stmt) == GIMPLE_SWITCH) 258 || gimple_code (stmt) == GIMPLE_SWITCH)
202 retval |= cleanup_control_expr_graph (bb, gsi); 259 {
260 gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
261 retval |= cleanup_control_expr_graph (bb, gsi, first_p);
262 }
203 else if (gimple_code (stmt) == GIMPLE_GOTO 263 else if (gimple_code (stmt) == GIMPLE_GOTO
204 && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR 264 && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
205 && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0)) 265 && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
206 == LABEL_DECL)) 266 == LABEL_DECL))
207 { 267 {
210 edge e; 270 edge e;
211 tree label; 271 tree label;
212 edge_iterator ei; 272 edge_iterator ei;
213 basic_block target_block; 273 basic_block target_block;
214 274
275 gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
215 /* First look at all the outgoing edges. Delete any outgoing 276 /* First look at all the outgoing edges. Delete any outgoing
216 edges which do not go to the right block. For the one 277 edges which do not go to the right block. For the one
217 edge which goes to the right block, fix up its flags. */ 278 edge which goes to the right block, fix up its flags. */
218 label = TREE_OPERAND (gimple_goto_dest (stmt), 0); 279 label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
280 if (DECL_CONTEXT (label) != cfun->decl)
281 return retval;
219 target_block = label_to_block (label); 282 target_block = label_to_block (label);
220 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); ) 283 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
221 { 284 {
222 if (e->dest != target_block) 285 if (e->dest != target_block)
223 remove_edge_and_dominated_blocks (e); 286 remove_edge_and_dominated_blocks (e);
242 } 305 }
243 306
244 /* Check for indirect calls that have been turned into 307 /* Check for indirect calls that have been turned into
245 noreturn calls. */ 308 noreturn calls. */
246 else if (is_gimple_call (stmt) 309 else if (is_gimple_call (stmt)
247 && gimple_call_noreturn_p (stmt) 310 && gimple_call_noreturn_p (stmt))
248 && remove_fallthru_edge (bb->succs)) 311 {
249 retval = true; 312 /* If there are debug stmts after the noreturn call, remove them
313 now; they should all be unreachable anyway. */
314 for (gsi_next (&gsi); !gsi_end_p (gsi); )
315 gsi_remove (&gsi, true);
316 if (remove_fallthru_edge (bb->succs))
317 retval = true;
318 }
250 319
251 return retval; 320 return retval;
252 } 321 }
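The GIMPLE_GOTO arm fires for computed gotos whose destination has been propagated to a constant label address (the new DECL_CONTEXT check bails out on labels belonging to a different function). A sketch in GNU C (computed-goto extension), illustrative:

    void
    f (void)
    {
      void *dest = &&out;   /* after propagation the goto target is the   */
      goto *dest;           /* constant &&out, so every outgoing edge     */
    out:                    /* except the one to `out' is removed         */
      return;
    }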
253 322
254 /* Return true if basic block BB does nothing except pass control 323 /* Return true if basic block BB does nothing except pass control
255 flow to another block and that we can safely insert a label at 324 flow to another block and that we can safely insert a label at
256 the start of the successor block. 325 the start of the successor block.
257 326
258 As a precondition, we require that BB be not equal to 327 As a precondition, we require that BB be not equal to
259 ENTRY_BLOCK_PTR. */ 328 the entry block. */
260 329
261 static bool 330 static bool
262 tree_forwarder_block_p (basic_block bb, bool phi_wanted) 331 tree_forwarder_block_p (basic_block bb, bool phi_wanted)
263 { 332 {
264 gimple_stmt_iterator gsi; 333 gimple_stmt_iterator gsi;
267 /* BB must have a single outgoing edge. */ 336 /* BB must have a single outgoing edge. */
268 if (single_succ_p (bb) != 1 337 if (single_succ_p (bb) != 1
269 /* If PHI_WANTED is false, BB must not have any PHI nodes. 338 /* If PHI_WANTED is false, BB must not have any PHI nodes.
270 Otherwise, BB must have PHI nodes. */ 339 Otherwise, BB must have PHI nodes. */
271 || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted 340 || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
272 /* BB may not be a predecessor of EXIT_BLOCK_PTR. */ 341 /* BB may not be a predecessor of the exit block. */
273 || single_succ (bb) == EXIT_BLOCK_PTR 342 || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
274 /* Nor should this be an infinite loop. */ 343 /* Nor should this be an infinite loop. */
275 || single_succ (bb) == bb 344 || single_succ (bb) == bb
276 /* BB may not have an abnormal outgoing edge. */ 345 /* BB may not have an abnormal outgoing edge. */
277 || (single_succ_edge (bb)->flags & EDGE_ABNORMAL)) 346 || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
278 return false; 347 return false;
279 348
280 gcc_checking_assert (bb != ENTRY_BLOCK_PTR); 349 gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));
281 350
282 locus = single_succ_edge (bb)->goto_locus; 351 locus = single_succ_edge (bb)->goto_locus;
283 352
284 /* There should not be an edge coming from entry, or an EH edge. */ 353 /* There should not be an edge coming from entry, or an EH edge. */
285 { 354 {
286 edge_iterator ei; 355 edge_iterator ei;
287 edge e; 356 edge e;
288 357
289 FOR_EACH_EDGE (e, ei, bb->preds) 358 FOR_EACH_EDGE (e, ei, bb->preds)
290 if (e->src == ENTRY_BLOCK_PTR || (e->flags & EDGE_EH)) 359 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
291 return false; 360 return false;
292 /* If goto_locus of any of the edges differs, prevent removing 361 /* If goto_locus of any of the edges differs, prevent removing
293 the forwarder block for -O0. */ 362 the forwarder block for -O0. */
294 else if (optimize == 0 && e->goto_locus != locus) 363 else if (optimize == 0 && e->goto_locus != locus)
295 return false; 364 return false;
297 366
298 /* Now walk through the statements backward. We can ignore labels, 367 /* Now walk through the statements backward. We can ignore labels,
299 anything else means this is not a forwarder block. */ 368 anything else means this is not a forwarder block. */
300 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi)) 369 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
301 { 370 {
302 gimple stmt = gsi_stmt (gsi); 371 gimple *stmt = gsi_stmt (gsi);
303 372
304 switch (gimple_code (stmt)) 373 switch (gimple_code (stmt))
305 { 374 {
306 case GIMPLE_LABEL: 375 case GIMPLE_LABEL:
307 if (DECL_NONLOCAL (gimple_label_label (stmt))) 376 if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
308 return false; 377 return false;
309 if (optimize == 0 && gimple_location (stmt) != locus) 378 if (optimize == 0 && gimple_location (stmt) != locus)
310 return false; 379 return false;
311 break; 380 break;
312 381
321 } 390 }
322 391
323 if (current_loops) 392 if (current_loops)
324 { 393 {
325 basic_block dest; 394 basic_block dest;
326 /* Protect loop latches, headers and preheaders. */ 395 /* Protect loop headers. */
327 if (bb->loop_father->header == bb) 396 if (bb_loop_header_p (bb))
328 return false; 397 return false;
398
329 dest = EDGE_SUCC (bb, 0)->dest; 399 dest = EDGE_SUCC (bb, 0)->dest;
330 400 /* Protect loop preheaders and latches if requested. */
331 if (dest->loop_father->header == dest) 401 if (dest->loop_father->header == dest)
332 return false; 402 {
333 } 403 if (bb->loop_father == dest->loop_father)
404 {
405 if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
406 return false;
407 /* If bb doesn't have a single predecessor we'd make this
408 loop have multiple latches. Don't do that if that
409 would in turn require disambiguating them. */
410 return (single_pred_p (bb)
411 || loops_state_satisfies_p
412 (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
413 }
414 else if (bb->loop_father == loop_outer (dest->loop_father))
415 return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
416 /* Always preserve other edges into loop headers that are
417 not simple latches or preheaders. */
418 return false;
419 }
420 }
421
334 return true; 422 return true;
335 } 423 }
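A forwarder block contains nothing but an optional label and a fall-through to its single successor; goto chains and empty arms are typical sources. An illustrative C shape, with placeholder names:

    extern void work (void), done (void);

    void
    f (int c)
    {
      if (c)
        goto mid;
      work ();
     mid:
      goto out;    /* the `mid' block holds only a label and this jump:
                      a forwarder in the sense tested above */
     out:
      done ();
    }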
336 424
337 /* If all the PHI nodes in DEST have alternatives for E1 and E2 and 425 /* If all the PHI nodes in DEST have alternatives for E1 and E2 and
338 those alternatives are equal in each of the PHI nodes, then return 426 those alternatives are equal in each of the PHI nodes, then return
341 static bool 429 static bool
342 phi_alternatives_equal (basic_block dest, edge e1, edge e2) 430 phi_alternatives_equal (basic_block dest, edge e1, edge e2)
343 { 431 {
344 int n1 = e1->dest_idx; 432 int n1 = e1->dest_idx;
345 int n2 = e2->dest_idx; 433 int n2 = e2->dest_idx;
346 gimple_stmt_iterator gsi; 434 gphi_iterator gsi;
347 435
348 for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi)) 436 for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
349 { 437 {
350 gimple phi = gsi_stmt (gsi); 438 gphi *phi = gsi.phi ();
351 tree val1 = gimple_phi_arg_def (phi, n1); 439 tree val1 = gimple_phi_arg_def (phi, n1);
352 tree val2 = gimple_phi_arg_def (phi, n2); 440 tree val2 = gimple_phi_arg_def (phi, n2);
353 441
354 gcc_assert (val1 != NULL_TREE); 442 gcc_assert (val1 != NULL_TREE);
355 gcc_assert (val2 != NULL_TREE); 443 gcc_assert (val2 != NULL_TREE);
366 static bool 454 static bool
367 remove_forwarder_block (basic_block bb) 455 remove_forwarder_block (basic_block bb)
368 { 456 {
369 edge succ = single_succ_edge (bb), e, s; 457 edge succ = single_succ_edge (bb), e, s;
370 basic_block dest = succ->dest; 458 basic_block dest = succ->dest;
371 gimple label; 459 gimple *label;
372 edge_iterator ei; 460 edge_iterator ei;
373 gimple_stmt_iterator gsi, gsi_to; 461 gimple_stmt_iterator gsi, gsi_to;
374 bool can_move_debug_stmts; 462 bool can_move_debug_stmts;
375 463
376 /* We check for infinite loops already in tree_forwarder_block_p. 464 /* We check for infinite loops already in tree_forwarder_block_p.
380 return false; 468 return false;
381 469
382 /* If the destination block consists of a nonlocal label or is a 470 /* If the destination block consists of a nonlocal label or is a
383 EH landing pad, do not merge it. */ 471 EH landing pad, do not merge it. */
384 label = first_stmt (dest); 472 label = first_stmt (dest);
385 if (label 473 if (label)
386 && gimple_code (label) == GIMPLE_LABEL 474 if (glabel *label_stmt = dyn_cast <glabel *> (label))
387 && (DECL_NONLOCAL (gimple_label_label (label)) 475 if (DECL_NONLOCAL (gimple_label_label (label_stmt))
388 || EH_LANDING_PAD_NR (gimple_label_label (label)) != 0)) 476 || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
389 return false; 477 return false;
390 478
391 /* If there is an abnormal edge to basic block BB, but not into 479 /* If there is an abnormal edge to basic block BB, but not into
392 dest, problems might occur during removal of the phi node at out 480 dest, problems might occur during removal of the phi node at out
393 of ssa due to overlapping live ranges of registers. 481 of ssa due to overlapping live ranges of registers.
394 482
418 if (!phi_alternatives_equal (dest, succ, s)) 506 if (!phi_alternatives_equal (dest, succ, s))
419 return false; 507 return false;
420 } 508 }
421 } 509 }
422 510
423 can_move_debug_stmts = single_pred_p (dest); 511 can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);
512
513 basic_block pred = NULL;
514 if (single_pred_p (bb))
515 pred = single_pred (bb);
424 516
425 /* Redirect the edges. */ 517 /* Redirect the edges. */
426 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); ) 518 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
427 { 519 {
428 bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index); 520 bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);
438 530
439 if (s == e) 531 if (s == e)
440 { 532 {
441 /* Create arguments for the phi nodes, since the edge was not 533 /* Create arguments for the phi nodes, since the edge was not
442 here before. */ 534 here before. */
443 for (gsi = gsi_start_phis (dest); 535 for (gphi_iterator psi = gsi_start_phis (dest);
444 !gsi_end_p (gsi); 536 !gsi_end_p (psi);
445 gsi_next (&gsi)) 537 gsi_next (&psi))
446 { 538 {
447 gimple phi = gsi_stmt (gsi); 539 gphi *phi = psi.phi ();
448 source_location l = gimple_phi_arg_location_from_edge (phi, succ); 540 source_location l = gimple_phi_arg_location_from_edge (phi, succ);
449 add_phi_arg (phi, gimple_phi_arg_def (phi, succ->dest_idx), s, l); 541 tree def = gimple_phi_arg_def (phi, succ->dest_idx);
542 add_phi_arg (phi, unshare_expr (def), s, l);
450 } 543 }
451 } 544 }
452 } 545 }
453 546
454 /* Move nonlocal labels and computed goto targets as well as user 547 /* Move nonlocal labels and computed goto targets as well as user
461 { 554 {
462 tree decl; 555 tree decl;
463 label = gsi_stmt (gsi); 556 label = gsi_stmt (gsi);
464 if (is_gimple_debug (label)) 557 if (is_gimple_debug (label))
465 break; 558 break;
466 decl = gimple_label_label (label); 559 decl = gimple_label_label (as_a <glabel *> (label));
467 if (EH_LANDING_PAD_NR (decl) != 0 560 if (EH_LANDING_PAD_NR (decl) != 0
468 || DECL_NONLOCAL (decl) 561 || DECL_NONLOCAL (decl)
469 || FORCED_LABEL (decl) 562 || FORCED_LABEL (decl)
470 || !DECL_ARTIFICIAL (decl)) 563 || !DECL_ARTIFICIAL (decl))
471 { 564 {
474 } 567 }
475 else 568 else
476 gsi_next (&gsi); 569 gsi_next (&gsi);
477 } 570 }
478 571
479 /* Move debug statements if the destination has just a single 572 /* Move debug statements if the destination has a single predecessor. */
480 predecessor. */
481 if (can_move_debug_stmts) 573 if (can_move_debug_stmts)
482 { 574 {
483 gsi_to = gsi_after_labels (dest); 575 gsi_to = gsi_after_labels (dest);
484 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); ) 576 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
485 { 577 {
486 gimple debug = gsi_stmt (gsi); 578 gimple *debug = gsi_stmt (gsi);
487 if (!is_gimple_debug (debug)) 579 if (!is_gimple_debug (debug))
488 break; 580 break;
489 gsi_remove (&gsi, false); 581 gsi_remove (&gsi, false);
490 gsi_insert_before (&gsi_to, debug, GSI_SAME_STMT); 582 gsi_insert_before (&gsi_to, debug, GSI_SAME_STMT);
491 } 583 }
510 dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb); 602 dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);
511 603
512 set_immediate_dominator (CDI_DOMINATORS, dest, dom); 604 set_immediate_dominator (CDI_DOMINATORS, dest, dom);
513 } 605 }
514 606
607 /* Adjust latch information of BB's parent loop as otherwise
608 the cfg hook has a hard time not killing the loop. */
609 if (current_loops && bb->loop_father->latch == bb)
610 bb->loop_father->latch = pred;
611
515 /* And kill the forwarder block. */ 612 /* And kill the forwarder block. */
516 delete_basic_block (bb); 613 delete_basic_block (bb);
517 614
518 return true; 615 return true;
519 } 616 }
520 617
521 /* STMT is a call that has been discovered noreturn. Fixup the CFG 618 /* STMT is a call that has been discovered noreturn. Split the
522 and remove LHS. Return true if something changed. */ 619 block to prepare fixing up the CFG and remove LHS.
620 Return true if cleanup-cfg needs to run. */
523 621
524 bool 622 bool
525 fixup_noreturn_call (gimple stmt) 623 fixup_noreturn_call (gimple *stmt)
526 { 624 {
527 basic_block bb = gimple_bb (stmt); 625 basic_block bb = gimple_bb (stmt);
528 bool changed = false; 626 bool changed = false;
529 627
530 if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN)) 628 if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
531 return false; 629 return false;
532 630
533 /* First split basic block if stmt is not last. */ 631 /* First split basic block if stmt is not last. */
534 if (stmt != gsi_stmt (gsi_last_bb (bb))) 632 if (stmt != gsi_stmt (gsi_last_bb (bb)))
535 split_block (bb, stmt); 633 {
536 634 if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
537 changed |= remove_fallthru_edge (bb->succs); 635 {
538 636 /* Don't split if there are only debug stmts
539 /* If there is LHS, remove it. */ 637 after stmt, as that can result in -fcompare-debug
540 if (gimple_call_lhs (stmt)) 638 failures. Remove the debug stmts instead,
541 { 639 they should all be unreachable anyway. */
542 tree op = gimple_call_lhs (stmt); 640 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
641 for (gsi_next (&gsi); !gsi_end_p (gsi); )
642 gsi_remove (&gsi, true);
643 }
644 else
645 {
646 split_block (bb, stmt);
647 changed = true;
648 }
649 }
650
651 /* If there is an LHS, remove it, but only if its type has fixed size.
652 The LHS will need to be recreated during RTL expansion and creating
653 temporaries of variable-sized types is not supported. Also don't
654 do this with TREE_ADDRESSABLE types, as assign_temp will abort.
655 Drop LHS regardless of TREE_ADDRESSABLE, if the function call
656 has been changed into a call that does not return a value, like
657 __builtin_unreachable or __cxa_pure_virtual. */
658 tree lhs = gimple_call_lhs (stmt);
659 if (lhs
660 && (should_remove_lhs_p (lhs)
661 || VOID_TYPE_P (TREE_TYPE (gimple_call_fntype (stmt)))))
662 {
543 gimple_call_set_lhs (stmt, NULL_TREE); 663 gimple_call_set_lhs (stmt, NULL_TREE);
544 664
545 /* We need to remove SSA name to avoid checking errors. 665 /* We need to fix up the SSA name to avoid checking errors. */
546 All uses are dominated by the noreturn and thus will 666 if (TREE_CODE (lhs) == SSA_NAME)
547 be removed afterwards. 667 {
548 We proactively remove affected non-PHI statements to avoid 668 tree new_var = create_tmp_reg (TREE_TYPE (lhs));
549 fixup_cfg from trying to update them and crashing. */ 669 SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
550 if (TREE_CODE (op) == SSA_NAME) 670 SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
551 { 671 set_ssa_default_def (cfun, new_var, lhs);
552 use_operand_p use_p; 672 }
553 imm_use_iterator iter; 673
554 gimple use_stmt;
555 bitmap_iterator bi;
556 unsigned int bb_index;
557
558 bitmap blocks = BITMAP_ALLOC (NULL);
559
560 FOR_EACH_IMM_USE_STMT (use_stmt, iter, op)
561 {
562 if (gimple_code (use_stmt) != GIMPLE_PHI)
563 bitmap_set_bit (blocks, gimple_bb (use_stmt)->index);
564 else
565 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
566 SET_USE (use_p, error_mark_node);
567 }
568 EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi)
569 delete_basic_block (BASIC_BLOCK (bb_index));
570 BITMAP_FREE (blocks);
571 release_ssa_name (op);
572 }
573 update_stmt (stmt); 674 update_stmt (stmt);
675 }
676
677 /* Mark the call as altering control flow. */
678 if (!gimple_call_ctrl_altering_p (stmt))
679 {
680 gimple_call_set_ctrl_altering (stmt, true);
574 changed = true; 681 changed = true;
575 } 682 }
576 /* Similarly remove VDEF if there is any. */ 683
577 else if (gimple_vdef (stmt))
578 update_stmt (stmt);
579 return changed; 684 return changed;
580 } 685 }
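A call is "discovered noreturn" when later optimization resolves it to something that never returns; the comment above also names __builtin_unreachable and __cxa_pure_virtual for the VOID_TYPE_P case. A sketch of the situation, with placeholder names:

    extern int might_exit (void);   /* later discovered noreturn */
    extern void use (int);

    int
    f (void)
    {
      int r = might_exit ();   /* once noreturn: the block is split here,  */
      use (r);                 /* the LHS `r' is dropped (its SSA name is  */
      return r;                /* rewired to a fresh default def) and the  */
    }                          /* trailing statements become unreachable   */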
581 686
582 687 /* Return true if we want to merge BB1 and BB2 into a single block. */
583 /* Split basic blocks on calls in the middle of a basic block that are now
584 known not to return, and remove the unreachable code. */
585 688
586 static bool 689 static bool
587 split_bbs_on_noreturn_calls (void) 690 want_merge_blocks_p (basic_block bb1, basic_block bb2)
588 { 691 {
589 bool changed = false; 692 if (!can_merge_blocks_p (bb1, bb2))
590 gimple stmt;
591 basic_block bb;
592
593 /* Detect cases where a mid-block call is now known not to return. */
594 if (cfun->gimple_df)
595 while (VEC_length (gimple, MODIFIED_NORETURN_CALLS (cfun)))
596 {
597 stmt = VEC_pop (gimple, MODIFIED_NORETURN_CALLS (cfun));
598 bb = gimple_bb (stmt);
599 /* BB might be deleted at this point, so verify first
600 BB is present in the cfg. */
601 if (bb == NULL
602 || bb->index < NUM_FIXED_BLOCKS
603 || bb->index >= n_basic_blocks
604 || BASIC_BLOCK (bb->index) != bb
605 || !gimple_call_noreturn_p (stmt))
606 continue;
607
608 changed |= fixup_noreturn_call (stmt);
609 }
610
611 return changed;
612 }
613
614 /* If GIMPLE_OMP_RETURN in basic block BB is unreachable, remove it. */
615
616 static bool
617 cleanup_omp_return (basic_block bb)
618 {
619 gimple stmt = last_stmt (bb);
620 basic_block control_bb;
621
622 if (stmt == NULL
623 || gimple_code (stmt) != GIMPLE_OMP_RETURN
624 || !single_pred_p (bb))
625 return false; 693 return false;
626 694 gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb1);
627 control_bb = single_pred (bb); 695 if (gsi_end_p (gsi) || !stmt_can_terminate_bb_p (gsi_stmt (gsi)))
628 stmt = last_stmt (control_bb); 696 return true;
629 697 return bb1->count.ok_for_merging (bb2->count);
630 if (stmt == NULL || gimple_code (stmt) != GIMPLE_OMP_SECTIONS_SWITCH) 698 }
631 return false; 699
632
633 /* The block with the control statement normally has two entry edges -- one
634 from entry, one from continue. If continue is removed, return is
635 unreachable, so we remove it here as well. */
636 if (EDGE_COUNT (control_bb->preds) == 2)
637 return false;
638
639 gcc_assert (EDGE_COUNT (control_bb->preds) == 1);
640 remove_edge_and_dominated_blocks (single_pred_edge (bb));
641 return true;
642 }
643 700
644 /* Tries to cleanup cfg in basic block BB. Returns true if anything 701 /* Tries to cleanup cfg in basic block BB. Returns true if anything
645 changes. */ 702 changes. */
646 703
647 static bool 704 static bool
648 cleanup_tree_cfg_bb (basic_block bb) 705 cleanup_tree_cfg_bb (basic_block bb)
649 { 706 {
650 bool retval = false;
651
652 if (cleanup_omp_return (bb))
653 return true;
654
655 retval = cleanup_control_flow_bb (bb);
656
657 if (tree_forwarder_block_p (bb, false) 707 if (tree_forwarder_block_p (bb, false)
658 && remove_forwarder_block (bb)) 708 && remove_forwarder_block (bb))
659 return true; 709 return true;
660 710
711 /* If there is a merge opportunity with the predecessor
712 do nothing now but wait until we process the predecessor.
713 This happens when we visit BBs in a non-optimal order and
714 avoids quadratic behavior from adjusting the BB pointer of stmts. */
715 if (single_pred_p (bb)
716 && want_merge_blocks_p (single_pred (bb), bb))
717 /* But make sure we _do_ visit it. When we remove unreachable paths
718 ending in a backedge we fail to mark the destination's predecessors
719 as changed. */
720 bitmap_set_bit (cfgcleanup_altered_bbs, single_pred (bb)->index);
721
661 /* Merging the blocks may create new opportunities for folding 722 /* Merging the blocks may create new opportunities for folding
662 conditional branches (due to the elimination of single-valued PHI 723 conditional branches (due to the elimination of single-valued PHI
663 nodes). */ 724 nodes). */
664 if (single_succ_p (bb) 725 else if (single_succ_p (bb)
665 && can_merge_blocks_p (bb, single_succ (bb))) 726 && want_merge_blocks_p (bb, single_succ (bb)))
666 { 727 {
667 merge_blocks (bb, single_succ (bb)); 728 merge_blocks (bb, single_succ (bb));
668 return true; 729 return true;
669 } 730 }
670 731
671 return retval; 732 return false;
672 } 733 }
673 734
674 /* Iterate the cfg cleanups, while anything changes. */ 735 /* Iterate the cfg cleanups, while anything changes. */
675 736
676 static bool 737 static bool
677 cleanup_tree_cfg_1 (void) 738 cleanup_tree_cfg_1 (void)
678 { 739 {
679 bool retval = false; 740 bool retval = false;
680 basic_block bb; 741 basic_block bb;
681 unsigned i, n; 742 unsigned i, n;
682
683 retval |= split_bbs_on_noreturn_calls ();
684 743
685 /* Prepare the worklists of altered blocks. */ 744 /* Prepare the worklists of altered blocks. */
686 cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL); 745 cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);
687 746
688 /* During forwarder block cleanup, we may redirect edges out of 747 /* During forwarder block cleanup, we may redirect edges out of
689 SWITCH_EXPRs, which can get expensive. So we want to enable 748 SWITCH_EXPRs, which can get expensive. So we want to enable
690 recording of edge to CASE_LABEL_EXPR. */ 749 recording of edge to CASE_LABEL_EXPR. */
691 start_recording_case_labels (); 750 start_recording_case_labels ();
692 751
693 /* Start by iterating over all basic blocks. We cannot use FOR_EACH_BB, 752 /* We cannot use FOR_EACH_BB_FN for the BB iterations below
694 since the basic blocks may get removed. */ 753 since the basic blocks may get removed. */
695 n = last_basic_block; 754
755 /* Start by iterating over all basic blocks looking for edge removal
756 opportunities. Do this first because incoming SSA form may be
757 invalid and we want to avoid performing SSA related tasks such
758 as propagating out a PHI node during BB merging in that state. */
759 n = last_basic_block_for_fn (cfun);
696 for (i = NUM_FIXED_BLOCKS; i < n; i++) 760 for (i = NUM_FIXED_BLOCKS; i < n; i++)
697 { 761 {
698 bb = BASIC_BLOCK (i); 762 bb = BASIC_BLOCK_FOR_FN (cfun, i);
763 if (bb)
764 retval |= cleanup_control_flow_bb (bb, true);
765 }
766
767 /* After doing the above SSA form should be valid (or an update SSA
768 should be required). */
769
770 /* Continue by iterating over all basic blocks looking for BB merging
771 opportunities. */
772 n = last_basic_block_for_fn (cfun);
773 for (i = NUM_FIXED_BLOCKS; i < n; i++)
774 {
775 bb = BASIC_BLOCK_FOR_FN (cfun, i);
699 if (bb) 776 if (bb)
700 retval |= cleanup_tree_cfg_bb (bb); 777 retval |= cleanup_tree_cfg_bb (bb);
701 } 778 }
702 779
703 /* Now process the altered blocks, as long as any are available. */ 780 /* Now process the altered blocks, as long as any are available. */
706 i = bitmap_first_set_bit (cfgcleanup_altered_bbs); 783 i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
707 bitmap_clear_bit (cfgcleanup_altered_bbs, i); 784 bitmap_clear_bit (cfgcleanup_altered_bbs, i);
708 if (i < NUM_FIXED_BLOCKS) 785 if (i < NUM_FIXED_BLOCKS)
709 continue; 786 continue;
710 787
711 bb = BASIC_BLOCK (i); 788 bb = BASIC_BLOCK_FOR_FN (cfun, i);
712 if (!bb) 789 if (!bb)
713 continue; 790 continue;
714 791
792 retval |= cleanup_control_flow_bb (bb, false);
715 retval |= cleanup_tree_cfg_bb (bb); 793 retval |= cleanup_tree_cfg_bb (bb);
716
717 /* Rerun split_bbs_on_noreturn_calls, in case we have altered any noreturn
718 calls. */
719 retval |= split_bbs_on_noreturn_calls ();
720 } 794 }
721 795
722 end_recording_case_labels (); 796 end_recording_case_labels ();
723 BITMAP_FREE (cfgcleanup_altered_bbs); 797 BITMAP_FREE (cfgcleanup_altered_bbs);
724 return retval; 798 return retval;
725 } 799 }
726 800
801 static bool
802 mfb_keep_latches (edge e)
803 {
804 return ! dominated_by_p (CDI_DOMINATORS, e->src, e->dest);
805 }
727 806
728 /* Remove unreachable blocks and other miscellaneous clean up work. 807 /* Remove unreachable blocks and other miscellaneous clean up work.
729 Return true if the flowgraph was modified, false otherwise. */ 808 Return true if the flowgraph was modified, false otherwise. */
730 809
731 static bool 810 static bool
745 changed = delete_unreachable_blocks (); 824 changed = delete_unreachable_blocks ();
746 calculate_dominance_info (CDI_DOMINATORS); 825 calculate_dominance_info (CDI_DOMINATORS);
747 } 826 }
748 else 827 else
749 { 828 {
750 #ifdef ENABLE_CHECKING 829 checking_verify_dominators (CDI_DOMINATORS);
751 verify_dominators (CDI_DOMINATORS);
752 #endif
753 changed = false; 830 changed = false;
754 } 831 }
755 832
833 /* Ensure that we have single entries into loop headers. Otherwise
834 if one of the entries is becoming a latch due to CFG cleanup
835 (from formerly being part of an irreducible region) then we mess
836 up loop fixup and associate the old loop with a different region
837 which makes niter upper bounds invalid. See for example PR80549.
838 This needs to be done before we remove trivially dead edges as
839 we need to capture the dominance state before the pending transform. */
840 if (current_loops)
841 {
842 loop_p loop;
843 unsigned i;
844 FOR_EACH_VEC_ELT (*get_loops (cfun), i, loop)
845 if (loop && loop->header)
846 {
847 basic_block bb = loop->header;
848 edge_iterator ei;
849 edge e;
850 bool found_latch = false;
851 bool any_abnormal = false;
852 unsigned n = 0;
853 /* We are only interested in preserving existing loops, but
854 we need to check whether they are still real and of course
855 if we need to add a preheader at all. */
856 FOR_EACH_EDGE (e, ei, bb->preds)
857 {
858 if (e->flags & EDGE_ABNORMAL)
859 {
860 any_abnormal = true;
861 break;
862 }
863 if (dominated_by_p (CDI_DOMINATORS, e->src, bb))
864 {
865 found_latch = true;
866 continue;
867 }
868 n++;
869 }
870 /* If we have more than one entry to the loop header
871 create a forwarder. */
872 if (found_latch && ! any_abnormal && n > 1)
873 {
874 edge fallthru = make_forwarder_block (bb, mfb_keep_latches,
875 NULL);
876 loop->header = fallthru->dest;
877 if (! loops_state_satisfies_p (LOOPS_NEED_FIXUP))
878 {
879 /* The loop updating from the CFG hook is incomplete
880 when we have multiple latches, fixup manually. */
881 remove_bb_from_loops (fallthru->src);
882 loop_p cloop = loop;
883 FOR_EACH_EDGE (e, ei, fallthru->src->preds)
884 cloop = find_common_loop (cloop, e->src->loop_father);
885 add_bb_to_loop (fallthru->src, cloop);
886 }
887 }
888 }
889 }
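A header with more than one non-latch entry can arise from a jump into a loop body; the code above splits off a forwarder so the header keeps a single entry besides its latches, which keeps recorded niter bounds attached to the right loop (see the PR80549 reference in the comment). An illustrative C shape:

    extern int step (void);

    void
    f (int c)
    {
      if (c)
        goto body;      /* a second entry into the loop, bypassing the
                           normal one */
      for (;;)
        {
        body:
          if (step ())  /* after the fixup a forwarder block provides the
                           single non-latch entry into the header */
            break;
        }
    }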
890
756 changed |= cleanup_tree_cfg_1 (); 891 changed |= cleanup_tree_cfg_1 ();
757 892
758 gcc_assert (dom_info_available_p (CDI_DOMINATORS)); 893 gcc_assert (dom_info_available_p (CDI_DOMINATORS));
759 compact_blocks (); 894
760 895 /* Do not renumber blocks if the SCEV cache is active, it is indexed by
761 #ifdef ENABLE_CHECKING 896 basic-block numbers. */
762 verify_flow_info (); 897 if (! scev_initialized_p ())
763 #endif 898 compact_blocks ();
899
900 checking_verify_flow_info ();
764 901
765 timevar_pop (TV_TREE_CLEANUP_CFG); 902 timevar_pop (TV_TREE_CLEANUP_CFG);
766 903
767 if (changed && current_loops) 904 if (changed && current_loops)
768 loops_state_set (LOOPS_NEED_FIXUP); 905 {
906 /* Removing edges and/or blocks may make recorded bounds refer
907 to stale GIMPLE stmts now, so clear them. */
908 free_numbers_of_iterations_estimates (cfun);
909 loops_state_set (LOOPS_NEED_FIXUP);
910 }
769 911
770 return changed; 912 return changed;
771 } 913 }
772 914
773 /* Repairs loop structures. */ 915 /* Repairs loop structures. */
774 916
775 static void 917 static void
776 repair_loop_structures (void) 918 repair_loop_structures (void)
777 { 919 {
778 bitmap changed_bbs; 920 bitmap changed_bbs;
921 unsigned n_new_loops;
922
923 calculate_dominance_info (CDI_DOMINATORS);
779 924
780 timevar_push (TV_REPAIR_LOOPS); 925 timevar_push (TV_REPAIR_LOOPS);
781 changed_bbs = BITMAP_ALLOC (NULL); 926 changed_bbs = BITMAP_ALLOC (NULL);
782 fix_loop_structure (changed_bbs); 927 n_new_loops = fix_loop_structure (changed_bbs);
783 928
784 /* This usually does nothing. But sometimes parts of cfg that originally 929 /* This usually does nothing. But sometimes parts of cfg that originally
785 were inside a loop get out of it due to edge removal (since they 930 were inside a loop get out of it due to edge removal (since they
786 become unreachable by back edges from latch). */ 931 become unreachable by back edges from latch). Also a former
932 irreducible loop can become reducible - in this case force a full
933 rewrite into loop-closed SSA form. */
787 if (loops_state_satisfies_p (LOOP_CLOSED_SSA)) 934 if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
788 rewrite_into_loop_closed_ssa (changed_bbs, TODO_update_ssa); 935 rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
936 TODO_update_ssa);
789 937
790 BITMAP_FREE (changed_bbs); 938 BITMAP_FREE (changed_bbs);
791 939
792 #ifdef ENABLE_CHECKING 940 checking_verify_loop_structure ();
793 verify_loop_structure ();
794 #endif
795 scev_reset (); 941 scev_reset ();
796 942
797 loops_state_clear (LOOPS_NEED_FIXUP);
798 timevar_pop (TV_REPAIR_LOOPS); 943 timevar_pop (TV_REPAIR_LOOPS);
799 } 944 }
800 945
801 /* Cleanup cfg and repair loop structures. */ 946 /* Cleanup cfg and repair loop structures. */
802 947
810 repair_loop_structures (); 955 repair_loop_structures ();
811 956
812 return changed; 957 return changed;
813 } 958 }
814 959
815 /* Merge the PHI nodes at BB into those at BB's sole successor. */ 960 /* Tries to merge the PHI nodes at BB into those at BB's sole successor.
816 961 Returns true if successful. */
817 static void 962
963 static bool
818 remove_forwarder_block_with_phi (basic_block bb) 964 remove_forwarder_block_with_phi (basic_block bb)
819 { 965 {
820 edge succ = single_succ_edge (bb); 966 edge succ = single_succ_edge (bb);
821 basic_block dest = succ->dest; 967 basic_block dest = succ->dest;
822 gimple label; 968 gimple *label;
823 basic_block dombb, domdest, dom; 969 basic_block dombb, domdest, dom;
824 970
825 /* We check for infinite loops already in tree_forwarder_block_p. 971 /* We check for infinite loops already in tree_forwarder_block_p.
826 However it may happen that the infinite loop is created 972 However it may happen that the infinite loop is created
827 afterwards due to removal of forwarders. */ 973 afterwards due to removal of forwarders. */
828 if (dest == bb) 974 if (dest == bb)
829 return; 975 return false;
976
977 /* Removal of forwarders may expose new natural loops and thus
978 a block may turn into a loop header. */
979 if (current_loops && bb_loop_header_p (bb))
980 return false;
830 981
831 /* If the destination block consists of a nonlocal label, do not 982 /* If the destination block consists of a nonlocal label, do not
832 merge it. */ 983 merge it. */
833 label = first_stmt (dest); 984 label = first_stmt (dest);
834 if (label 985 if (label)
835 && gimple_code (label) == GIMPLE_LABEL 986 if (glabel *label_stmt = dyn_cast <glabel *> (label))
836 && DECL_NONLOCAL (gimple_label_label (label))) 987 if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
837 return; 988 return false;
989
990 /* Record BB's single pred in case we need to update the father
991 loop's latch information later. */
992 basic_block pred = NULL;
993 if (single_pred_p (bb))
994 pred = single_pred (bb);
838 995
839 /* Redirect each incoming edge to BB to DEST. */ 996 /* Redirect each incoming edge to BB to DEST. */
840 while (EDGE_COUNT (bb->preds) > 0) 997 while (EDGE_COUNT (bb->preds) > 0)
841 { 998 {
842 edge e = EDGE_PRED (bb, 0), s; 999 edge e = EDGE_PRED (bb, 0), s;
843 gimple_stmt_iterator gsi; 1000 gphi_iterator gsi;
844 1001
845 s = find_edge (e->src, dest); 1002 s = find_edge (e->src, dest);
846 if (s) 1003 if (s)
847 { 1004 {
848 /* We already have an edge S from E->src to DEST. If S and 1005 /* We already have an edge S from E->src to DEST. If S and
858 /* PHI arguments are different. Create a forwarder block by 1015 /* PHI arguments are different. Create a forwarder block by
859 splitting E so that we can merge PHI arguments on E to 1016 splitting E so that we can merge PHI arguments on E to
860 DEST. */ 1017 DEST. */
861 e = single_succ_edge (split_edge (e)); 1018 e = single_succ_edge (split_edge (e));
862 } 1019 }
1020 else
1021 {
1022 /* If we merge the forwarder into a loop header verify if we
1023 are creating another loop latch edge. If so, reset
1024 number of iteration information of the loop. */
1025 if (dest->loop_father->header == dest
1026 && dominated_by_p (CDI_DOMINATORS, e->src, dest))
1027 {
1028 dest->loop_father->any_upper_bound = false;
1029 dest->loop_father->any_likely_upper_bound = false;
1030 free_numbers_of_iterations_estimates (dest->loop_father);
1031 }
1032 }
863 1033
864 s = redirect_edge_and_branch (e, dest); 1034 s = redirect_edge_and_branch (e, dest);
865 1035
866 /* redirect_edge_and_branch must not create a new edge. */ 1036 /* redirect_edge_and_branch must not create a new edge. */
867 gcc_assert (s == e); 1037 gcc_assert (s == e);
870 destination of E. */ 1040 destination of E. */
871 for (gsi = gsi_start_phis (dest); 1041 for (gsi = gsi_start_phis (dest);
872 !gsi_end_p (gsi); 1042 !gsi_end_p (gsi);
873 gsi_next (&gsi)) 1043 gsi_next (&gsi))
874 { 1044 {
875 gimple phi = gsi_stmt (gsi); 1045 gphi *phi = gsi.phi ();
876 tree def = gimple_phi_arg_def (phi, succ->dest_idx); 1046 tree def = gimple_phi_arg_def (phi, succ->dest_idx);
877 source_location locus = gimple_phi_arg_location_from_edge (phi, succ); 1047 source_location locus = gimple_phi_arg_location_from_edge (phi, succ);
878 1048
879 if (TREE_CODE (def) == SSA_NAME) 1049 if (TREE_CODE (def) == SSA_NAME)
880 { 1050 {
881 edge_var_map_vector head;
882 edge_var_map *vm;
883 size_t i;
884
885 /* If DEF is one of the results of PHI nodes removed during 1051 /* If DEF is one of the results of PHI nodes removed during
886 redirection, replace it with the PHI argument that used 1052 redirection, replace it with the PHI argument that used
887 to be on E. */ 1053 to be on E. */
888 head = redirect_edge_var_map_vector (e); 1054 vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
889 FOR_EACH_VEC_ELT (edge_var_map, head, i, vm) 1055 size_t length = head ? head->length () : 0;
1056 for (size_t i = 0; i < length; i++)
890 { 1057 {
1058 edge_var_map *vm = &(*head)[i];
891 tree old_arg = redirect_edge_var_map_result (vm); 1059 tree old_arg = redirect_edge_var_map_result (vm);
892 tree new_arg = redirect_edge_var_map_def (vm); 1060 tree new_arg = redirect_edge_var_map_def (vm);
893 1061
894 if (def == old_arg) 1062 if (def == old_arg)
895 { 1063 {
918 else 1086 else
919 dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb); 1087 dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);
920 1088
921 set_immediate_dominator (CDI_DOMINATORS, dest, dom); 1089 set_immediate_dominator (CDI_DOMINATORS, dest, dom);
922 1090
1091 /* Adjust latch information of BB's parent loop as otherwise
1092 the cfg hook has a hard time not killing the loop. */
1093 if (current_loops && bb->loop_father->latch == bb)
1094 bb->loop_father->latch = pred;
1095
923 /* Remove BB since all of BB's incoming edges have been redirected 1096 /* Remove BB since all of BB's incoming edges have been redirected
924 to DEST. */ 1097 to DEST. */
925 delete_basic_block (bb); 1098 delete_basic_block (bb);
1099
1100 return true;
926 } 1101 }
927 1102
928 /* This pass merges PHI nodes if one feeds into another. For example, 1103 /* This pass merges PHI nodes if one feeds into another. For example,
929 suppose we have the following: 1104 suppose we have the following:
930 1105
948 1123
949 # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>; 1124 # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
950 <L10>:; 1125 <L10>:;
951 */ 1126 */
952 1127
953 static unsigned int 1128 namespace {
954 merge_phi_nodes (void) 1129
955 { 1130 const pass_data pass_data_merge_phi =
956 basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks); 1131 {
1132 GIMPLE_PASS, /* type */
1133 "mergephi", /* name */
1134 OPTGROUP_NONE, /* optinfo_flags */
1135 TV_TREE_MERGE_PHI, /* tv_id */
1136 ( PROP_cfg | PROP_ssa ), /* properties_required */
1137 0, /* properties_provided */
1138 0, /* properties_destroyed */
1139 0, /* todo_flags_start */
1140 0, /* todo_flags_finish */
1141 };
1142
1143 class pass_merge_phi : public gimple_opt_pass
1144 {
1145 public:
1146 pass_merge_phi (gcc::context *ctxt)
1147 : gimple_opt_pass (pass_data_merge_phi, ctxt)
1148 {}
1149
1150 /* opt_pass methods: */
1151 opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
1152 virtual unsigned int execute (function *);
1153
1154 }; // class pass_merge_phi
1155
1156 unsigned int
1157 pass_merge_phi::execute (function *fun)
1158 {
1159 basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
957 basic_block *current = worklist; 1160 basic_block *current = worklist;
958 basic_block bb; 1161 basic_block bb;
959 1162
960 calculate_dominance_info (CDI_DOMINATORS); 1163 calculate_dominance_info (CDI_DOMINATORS);
961 1164
962 /* Find all PHI nodes that we may be able to merge. */ 1165 /* Find all PHI nodes that we may be able to merge. */
963 FOR_EACH_BB (bb) 1166 FOR_EACH_BB_FN (bb, fun)
964 { 1167 {
965 basic_block dest; 1168 basic_block dest;
966 1169
967 /* Look for a forwarder block with PHI nodes. */ 1170 /* Look for a forwarder block with PHI nodes. */
968 if (!tree_forwarder_block_p (bb, true)) 1171 if (!tree_forwarder_block_p (bb, true))
985 nodes at BB. */ 1188 nodes at BB. */
986 *current++ = bb; 1189 *current++ = bb;
987 } 1190 }
988 else 1191 else
989 { 1192 {
990 gimple_stmt_iterator gsi; 1193 gphi_iterator gsi;
991 unsigned int dest_idx = single_succ_edge (bb)->dest_idx; 1194 unsigned int dest_idx = single_succ_edge (bb)->dest_idx;
992 1195
993 /* BB dominates DEST. There may be many users of the PHI 1196 /* BB dominates DEST. There may be many users of the PHI
994 nodes in BB. However, there is still a trivial case we 1197 nodes in BB. However, there is still a trivial case we
995 can handle. If the result of every PHI in BB is used 1198 can handle. If the result of every PHI in BB is used
996 only by a PHI in DEST, then we can trivially merge the 1199 only by a PHI in DEST, then we can trivially merge the
997 PHI nodes from BB into DEST. */ 1200 PHI nodes from BB into DEST. */
998 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); 1201 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
999 gsi_next (&gsi)) 1202 gsi_next (&gsi))
1000 { 1203 {
1001 gimple phi = gsi_stmt (gsi); 1204 gphi *phi = gsi.phi ();
1002 tree result = gimple_phi_result (phi); 1205 tree result = gimple_phi_result (phi);
1003 use_operand_p imm_use; 1206 use_operand_p imm_use;
1004 gimple use_stmt; 1207 gimple *use_stmt;
1005 1208
1006 /* If the PHI's result is never used, then we can just 1209 /* If the PHI's result is never used, then we can just
1007 ignore it. */ 1210 ignore it. */
1008 if (has_zero_uses (result)) 1211 if (has_zero_uses (result))
1009 continue; 1212 continue;
1022 *current++ = bb; 1225 *current++ = bb;
1023 } 1226 }
1024 } 1227 }
1025 1228
1026 /* Now let's drain WORKLIST. */ 1229 /* Now let's drain WORKLIST. */
1230 bool changed = false;
1027 while (current != worklist) 1231 while (current != worklist)
1028 { 1232 {
1029 bb = *--current; 1233 bb = *--current;
1030 remove_forwarder_block_with_phi (bb); 1234 changed |= remove_forwarder_block_with_phi (bb);
1031 } 1235 }
1032
1033 free (worklist); 1236 free (worklist);
1237
1238 /* Removing forwarder blocks can cause formerly irreducible loops
1239 to become reducible if we merged two entry blocks. */
1240 if (changed
1241 && current_loops)
1242 loops_state_set (LOOPS_NEED_FIXUP);
1243
1034 return 0; 1244 return 0;
1035 } 1245 }
1036 1246
1037 static bool 1247 } // anon namespace
1038 gate_merge_phi (void) 1248
1039 { 1249 gimple_opt_pass *
1040 return 1; 1250 make_pass_merge_phi (gcc::context *ctxt)
1041 } 1251 {
1042 1252 return new pass_merge_phi (ctxt);
1043 struct gimple_opt_pass pass_merge_phi = 1253 }
1044 { 1254
1045 { 1255 /* Pass: cleanup the CFG just before expanding trees to RTL.
1046 GIMPLE_PASS, 1256 This is just a round of label cleanups and case node grouping
1047 "mergephi", /* name */ 1257 because after the tree optimizers have run such cleanups may
1048 gate_merge_phi, /* gate */ 1258 be necessary. */
1049 merge_phi_nodes, /* execute */ 1259
1050 NULL, /* sub */ 1260 static unsigned int
1051 NULL, /* next */ 1261 execute_cleanup_cfg_post_optimizing (void)
1052 0, /* static_pass_number */ 1262 {
1053 TV_TREE_MERGE_PHI, /* tv_id */ 1263 unsigned int todo = execute_fixup_cfg ();
1054 PROP_cfg | PROP_ssa, /* properties_required */ 1264 if (cleanup_tree_cfg ())
1055 0, /* properties_provided */ 1265 {
1056 0, /* properties_destroyed */ 1266 todo &= ~TODO_cleanup_cfg;
1057 0, /* todo_flags_start */ 1267 todo |= TODO_update_ssa;
1058 TODO_dump_func | TODO_ggc_collect /* todo_flags_finish */ 1268 }
1059 | TODO_verify_ssa 1269 maybe_remove_unreachable_handlers ();
1060 } 1270 cleanup_dead_labels ();
1271 if (group_case_labels ())
1272 todo |= TODO_cleanup_cfg;
1273 if ((flag_compare_debug_opt || flag_compare_debug)
1274 && flag_dump_final_insns)
1275 {
1276 FILE *final_output = fopen (flag_dump_final_insns, "a");
1277
1278 if (!final_output)
1279 {
1280 error ("could not open final insn dump file %qs: %m",
1281 flag_dump_final_insns);
1282 flag_dump_final_insns = NULL;
1283 }
1284 else
1285 {
1286 int save_unnumbered = flag_dump_unnumbered;
1287 int save_noaddr = flag_dump_noaddr;
1288
1289 flag_dump_noaddr = flag_dump_unnumbered = 1;
1290 fprintf (final_output, "\n");
1291 dump_enumerated_decls (final_output, dump_flags | TDF_NOUID);
1292 flag_dump_noaddr = save_noaddr;
1293 flag_dump_unnumbered = save_unnumbered;
1294 if (fclose (final_output))
1295 {
1296 error ("could not close final insn dump file %qs: %m",
1297 flag_dump_final_insns);
1298 flag_dump_final_insns = NULL;
1299 }
1300 }
1301 }
1302 return todo;
1303 }
1304
1305 namespace {
1306
1307 const pass_data pass_data_cleanup_cfg_post_optimizing =
1308 {
1309 GIMPLE_PASS, /* type */
1310 "optimized", /* name */
1311 OPTGROUP_NONE, /* optinfo_flags */
1312 TV_TREE_CLEANUP_CFG, /* tv_id */
1313 PROP_cfg, /* properties_required */
1314 0, /* properties_provided */
1315 0, /* properties_destroyed */
1316 0, /* todo_flags_start */
1317 TODO_remove_unused_locals, /* todo_flags_finish */
1061 }; 1318 };
1319
1320 class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
1321 {
1322 public:
1323 pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
1324 : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
1325 {}
1326
1327 /* opt_pass methods: */
1328 virtual unsigned int execute (function *)
1329 {
1330 return execute_cleanup_cfg_post_optimizing ();
1331 }
1332
1333 }; // class pass_cleanup_cfg_post_optimizing
1334
1335 } // anon namespace
1336
1337 gimple_opt_pass *
1338 make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
1339 {
1340 return new pass_cleanup_cfg_post_optimizing (ctxt);
1341 }
1342
1343