Mercurial > hg > CbC > CbC_gcc
annotate gcc/tree-ssa-propagate.c @ 158:494b0b89df80 default tip
...
author | Shinji KONO <kono@ie.u-ryukyu.ac.jp> |
---|---|
date | Mon, 25 May 2020 18:13:55 +0900 |
parents | 1830386684a0 |
children |
rev | line source |
---|---|
0 | 1 /* Generic SSA value propagation engine. |
145 | 2 Copyright (C) 2004-2020 Free Software Foundation, Inc. |
0 | 3 Contributed by Diego Novillo <dnovillo@redhat.com> |
4 | |
5 This file is part of GCC. | |
6 | |
7 GCC is free software; you can redistribute it and/or modify it | |
8 under the terms of the GNU General Public License as published by the | |
9 Free Software Foundation; either version 3, or (at your option) any | |
10 later version. | |
11 | |
12 GCC is distributed in the hope that it will be useful, but WITHOUT | |
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
15 for more details. | |
16 | |
17 You should have received a copy of the GNU General Public License | |
18 along with GCC; see the file COPYING3. If not see | |
19 <http://www.gnu.org/licenses/>. */ | |
20 | |
21 #include "config.h" | |
22 #include "system.h" | |
23 #include "coretypes.h" | |
111 | 24 #include "backend.h" |
0 | 25 #include "tree.h" |
111 | 26 #include "gimple.h" |
27 #include "ssa.h" | |
#include "gimple-pretty-print.h"
111 | 29 #include "dumpfile.h" |
30 #include "gimple-fold.h" | |
31 #include "tree-eh.h" | |
32 #include "gimplify.h" | |
33 #include "gimple-iterator.h" | |
34 #include "tree-cfg.h" | |
35 #include "tree-ssa.h" | |
0 | 36 #include "tree-ssa-propagate.h" |
111 | 37 #include "domwalk.h" |
38 #include "cfgloop.h" | |
39 #include "tree-cfgcleanup.h" | |
40 #include "cfganal.h" | |
0 | 41 |
42 /* This file implements a generic value propagation engine based on | |
43 the same propagation used by the SSA-CCP algorithm [1]. | |
44 | |
45 Propagation is performed by simulating the execution of every | |
46 statement that produces the value being propagated. Simulation | |
47 proceeds as follows: | |
48 | |
49 1- Initially, all edges of the CFG are marked not executable and | |
50 the CFG worklist is seeded with all the statements in the entry | |
51 basic block (block 0). | |
52 | |
53 2- Every statement S is simulated with a call to the call-back | |
54 function SSA_PROP_VISIT_STMT. This evaluation may produce 3 | |
55 results: | |
56 | |
57 SSA_PROP_NOT_INTERESTING: Statement S produces nothing of | |
58 interest and does not affect any of the work lists. | |
111 | 59 The statement may be simulated again if any of its input |
60 operands change in future iterations of the simulator. | |
0 | 61 |
62 SSA_PROP_VARYING: The value produced by S cannot be determined | |
63 at compile time. Further simulation of S is not required. | |
64 If S is a conditional jump, all the outgoing edges for the | |
65 block are considered executable and added to the work | |
66 list. | |
67 | |
68 SSA_PROP_INTERESTING: S produces a value that can be computed | |
69 at compile time. Its result can be propagated into the | |
70 statements that feed from S. Furthermore, if S is a | |
71 conditional jump, only the edge known to be taken is added | |
72 to the work list. Edges that are known not to execute are | |
73 never simulated. | |
74 | |
75 3- PHI nodes are simulated with a call to SSA_PROP_VISIT_PHI. The | |
76 return value from SSA_PROP_VISIT_PHI has the same semantics as | |
77 described in #2. | |
78 | |
79 4- Three work lists are kept. Statements are only added to these | |
80 lists if they produce one of SSA_PROP_INTERESTING or | |
81 SSA_PROP_VARYING. | |
82 | |
83 CFG_BLOCKS contains the list of blocks to be simulated. | |
84 Blocks are added to this list if their incoming edges are | |
85 found executable. | |
86 | |
111 | 87 SSA_EDGE_WORKLIST contains the list of statements that we |
88 need to revisit. | |
0 | 89 |
90 5- Simulation terminates when all three work lists are drained. | |
91 | |
92 Before calling ssa_propagate, it is important to clear | |
93 prop_simulate_again_p for all the statements in the program that | |
94 should be simulated. This initialization allows an implementation | |
95 to specify which statements should never be simulated. | |
96 | |
97 It is also important to compute def-use information before calling | |
98 ssa_propagate. | |
99 | |
100 References: | |
101 | |
102 [1] Constant propagation with conditional branches, | |
103 Wegman and Zadeck, ACM TOPLAS 13(2):181-210. | |
104 | |
105 [2] Building an Optimizing Compiler, | |
106 Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9. | |
107 | |
108 [3] Advanced Compiler Design and Implementation, | |
109 Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6 */ | |
110 | |
131 | 111 /* Worklists of control flow edge destinations. This contains |
111 | 112 the CFG order number of the blocks so we can iterate in CFG |
131 | 113 order by visiting in bit-order. We use two worklists to |
114 first make forward progress before iterating. */ | |
111 | 115 static bitmap cfg_blocks; |
131 | 116 static bitmap cfg_blocks_back; |
111 | 117 static int *bb_to_cfg_order; |
118 static int *cfg_order_to_bb; | |
0 | 119 |
131 | 120 /* Worklists of SSA edges which will need reexamination as their |
0 | 121 definition has changed. SSA edges are def-use edges in the SSA |
122 web. For each D-U edge, we store the target statement or PHI node | |
131 | 123 UID in a bitmap. UIDs order stmts in execution order. We use |
124 two worklists to first make forward progress before iterating. */ | |
111 | 125 static bitmap ssa_edge_worklist; |
131 | 126 static bitmap ssa_edge_worklist_back; |
111 | 127 static vec<gimple *> uid_to_stmt; |
0 | 128 |
131 | 129 /* Current RPO index in the iteration. */ |
130 static int curr_order; | |
0 | 131 |
132 | |
/* We have just defined a new value for VAR.  Add all immediate uses
   of VAR to the appropriate SSA edge worklist so their statements are
   re-simulated.  Uses in not-yet-visited blocks or on not-yet-executable
   PHI edges are skipped; they will be picked up when their block/edge
   becomes executable.  */

static void
add_ssa_edge (tree var)
{
  imm_use_iterator iter;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, iter, var)
    {
      gimple *use_stmt = USE_STMT (use_p);

      /* Statements already known varying (or excluded by the client
	 pass) are never simulated again.  */
      if (!prop_simulate_again_p (use_stmt))
	continue;

      /* If we did not yet simulate the block wait for this to happen
	 and do not add the stmt to the SSA edge worklist.  */
      basic_block use_bb = gimple_bb (use_stmt);
      if (! (use_bb->flags & BB_VISITED))
	continue;

      /* If this is a use on a not yet executable edge do not bother to
	 queue it.  */
      if (gimple_code (use_stmt) == GIMPLE_PHI
	  && !(EDGE_PRED (use_bb, PHI_ARG_INDEX_FROM_USE (use_p))->flags
	       & EDGE_EXECUTABLE))
	continue;

      /* Uses in blocks already passed in this RPO iteration go to the
	 back worklist so we finish the forward sweep first.  */
      bitmap worklist;
      if (bb_to_cfg_order[gimple_bb (use_stmt)->index] < curr_order)
	worklist = ssa_edge_worklist_back;
      else
	worklist = ssa_edge_worklist;

      /* bitmap_set_bit returns true only when the bit was newly set,
	 so each stmt is queued (and dumped) at most once.  */
      if (bitmap_set_bit (worklist, gimple_uid (use_stmt)))
	{
	  uid_to_stmt[gimple_uid (use_stmt)] = use_stmt;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "ssa_edge_worklist: adding SSA use in ");
	      print_gimple_stmt (dump_file, use_stmt, 0, TDF_SLIM);
	    }
	}
    }
}
178 | |
179 | |
/* Add edge E to the control flow worklist: mark it executable and queue
   its destination block for simulation.  Edges already marked executable
   and edges into the exit block are ignored.  */

static void
add_control_edge (edge e)
{
  basic_block bb = e->dest;
  if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return;

  /* If the edge had already been executed, skip it.  */
  if (e->flags & EDGE_EXECUTABLE)
    return;

  e->flags |= EDGE_EXECUTABLE;

  /* A destination earlier in RPO than the current position is a
     backedge target; defer it to the back worklist so the forward
     sweep completes first.  */
  int bb_order = bb_to_cfg_order[bb->index];
  if (bb_order < curr_order)
    bitmap_set_bit (cfg_blocks_back, bb_order);
  else
    bitmap_set_bit (cfg_blocks, bb_order);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Adding destination of edge (%d -> %d) to worklist\n",
	     e->src->index, e->dest->index);
}
205 | |
206 | |
/* Simulate the execution of STMT and update the work lists accordingly.
   Dispatches to the client pass's visit_phi/visit_stmt callbacks and acts
   on the returned ssa_prop_result: VARYING statements are retired and all
   their effects propagated; INTERESTING results propagate the produced
   value and/or the known-taken edge.  Finally the stmt is retired when
   none of its inputs can ever change again.  */

void
ssa_propagation_engine::simulate_stmt (gimple *stmt)
{
  enum ssa_prop_result val = SSA_PROP_NOT_INTERESTING;
  edge taken_edge = NULL;
  tree output_name = NULL_TREE;

  /* Pull the stmt off the SSA edge worklist.  */
  bitmap_clear_bit (ssa_edge_worklist, gimple_uid (stmt));

  /* Don't bother visiting statements that are already
     considered varying by the propagator.  */
  if (!prop_simulate_again_p (stmt))
    return;

  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      val = visit_phi (as_a <gphi *> (stmt));
      output_name = gimple_phi_result (stmt);
    }
  else
    val = visit_stmt (stmt, &taken_edge, &output_name);

  if (val == SSA_PROP_VARYING)
    {
      /* Once varying, the lattice cannot change again; never
	 re-simulate this statement.  */
      prop_set_simulate_again (stmt, false);

      /* If the statement produced a new varying value, add the SSA
	 edges coming out of OUTPUT_NAME.  */
      if (output_name)
	add_ssa_edge (output_name);

      /* If STMT transfers control out of its basic block, add
	 all outgoing edges to the work list.  */
      if (stmt_ends_bb_p (stmt))
	{
	  edge e;
	  edge_iterator ei;
	  basic_block bb = gimple_bb (stmt);
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    add_control_edge (e);
	}
      return;
    }
  else if (val == SSA_PROP_INTERESTING)
    {
      /* If the statement produced new value, add the SSA edges coming
	 out of OUTPUT_NAME.  */
      if (output_name)
	add_ssa_edge (output_name);

      /* If we know which edge is going to be taken out of this block,
	 add it to the CFG work list.  */
      if (taken_edge)
	add_control_edge (taken_edge);
    }

  /* If there are no SSA uses on the stmt whose defs are simulated
     again then this stmt will be never visited again.  */
  bool has_simulate_again_uses = false;
  use_operand_p use_p;
  ssa_op_iter iter;
  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      /* For PHIs an argument can still change if its incoming edge is
	 not yet executable or its defining stmt may be re-simulated.  */
      edge_iterator ei;
      edge e;
      tree arg;
      FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->preds)
	if (!(e->flags & EDGE_EXECUTABLE)
	    || ((arg = PHI_ARG_DEF_FROM_EDGE (stmt, e))
		&& TREE_CODE (arg) == SSA_NAME
		&& !SSA_NAME_IS_DEFAULT_DEF (arg)
		&& prop_simulate_again_p (SSA_NAME_DEF_STMT (arg))))
	  {
	    has_simulate_again_uses = true;
	    break;
	  }
    }
  else
    FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
      {
	gimple *def_stmt = SSA_NAME_DEF_STMT (USE_FROM_PTR (use_p));
	if (!gimple_nop_p (def_stmt)
	    && prop_simulate_again_p (def_stmt))
	  {
	    has_simulate_again_uses = true;
	    break;
	  }
      }
  if (!has_simulate_again_uses)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "marking stmt to be not simulated again\n");
      prop_set_simulate_again (stmt, false);
    }
}
305 | |
306 | |
/* Simulate the execution of BLOCK.  PHI nodes are simulated on every
   visit (their result depends on which incoming edges are executable);
   the remaining statements are simulated only the first time the block
   is reached, after which outgoing control edges are seeded.  */

void
ssa_propagation_engine::simulate_block (basic_block block)
{
  gimple_stmt_iterator gsi;

  /* There is nothing to do for the exit block.  */
  if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\nSimulating block %d\n", block->index);

  /* Always simulate PHI nodes, even if we have simulated this block
     before.  */
  for (gsi = gsi_start_phis (block); !gsi_end_p (gsi); gsi_next (&gsi))
    simulate_stmt (gsi_stmt (gsi));

  /* If this is the first time we've simulated this block, then we
     must simulate each of its statements.  */
  if (! (block->flags & BB_VISITED))
    {
      gimple_stmt_iterator j;
      unsigned int normal_edge_count;
      edge e, normal_edge;
      edge_iterator ei;

      for (j = gsi_start_bb (block); !gsi_end_p (j); gsi_next (&j))
	simulate_stmt (gsi_stmt (j));

      /* Note that we have simulated this block.  */
      block->flags |= BB_VISITED;

      /* We cannot predict when abnormal and EH edges will be executed, so
	 once a block is considered executable, we consider any
	 outgoing abnormal edges as executable.

	 TODO: This is not exactly true.  Simplifying statement might
	 prove it non-throwing and also computed goto can be handled
	 when destination is known.

	 At the same time, if this block has only one successor that is
	 reached by non-abnormal edges, then add that successor to the
	 worklist.  */
      normal_edge_count = 0;
      normal_edge = NULL;
      FOR_EACH_EDGE (e, ei, block->succs)
	{
	  if (e->flags & (EDGE_ABNORMAL | EDGE_EH))
	    add_control_edge (e);
	  else
	    {
	      normal_edge_count++;
	      normal_edge = e;
	    }
	}

      /* With multiple normal successors the taken edge is decided by
	 the block-ending statement's simulation, not here.  */
      if (normal_edge_count == 1)
	add_control_edge (normal_edge);
    }
}
370 | |
371 | |
/* Initialize local data structures and work lists.  Computes the RPO
   block ordering, assigns execution-order UIDs to all statements,
   clears BB_VISITED/EDGE_EXECUTABLE everywhere, and seeds the CFG
   worklist with the entry block's successors.  */

static void
ssa_prop_init (void)
{
  edge e;
  edge_iterator ei;
  basic_block bb;

  /* Worklists of SSA edges.  Tree view gives cheap first-set-bit
     queries on these sparse bitmaps.  */
  ssa_edge_worklist = BITMAP_ALLOC (NULL);
  ssa_edge_worklist_back = BITMAP_ALLOC (NULL);
  bitmap_tree_view (ssa_edge_worklist);
  bitmap_tree_view (ssa_edge_worklist_back);

  /* Worklist of basic-blocks.  */
  bb_to_cfg_order = XNEWVEC (int, last_basic_block_for_fn (cfun) + 1);
  cfg_order_to_bb = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
  int n = pre_and_rev_post_order_compute_fn (cfun, NULL,
					     cfg_order_to_bb, false);
  for (int i = 0; i < n; ++i)
    bb_to_cfg_order[cfg_order_to_bb[i]] = i;
  cfg_blocks = BITMAP_ALLOC (NULL);
  cfg_blocks_back = BITMAP_ALLOC (NULL);

  /* Initially assume that every edge in the CFG is not executable.
     (including the edges coming out of the entry block).  Mark blocks
     as not visited, blocks not yet visited will have all their statements
     simulated once an incoming edge gets executable.  */
  set_gimple_stmt_max_uid (cfun, 0);
  for (int i = 0; i < n; ++i)
    {
      gimple_stmt_iterator si;
      bb = BASIC_BLOCK_FOR_FN (cfun, cfg_order_to_bb[i]);

      /* UIDs are handed out in RPO walk order, so comparing UIDs
	 compares execution order (used by the SSA edge worklists).  */
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  gimple_set_uid (stmt, inc_gimple_stmt_max_uid (cfun));
	}

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  gimple_set_uid (stmt, inc_gimple_stmt_max_uid (cfun));
	}

      bb->flags &= ~BB_VISITED;
      FOR_EACH_EDGE (e, ei, bb->succs)
	e->flags &= ~EDGE_EXECUTABLE;
    }
  uid_to_stmt.safe_grow (gimple_stmt_max_uid (cfun));

  /* Seed the algorithm by adding the successors of the entry block to the
     edge worklist.  */
  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
    {
      e->flags &= ~EDGE_EXECUTABLE;
      add_control_edge (e);
    }
}
433 | |
434 | |
/* Free allocated storage.  Releases everything ssa_prop_init set up;
   safe to call once after propagation finishes.  */

static void
ssa_prop_fini (void)
{
  BITMAP_FREE (cfg_blocks);
  BITMAP_FREE (cfg_blocks_back);
  free (bb_to_cfg_order);
  free (cfg_order_to_bb);
  BITMAP_FREE (ssa_edge_worklist);
  BITMAP_FREE (ssa_edge_worklist_back);
  uid_to_stmt.release ();
}
448 | |
449 | |
/* Return true if EXPR is an acceptable right-hand-side for a
   GIMPLE assignment.  We validate the entire tree, not just
   the root node, thus catching expressions that embed complex
   operands that are not permitted in GIMPLE.  This function
   is needed because the folding routines in fold-const.c
   may return such expressions in some cases, e.g., an array
   access with an embedded index addition.  It may make more
   sense to have folding routines that are sensitive to the
   constraints on GIMPLE operands, rather than abandoning any
   attempt to fold if the usual folding turns out to be too
   aggressive.  */

bool
valid_gimple_rhs_p (tree expr)
{
  enum tree_code code = TREE_CODE (expr);

  /* Dispatch on the tree-code class first, then refine by code where
     the class alone is not decisive.  */
  switch (TREE_CODE_CLASS (code))
    {
    case tcc_declaration:
      if (!is_gimple_variable (expr))
	return false;
      break;

    case tcc_constant:
      /* All constants are ok.  */
      break;

    case tcc_comparison:
      /* GENERIC allows comparisons with non-boolean types, reject
	 those for GIMPLE.  Let vector-typed comparisons pass - rules
	 for GENERIC and GIMPLE are the same here.  */
      if (!(INTEGRAL_TYPE_P (TREE_TYPE (expr))
	    && (TREE_CODE (TREE_TYPE (expr)) == BOOLEAN_TYPE
		|| TYPE_PRECISION (TREE_TYPE (expr)) == 1))
	  && ! VECTOR_TYPE_P (TREE_TYPE (expr)))
	return false;

      /* Fallthru.  */
    case tcc_binary:
      /* Both operands of a binary (or valid comparison) must already
	 be GIMPLE values.  */
      if (!is_gimple_val (TREE_OPERAND (expr, 0))
	  || !is_gimple_val (TREE_OPERAND (expr, 1)))
	return false;
      break;

    case tcc_unary:
      if (!is_gimple_val (TREE_OPERAND (expr, 0)))
	return false;
      break;

    case tcc_expression:
      switch (code)
	{
	case ADDR_EXPR:
	  {
	    tree t;
	    if (is_gimple_min_invariant (expr))
	      return true;
	    /* Walk down the reference chain checking each array index
	       is a GIMPLE value, until we reach the base object.  */
	    t = TREE_OPERAND (expr, 0);
	    while (handled_component_p (t))
	      {
		/* ??? More checks needed, see the GIMPLE verifier.  */
		if ((TREE_CODE (t) == ARRAY_REF
		     || TREE_CODE (t) == ARRAY_RANGE_REF)
		    && !is_gimple_val (TREE_OPERAND (t, 1)))
		  return false;
		t = TREE_OPERAND (t, 0);
	      }
	    if (!is_gimple_id (t))
	      return false;
	  }
	  break;

	default:
	  /* Ternary RHSs (COND_EXPR, VEC_COND_EXPR, ...) are valid when
	     all three operands are; COND_EXPR/VEC_COND_EXPR allow a
	     condition expression as first operand.  */
	  if (get_gimple_rhs_class (code) == GIMPLE_TERNARY_RHS)
	    {
	      if (((code == VEC_COND_EXPR || code == COND_EXPR)
		   ? !is_gimple_condexpr (TREE_OPERAND (expr, 0))
		   : !is_gimple_val (TREE_OPERAND (expr, 0)))
		  || !is_gimple_val (TREE_OPERAND (expr, 1))
		  || !is_gimple_val (TREE_OPERAND (expr, 2)))
		return false;
	      break;
	    }
	  return false;
	}
      break;

    case tcc_vl_exp:
      return false;

    case tcc_exceptional:
      /* A CONSTRUCTOR is valid when every element value is a GIMPLE
	 value; otherwise only SSA_NAMEs are acceptable here.  */
      if (code == CONSTRUCTOR)
	{
	  unsigned i;
	  tree elt;
	  FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (expr), i, elt)
	    if (!is_gimple_val (elt))
	      return false;
	  return true;
	}
      if (code != SSA_NAME)
	return false;
      break;

    case tcc_reference:
      if (code == BIT_FIELD_REF)
	return is_gimple_val (TREE_OPERAND (expr, 0));
      return false;

    default:
      return false;
    }

  return true;
}
566 | |
567 | |
568 /* Return true if EXPR is a CALL_EXPR suitable for representation | |
569 as a single GIMPLE_CALL statement. If the arguments require | |
570 further gimplification, return false. */ | |
571 | |
67
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
572 static bool |
0 | 573 valid_gimple_call_p (tree expr) |
574 { | |
575 unsigned i, nargs; | |
576 | |
577 if (TREE_CODE (expr) != CALL_EXPR) | |
578 return false; | |
579 | |
580 nargs = call_expr_nargs (expr); | |
581 for (i = 0; i < nargs; i++) | |
67
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
582 { |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
583 tree arg = CALL_EXPR_ARG (expr, i); |
111 | 584 if (is_gimple_reg_type (TREE_TYPE (arg))) |
67
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
585 { |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
586 if (!is_gimple_val (arg)) |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
587 return false; |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
588 } |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
589 else |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
590 if (!is_gimple_lvalue (arg)) |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
591 return false; |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
592 } |
0 | 593 |
594 return true; | |
595 } | |
596 | |
597 | |
598 /* Make SSA names defined by OLD_STMT point to NEW_STMT | |
599 as their defining statement. */ | |
600 | |
601 void | |
111 | 602 move_ssa_defining_stmt_for_defs (gimple *new_stmt, gimple *old_stmt) |
0 | 603 { |
604 tree var; | |
605 ssa_op_iter iter; | |
606 | |
607 if (gimple_in_ssa_p (cfun)) | |
608 { | |
609 /* Make defined SSA_NAMEs point to the new | |
610 statement as their definition. */ | |
611 FOR_EACH_SSA_TREE_OPERAND (var, old_stmt, iter, SSA_OP_ALL_DEFS) | |
612 { | |
613 if (TREE_CODE (var) == SSA_NAME) | |
614 SSA_NAME_DEF_STMT (var) = new_stmt; | |
615 } | |
616 } | |
617 } | |
618 | |
/* Helper function for update_gimple_call and update_call_from_tree.
   A GIMPLE_CALL STMT is being replaced with GIMPLE_CALL NEW_STMT.
   Transfers the LHS, SSA defs, virtual operands, location and block
   from STMT, then swaps NEW_STMT into the iterator's position.  */

static void
finish_update_gimple_call (gimple_stmt_iterator *si_p, gimple *new_stmt,
			   gimple *stmt)
{
  gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
  move_ssa_defining_stmt_for_defs (new_stmt, stmt);
  gimple_move_vops (new_stmt, stmt);
  gimple_set_location (new_stmt, gimple_location (stmt));
  /* Only inherit the block when the new stmt does not already have
     one.  */
  if (gimple_block (new_stmt) == NULL_TREE)
    gimple_set_block (new_stmt, gimple_block (stmt));
  gsi_replace (si_p, new_stmt, false);
}
634 | |
/* Update a GIMPLE_CALL statement at iterator *SI_P to call to FN
   with number of arguments NARGS, where the arguments in GIMPLE form
   follow NARGS argument.  Always returns true; the boolean return
   mirrors update_call_from_tree's interface.  */

bool
update_gimple_call (gimple_stmt_iterator *si_p, tree fn, int nargs, ...)
{
  va_list ap;
  gcall *new_stmt, *stmt = as_a <gcall *> (gsi_stmt (*si_p));

  gcc_assert (is_gimple_call (stmt));
  va_start (ap, nargs);
  new_stmt = gimple_build_call_valist (fn, nargs, ap);
  finish_update_gimple_call (si_p, new_stmt, stmt);
  va_end (ap);
  return true;
}
0 | 652 |
653 /* Update a GIMPLE_CALL statement at iterator *SI_P to reflect the | |
654 value of EXPR, which is expected to be the result of folding the | |
655 call. This can only be done if EXPR is a CALL_EXPR with valid | |
656 GIMPLE operands as arguments, or if it is a suitable RHS expression | |
657 for a GIMPLE_ASSIGN. More complex expressions will require | |
111 | 658 gimplification, which will introduce additional statements. In this |
0 | 659 event, no update is performed, and the function returns false. |
660 Note that we cannot mutate a GIMPLE_CALL in-place, so we always | |
661 replace the statement at *SI_P with an entirely new statement. | |
662 The new statement need not be a call, e.g., if the original call | |
663 folded to a constant. */ | |
664 | |
665 bool | |
666 update_call_from_tree (gimple_stmt_iterator *si_p, tree expr) | |
667 { | |
111 | 668 gimple *stmt = gsi_stmt (*si_p); |
0 | 669 |
670 if (valid_gimple_call_p (expr)) | |
671 { | |
672 /* The call has simplified to another call. */ | |
673 tree fn = CALL_EXPR_FN (expr); | |
674 unsigned i; | |
675 unsigned nargs = call_expr_nargs (expr); | |
111 | 676 vec<tree> args = vNULL; |
677 gcall *new_stmt; | |
0 | 678 |
679 if (nargs > 0) | |
680 { | |
111 | 681 args.create (nargs); |
682 args.safe_grow_cleared (nargs); | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
683 |
0 | 684 for (i = 0; i < nargs; i++) |
111 | 685 args[i] = CALL_EXPR_ARG (expr, i); |
0 | 686 } |
687 | |
688 new_stmt = gimple_build_call_vec (fn, args); | |
111 | 689 finish_update_gimple_call (si_p, new_stmt, stmt); |
690 args.release (); | |
0 | 691 |
692 return true; | |
693 } | |
694 else if (valid_gimple_rhs_p (expr)) | |
695 { | |
111 | 696 tree lhs = gimple_call_lhs (stmt); |
697 gimple *new_stmt; | |
0 | 698 |
699 /* The call has simplified to an expression | |
700 that cannot be represented as a GIMPLE_CALL. */ | |
701 if (lhs) | |
702 { | |
703 /* A value is expected. | |
704 Introduce a new GIMPLE_ASSIGN statement. */ | |
705 STRIP_USELESS_TYPE_CONVERSION (expr); | |
706 new_stmt = gimple_build_assign (lhs, expr); | |
707 move_ssa_defining_stmt_for_defs (new_stmt, stmt); | |
145 | 708 gimple_move_vops (new_stmt, stmt); |
0 | 709 } |
710 else if (!TREE_SIDE_EFFECTS (expr)) | |
711 { | |
712 /* No value is expected, and EXPR has no effect. | |
713 Replace it with an empty statement. */ | |
714 new_stmt = gimple_build_nop (); | |
67
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
715 if (gimple_in_ssa_p (cfun)) |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
716 { |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
717 unlink_stmt_vdef (stmt); |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
718 release_defs (stmt); |
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
719 } |
0 | 720 } |
721 else | |
722 { | |
723 /* No value is expected, but EXPR has an effect, | |
724 e.g., it could be a reference to a volatile | |
725 variable. Create an assignment statement | |
726 with a dummy (unused) lhs variable. */ | |
727 STRIP_USELESS_TYPE_CONVERSION (expr); | |
111 | 728 if (gimple_in_ssa_p (cfun)) |
729 lhs = make_ssa_name (TREE_TYPE (expr)); | |
730 else | |
731 lhs = create_tmp_var (TREE_TYPE (expr)); | |
0 | 732 new_stmt = gimple_build_assign (lhs, expr); |
145 | 733 gimple_move_vops (new_stmt, stmt); |
0 | 734 move_ssa_defining_stmt_for_defs (new_stmt, stmt); |
735 } | |
736 gimple_set_location (new_stmt, gimple_location (stmt)); | |
737 gsi_replace (si_p, new_stmt, false); | |
738 return true; | |
739 } | |
740 else | |
741 /* The call simplified to an expression that is | |
742 not a valid GIMPLE RHS. */ | |
743 return false; | |
744 } | |
745 | |
/* Entry point to the propagation engine.

   The VISIT_STMT virtual function is called for every statement
   visited and the VISIT_PHI virtual function is called for every PHI
   node visited.  */

void
ssa_propagation_engine::ssa_propagate (void)
{
  ssa_prop_init ();

  curr_order = 0;

  /* Iterate until the worklists are empty.  We iterate both blocks
     and stmts in RPO order, using sets of two worklists to first
     complete the current iteration before iterating over backedges.  */
  while (1)
    {
      /* Peek at the lowest-ordered pending block and pending stmt.  */
      int next_block_order = (bitmap_empty_p (cfg_blocks)
			      ? -1 : bitmap_first_set_bit (cfg_blocks));
      int next_stmt_uid = (bitmap_empty_p (ssa_edge_worklist)
			   ? -1 : bitmap_first_set_bit (ssa_edge_worklist));
      if (next_block_order == -1 && next_stmt_uid == -1)
	{
	  /* Forward worklists drained; terminate unless backedge
	     worklists still have work, in which case swap them in and
	     start another RPO sweep.  */
	  if (bitmap_empty_p (cfg_blocks_back)
	      && bitmap_empty_p (ssa_edge_worklist_back))
	    break;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Regular worklists empty, now processing "
		     "backedge destinations\n");
	  std::swap (cfg_blocks, cfg_blocks_back);
	  std::swap (ssa_edge_worklist, ssa_edge_worklist_back);
	  continue;
	}

      int next_stmt_bb_order = -1;
      gimple *next_stmt = NULL;
      if (next_stmt_uid != -1)
	{
	  next_stmt = uid_to_stmt[next_stmt_uid];
	  next_stmt_bb_order = bb_to_cfg_order[gimple_bb (next_stmt)->index];
	}

      /* Pull the next block to simulate off the worklist if it comes first.  */
      if (next_block_order != -1
	  && (next_stmt_bb_order == -1
	      || next_block_order <= next_stmt_bb_order))
	{
	  curr_order = next_block_order;
	  bitmap_clear_bit (cfg_blocks, next_block_order);
	  basic_block bb
	    = BASIC_BLOCK_FOR_FN (cfun, cfg_order_to_bb [next_block_order]);
	  simulate_block (bb);
	}
      /* Else simulate from the SSA edge worklist.  */
      else
	{
	  curr_order = next_stmt_bb_order;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\nSimulating statement: ");
	      print_gimple_stmt (dump_file, next_stmt, 0, dump_flags);
	    }
	  simulate_stmt (next_stmt);
	}
    }

  ssa_prop_fini ();
}
816 | |
817 /* Return true if STMT is of the form 'mem_ref = RHS', where 'mem_ref' | |
818 is a non-volatile pointer dereference, a structure reference or a | |
819 reference to a single _DECL. Ignore volatile memory references | |
820 because they are not interesting for the optimizers. */ | |
821 | |
822 bool | |
111 | 823 stmt_makes_single_store (gimple *stmt) |
0 | 824 { |
825 tree lhs; | |
826 | |
827 if (gimple_code (stmt) != GIMPLE_ASSIGN | |
828 && gimple_code (stmt) != GIMPLE_CALL) | |
829 return false; | |
830 | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
831 if (!gimple_vdef (stmt)) |
0 | 832 return false; |
833 | |
834 lhs = gimple_get_lhs (stmt); | |
835 | |
836 /* A call statement may have a null LHS. */ | |
837 if (!lhs) | |
838 return false; | |
839 | |
840 return (!TREE_THIS_VOLATILE (lhs) | |
841 && (DECL_P (lhs) | |
842 || REFERENCE_CLASS_P (lhs))); | |
843 } | |
844 | |
845 | |
/* Propagation statistics, accumulated across one invocation of
   substitute_and_fold and reported via statistics_counter_event.  */
struct prop_stats_d
{
  /* Number of constants propagated into uses.  */
  long num_const_prop;
  /* Number of SSA-name copies propagated into uses.  */
  long num_copy_prop;
  /* Number of statements simplified by the engine's fold_stmt hook.  */
  long num_stmts_folded;
  /* Number of trivially dead statements removed.  */
  long num_dce;
};

/* Single shared instance; zeroed at the start of substitute_and_fold.  */
static struct prop_stats_d prop_stats;
856 | |
857 /* Replace USE references in statement STMT with the values stored in | |
858 PROP_VALUE. Return true if at least one reference was replaced. */ | |
859 | |
111 | 860 bool |
131 | 861 substitute_and_fold_engine::replace_uses_in (gimple *stmt) |
0 | 862 { |
863 bool replaced = false; | |
864 use_operand_p use; | |
865 ssa_op_iter iter; | |
866 | |
867 FOR_EACH_SSA_USE_OPERAND (use, stmt, iter, SSA_OP_USE) | |
868 { | |
869 tree tuse = USE_FROM_PTR (use); | |
131 | 870 tree val = get_value (tuse); |
0 | 871 |
872 if (val == tuse || val == NULL_TREE) | |
873 continue; | |
874 | |
875 if (gimple_code (stmt) == GIMPLE_ASM | |
876 && !may_propagate_copy_into_asm (tuse)) | |
877 continue; | |
878 | |
879 if (!may_propagate_copy (tuse, val)) | |
880 continue; | |
881 | |
882 if (TREE_CODE (val) != SSA_NAME) | |
883 prop_stats.num_const_prop++; | |
884 else | |
885 prop_stats.num_copy_prop++; | |
886 | |
887 propagate_value (use, val); | |
888 | |
889 replaced = true; | |
890 } | |
891 | |
892 return replaced; | |
893 } | |
894 | |
895 | |
/* Replace propagated values into all the arguments for PHI using the
   values from the lattice (via get_value).  Return true if at least
   one argument was replaced.  */

bool
substitute_and_fold_engine::replace_phi_args_in (gphi *phi)
{
  size_t i;
  bool replaced = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Folding PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
    }

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg_def (phi, i);

      /* Only SSA name arguments have lattice values to substitute.  */
      if (TREE_CODE (arg) == SSA_NAME)
	{
	  tree val = get_value (arg);

	  if (val && val != arg && may_propagate_copy (arg, val))
	    {
	      edge e = gimple_phi_arg_edge (phi, i);

	      if (TREE_CODE (val) != SSA_NAME)
		prop_stats.num_const_prop++;
	      else
		prop_stats.num_copy_prop++;

	      propagate_value (PHI_ARG_DEF_PTR (phi, i), val);
	      replaced = true;

	      /* If we propagated a copy and this argument flows
		 through an abnormal edge, update the replacement
		 accordingly.  */
	      if (TREE_CODE (val) == SSA_NAME
		  && e->flags & EDGE_ABNORMAL
		  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val))
		{
		  /* This can only occur for virtual operands, since
		     for the real ones SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val))
		     would prevent replacement.  */
		  gcc_checking_assert (virtual_operand_p (val));
		  SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val) = 1;
		}
	    }
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (!replaced)
	fprintf (dump_file, "No folding possible\n");
      else
	{
	  fprintf (dump_file, "Folded into: ");
	  print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
	  fprintf (dump_file, "\n");
	}
    }

  return replaced;
}
962 | |
963 | |
/* Dominator walker that substitutes lattice values into statements and
   folds them, while queueing up work (statement removal, noreturn
   fixups, EH edge cleanup) that cannot be performed during the walk
   itself.  The queues are drained by substitute_and_fold afterwards.  */

class substitute_and_fold_dom_walker : public dom_walker
{
public:
  substitute_and_fold_dom_walker (cdi_direction direction,
				  class substitute_and_fold_engine *engine)
    : dom_walker (direction),
      something_changed (false),
      substitute_and_fold_engine (engine)
  {
    stmts_to_remove.create (0);
    stmts_to_fixup.create (0);
    need_eh_cleanup = BITMAP_ALLOC (NULL);
  }
  ~substitute_and_fold_dom_walker ()
  {
    stmts_to_remove.release ();
    stmts_to_fixup.release ();
    BITMAP_FREE (need_eh_cleanup);
  }

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block) {}

  /* Set when a non-debug statement was changed.  */
  bool something_changed;
  /* Statements whose LHS value can replace all uses; removed after
     the walk so the callers' lattice is not destroyed mid-walk.  */
  vec<gimple *> stmts_to_remove;
  /* Calls that became noreturn during folding; fixed up after the
     walk because that may split blocks.  */
  vec<gimple *> stmts_to_fixup;
  /* Indices of blocks that may need dead EH edges purged.  */
  bitmap need_eh_cleanup;

  /* The engine providing lattice values (get_value) and the
     pass-specific fold_stmt hook.  */
  class substitute_and_fold_engine *substitute_and_fold_engine;
};
994 | |
/* Visit basic block BB: substitute known lattice values into its PHIs
   and statements, fold what changed, and queue followup work on the
   walker.  Always returns NULL (no edge-based pruning of the walk).  */

edge
substitute_and_fold_dom_walker::before_dom_children (basic_block bb)
{
  /* Propagate known values into PHI nodes.  */
  for (gphi_iterator i = gsi_start_phis (bb);
       !gsi_end_p (i);
       gsi_next (&i))
    {
      gphi *phi = i.phi ();
      tree res = gimple_phi_result (phi);
      if (virtual_operand_p (res))
	continue;
      /* If the PHI result itself has a known propagatable value, the
	 whole PHI is dead: queue it for removal instead of folding.  */
      if (res && TREE_CODE (res) == SSA_NAME)
	{
	  tree sprime = substitute_and_fold_engine->get_value (res);
	  if (sprime
	      && sprime != res
	      && may_propagate_copy (res, sprime))
	    {
	      stmts_to_remove.safe_push (phi);
	      continue;
	    }
	}
      something_changed |= substitute_and_fold_engine->replace_phi_args_in (phi);
    }

  /* Propagate known values into stmts.  In some case it exposes
     more trivially deletable stmts to walk backward.  */
  for (gimple_stmt_iterator i = gsi_start_bb (bb);
       !gsi_end_p (i);
       gsi_next (&i))
    {
      bool did_replace;
      gimple *stmt = gsi_stmt (i);

      /* No point propagating into a stmt we have a value for we
	 can propagate into all uses.  Mark it for removal instead.  */
      tree lhs = gimple_get_lhs (stmt);
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
	{
	  tree sprime = substitute_and_fold_engine->get_value (lhs);
	  if (sprime
	      && sprime != lhs
	      && may_propagate_copy (lhs, sprime)
	      && !stmt_could_throw_p (cfun, stmt)
	      && !gimple_has_side_effects (stmt)
	      /* We have to leave ASSERT_EXPRs around for jump-threading.  */
	      && (!is_gimple_assign (stmt)
		  || gimple_assign_rhs_code (stmt) != ASSERT_EXPR))
	    {
	      stmts_to_remove.safe_push (stmt);
	      continue;
	    }
	}

      /* Replace the statement with its folded version and mark it
	 folded.  */
      did_replace = false;
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Folding statement: ");
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	}

      /* Remember the pre-fold statement for EH bookkeeping and whether
	 it was already a noreturn call before folding.  */
      gimple *old_stmt = stmt;
      bool was_noreturn = (is_gimple_call (stmt)
			   && gimple_call_noreturn_p (stmt));

      /* Replace real uses in the statement.  */
      did_replace |= substitute_and_fold_engine->replace_uses_in (stmt);

      /* If we made a replacement, fold the statement.  */
      if (did_replace)
	{
	  fold_stmt (&i, follow_single_use_edges);
	  stmt = gsi_stmt (i);
	  gimple_set_modified (stmt, true);
	}
      /* Also fold if we want to fold all statements.  */
      else if (substitute_and_fold_engine->fold_all_stmts
	       && fold_stmt (&i, follow_single_use_edges))
	{
	  did_replace = true;
	  stmt = gsi_stmt (i);
	  gimple_set_modified (stmt, true);
	}

      /* Some statements may be simplified using propagator
	 specific information.  Do this before propagating
	 into the stmt to not disturb pass specific information.  */
      update_stmt_if_modified (stmt);
      if (substitute_and_fold_engine->fold_stmt(&i))
	{
	  did_replace = true;
	  prop_stats.num_stmts_folded++;
	  stmt = gsi_stmt (i);
	  gimple_set_modified (stmt, true);
	}

      /* If this is a control statement the propagator left edges
	 unexecuted on force the condition in a way consistent with
	 that.  See PR66945 for cases where the propagator can end
	 up with a different idea of a taken edge than folding
	 (once undefined behavior is involved).  */
      if (gimple_code (stmt) == GIMPLE_COND)
	{
	  /* Exactly one of the two successors is executable.  */
	  if ((EDGE_SUCC (bb, 0)->flags & EDGE_EXECUTABLE)
	      ^ (EDGE_SUCC (bb, 1)->flags & EDGE_EXECUTABLE))
	    {
	      if (((EDGE_SUCC (bb, 0)->flags & EDGE_TRUE_VALUE) != 0)
		  == ((EDGE_SUCC (bb, 0)->flags & EDGE_EXECUTABLE) != 0))
		gimple_cond_make_true (as_a <gcond *> (stmt));
	      else
		gimple_cond_make_false (as_a <gcond *> (stmt));
	      gimple_set_modified (stmt, true);
	      did_replace = true;
	    }
	}

      /* Now cleanup.  */
      if (did_replace)
	{
	  /* If we cleaned up EH information from the statement,
	     remove EH edges.  */
	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
	    bitmap_set_bit (need_eh_cleanup, bb->index);

	  /* If we turned a not noreturn call into a noreturn one
	     schedule it for fixup.  */
	  if (!was_noreturn
	      && is_gimple_call (stmt)
	      && gimple_call_noreturn_p (stmt))
	    stmts_to_fixup.safe_push (stmt);

	  if (gimple_assign_single_p (stmt))
	    {
	      tree rhs = gimple_assign_rhs1 (stmt);

	      /* Folding may have produced a new ADDR_EXPR whose
		 invariant-ness needs recomputing.  */
	      if (TREE_CODE (rhs) == ADDR_EXPR)
		recompute_tree_invariant_for_addr_expr (rhs);
	    }

	  /* Determine what needs to be done to update the SSA form.  */
	  update_stmt_if_modified (stmt);
	  if (!is_gimple_debug (stmt))
	    something_changed = true;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  if (did_replace)
	    {
	      fprintf (dump_file, "Folded into: ");
	      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	      fprintf (dump_file, "\n");
	    }
	  else
	    fprintf (dump_file, "Not folded\n");
	}
    }
  return NULL;
}
1157 | |
1158 | |
1159 | |
/* Perform final substitution and folding of propagated values.
   Process the whole function if BLOCK is null, otherwise only
   process the blocks that BLOCK dominates.  In the latter case,
   it is the caller's responsibility to ensure that dominator
   information is available and up-to-date.

   Walks the dominator tree substituting lattice values (get_value)
   into PHIs and statements, invoking the engine's fold_stmt hook for
   pass-specific simplification.  Afterwards, removes statements whose
   LHS value could be propagated into all uses, purges dead EH edges
   and fixes up calls that became noreturn.

   Return TRUE when something changed.  */

bool
substitute_and_fold_engine::substitute_and_fold (basic_block block)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\nSubstituting values and folding statements\n\n");

  memset (&prop_stats, 0, sizeof (prop_stats));

  /* Don't call calculate_dominance_info when iterating over a subgraph.
     Callers that are using the interface this way are likely to want to
     iterate over several disjoint subgraphs, and it would be expensive
     in enable-checking builds to revalidate the whole dominance tree
     each time.  */
  if (block)
    gcc_assert (dom_info_state (CDI_DOMINATORS));
  else
    calculate_dominance_info (CDI_DOMINATORS);
  substitute_and_fold_dom_walker walker (CDI_DOMINATORS, this);
  walker.walk (block ? block : ENTRY_BLOCK_PTR_FOR_FN (cfun));

  /* We cannot remove stmts during the BB walk, especially not release
     SSA names there as that destroys the lattice of our callers.
     Remove stmts in reverse order to make debug stmt creation possible.  */
  while (!walker.stmts_to_remove.is_empty ())
    {
      gimple *stmt = walker.stmts_to_remove.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
	{
	  fprintf (dump_file, "Removing dead stmt ");
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (dump_file, "\n");
	}
      prop_stats.num_dce++;
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      if (gimple_code (stmt) == GIMPLE_PHI)
	remove_phi_node (&gsi, true);
      else
	{
	  unlink_stmt_vdef (stmt);
	  gsi_remove (&gsi, true);
	  release_defs (stmt);
	}
    }

  if (!bitmap_empty_p (walker.need_eh_cleanup))
    gimple_purge_all_dead_eh_edges (walker.need_eh_cleanup);

  /* Fixup stmts that became noreturn calls.  This may require splitting
     blocks and thus isn't possible during the dominator walk.  Do this
     in reverse order so we don't inadvertedly remove a stmt we want to
     fixup by visiting a dominating now noreturn call first.  */
  while (!walker.stmts_to_fixup.is_empty ())
    {
      gimple *stmt = walker.stmts_to_fixup.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
	{
	  fprintf (dump_file, "Fixing up noreturn call ");
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (dump_file, "\n");
	}
      fixup_noreturn_call (stmt);
    }

  statistics_counter_event (cfun, "Constants propagated",
			    prop_stats.num_const_prop);
  statistics_counter_event (cfun, "Copies propagated",
			    prop_stats.num_copy_prop);
  statistics_counter_event (cfun, "Statements folded",
			    prop_stats.num_stmts_folded);
  statistics_counter_event (cfun, "Statements deleted",
			    prop_stats.num_dce);

  return walker.something_changed;
}
1254 | |
1255 | |
1256 /* Return true if we may propagate ORIG into DEST, false otherwise. */ | |
1257 | |
1258 bool | |
1259 may_propagate_copy (tree dest, tree orig) | |
1260 { | |
1261 tree type_d = TREE_TYPE (dest); | |
1262 tree type_o = TREE_TYPE (orig); | |
1263 | |
1264 /* If ORIG is a default definition which flows in from an abnormal edge | |
1265 then the copy can be propagated. It is important that we do so to avoid | |
1266 uninitialized copies. */ | |
1267 if (TREE_CODE (orig) == SSA_NAME | |
1268 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig) | |
1269 && SSA_NAME_IS_DEFAULT_DEF (orig) | |
1270 && (SSA_NAME_VAR (orig) == NULL_TREE | |
1271 || TREE_CODE (SSA_NAME_VAR (orig)) == VAR_DECL)) | |
1272 ; | |
1273 /* Otherwise if ORIG just flows in from an abnormal edge then the copy cannot | |
1274 be propagated. */ | |
1275 else if (TREE_CODE (orig) == SSA_NAME | |
1276 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig)) | |
1277 return false; | |
1278 /* Similarly if DEST flows in from an abnormal edge then the copy cannot be | |
1279 propagated. */ | |
1280 else if (TREE_CODE (dest) == SSA_NAME | |
1281 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (dest)) | |
1282 return false; | |
1283 | |
1284 /* Do not copy between types for which we *do* need a conversion. */ | |
1285 if (!useless_type_conversion_p (type_d, type_o)) | |
1286 return false; | |
1287 | |
1288 /* Generally propagating virtual operands is not ok as that may | |
1289 create overlapping life-ranges. */ | |
1290 if (TREE_CODE (dest) == SSA_NAME && virtual_operand_p (dest)) | |
1291 return false; | |
1292 | |
1293 /* Anything else is OK. */ | |
1294 return true; | |
1295 } | |
1296 | |
1297 /* Like may_propagate_copy, but use as the destination expression | |
1298 the principal expression (typically, the RHS) contained in | |
1299 statement DEST. This is more efficient when working with the | |
1300 gimple tuples representation. */ | |
1301 | |
1302 bool | |
1303 may_propagate_copy_into_stmt (gimple *dest, tree orig) | |
1304 { | |
1305 tree type_d; | |
1306 tree type_o; | |
1307 | |
1308 /* If the statement is a switch or a single-rhs assignment, | |
1309 then the expression to be replaced by the propagation may | |
1310 be an SSA_NAME. Fortunately, there is an explicit tree | |
1311 for the expression, so we delegate to may_propagate_copy. */ | |
1312 | |
1313 if (gimple_assign_single_p (dest)) | |
1314 return may_propagate_copy (gimple_assign_rhs1 (dest), orig); | |
1315 else if (gswitch *dest_swtch = dyn_cast <gswitch *> (dest)) | |
1316 return may_propagate_copy (gimple_switch_index (dest_swtch), orig); | |
1317 | |
1318 /* In other cases, the expression is not materialized, so there | |
1319 is no destination to pass to may_propagate_copy. On the other | |
1320 hand, the expression cannot be an SSA_NAME, so the analysis | |
1321 is much simpler. */ | |
1322 | |
1323 if (TREE_CODE (orig) == SSA_NAME | |
1324 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig)) | |
1325 return false; | |
1326 | |
1327 if (is_gimple_assign (dest)) | |
1328 type_d = TREE_TYPE (gimple_assign_lhs (dest)); | |
1329 else if (gimple_code (dest) == GIMPLE_COND) | |
1330 type_d = boolean_type_node; | |
1331 else if (is_gimple_call (dest) | |
1332 && gimple_call_lhs (dest) != NULL_TREE) | |
1333 type_d = TREE_TYPE (gimple_call_lhs (dest)); | |
1334 else | |
1335 gcc_unreachable (); | |
1336 | |
1337 type_o = TREE_TYPE (orig); | |
1338 | |
1339 if (!useless_type_conversion_p (type_d, type_o)) | |
1340 return false; | |
1341 | |
1342 return true; | |
1343 } | |
1344 | |
1345 /* Similarly, but we know that we're propagating into an ASM_EXPR. */ | |
1346 | |
1347 bool | |
1348 may_propagate_copy_into_asm (tree dest ATTRIBUTE_UNUSED) | |
1349 { | |
1350 return true; | |
0 | 1351 } |
1352 | |
111 | 1353 |
1354 /* Common code for propagate_value and replace_exp. | |
1355 | |
1356 Replace use operand OP_P with VAL. FOR_PROPAGATION indicates if the | |
1357 replacement is done to propagate a value or not. */ | |
1358 | |
1359 static void | |
1360 replace_exp_1 (use_operand_p op_p, tree val, | |
1361 bool for_propagation ATTRIBUTE_UNUSED) | |
1362 { | |
1363 if (flag_checking) | |
1364 { | |
1365 tree op = USE_FROM_PTR (op_p); | |
1366 gcc_assert (!(for_propagation | |
1367 && TREE_CODE (op) == SSA_NAME | |
1368 && TREE_CODE (val) == SSA_NAME | |
1369 && !may_propagate_copy (op, val))); | |
1370 } | |
1371 | |
1372 if (TREE_CODE (val) == SSA_NAME) | |
1373 SET_USE (op_p, val); | |
1374 else | |
1375 SET_USE (op_p, unshare_expr (val)); | |
1376 } | |
1377 | |
1378 | |
1379 /* Propagate the value VAL (assumed to be a constant or another SSA_NAME) | |
1380 into the operand pointed to by OP_P. | |
1381 | |
1382 Use this version for const/copy propagation as it will perform additional | |
1383 checks to ensure validity of the const/copy propagation. */ | |
1384 | |
1385 void | |
1386 propagate_value (use_operand_p op_p, tree val) | |
1387 { | |
1388 replace_exp_1 (op_p, val, true); | |
1389 } | |
1390 | |
1391 /* Replace *OP_P with value VAL (assumed to be a constant or another SSA_NAME). | |
1392 | |
1393 Use this version when not const/copy propagating values. For example, | |
1394 PRE uses this version when building expressions as they would appear | |
1395 in specific blocks taking into account actions of PHI nodes. | |
1396 | |
1397 The statement in which an expression has been replaced should be | |
1398 folded using fold_stmt_inplace. */ | |
1399 | |
1400 void | |
1401 replace_exp (use_operand_p op_p, tree val) | |
1402 { | |
1403 replace_exp_1 (op_p, val, false); | |
1404 } | |
1405 | |
1406 | |
1407 /* Propagate the value VAL (assumed to be a constant or another SSA_NAME) | |
1408 into the tree pointed to by OP_P. | |
1409 | |
1410 Use this version for const/copy propagation when SSA operands are not | |
1411 available. It will perform the additional checks to ensure validity of | |
1412 the const/copy propagation, but will not update any operand information. | |
1413 Be sure to mark the stmt as modified. */ | |
1414 | |
1415 void | |
1416 propagate_tree_value (tree *op_p, tree val) | |
1417 { | |
1418 if (TREE_CODE (val) == SSA_NAME) | |
1419 *op_p = val; | |
1420 else | |
1421 *op_p = unshare_expr (val); | |
1422 } | |
1423 | |
1424 | |
/* Like propagate_tree_value, but use as the operand to replace
   the principal expression (typically, the RHS) contained in the
   statement referenced by iterator GSI.  Note that it is not
   always possible to update the statement in-place, so a new
   statement may be created to replace the original.  */

void
propagate_tree_value_into_stmt (gimple_stmt_iterator *gsi, tree val)
{
  gimple *stmt = gsi_stmt (*gsi);

  if (is_gimple_assign (stmt))
    {
      /* Replace the whole RHS of the assignment with VAL.  */
      tree expr = NULL_TREE;
      if (gimple_assign_single_p (stmt))
	expr = gimple_assign_rhs1 (stmt);
      propagate_tree_value (&expr, val);
      gimple_assign_set_rhs_from_tree (gsi, expr);
    }
  else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    {
      /* Rewrite the condition as VAL != 0.  */
      tree lhs = NULL_TREE;
      tree rhs = build_zero_cst (TREE_TYPE (val));
      propagate_tree_value (&lhs, val);
      gimple_cond_set_code (cond_stmt, NE_EXPR);
      gimple_cond_set_lhs (cond_stmt, lhs);
      gimple_cond_set_rhs (cond_stmt, rhs);
    }
  else if (is_gimple_call (stmt)
           && gimple_call_lhs (stmt) != NULL_TREE)
    {
      /* Replace the call with an assignment of VAL to its LHS; this
	 may build a new statement in place of the call.  */
      tree expr = NULL_TREE;
      bool res;
      propagate_tree_value (&expr, val);
      res = update_call_from_tree (gsi, expr);
      gcc_assert (res);
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    propagate_tree_value (gimple_switch_index_ptr (swtch_stmt), val);
  else
    gcc_unreachable ();
}