/* SSA Dominator optimizations for trees
   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "rtl.h"
#include "tm_p.h"
#include "ggc.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "output.h"
#include "expr.h"
#include "function.h"
#include "diagnostic.h"
#include "timevar.h"
#include "tree-dump.h"
#include "tree-flow.h"
#include "domwalk.h"
#include "real.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "langhooks.h"
#include "params.h"

/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_CALL
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0;  tree opnd1; } binary;
    struct { tree fn;  bool pure;  size_t nargs;  tree *args; } call;
  } ops;
};
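
/* As an illustrative sketch (SSA names here are made up), an assignment
   such as

     x_3 = a_1 + b_2;

   would be represented with KIND == EXPR_BINARY, TYPE == TREE_TYPE (x_3),
   ops.binary.op == PLUS_EXPR and a_1/b_2 as the two operands, while a
   statement like "x_3 = foo (a_1)" would use the "call" variant with fn,
   nargs and args filled in.  See initialize_hash_element below.  */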

/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

struct cond_equivalence
{
  struct hashable_expr cond;
  tree value;
};

/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  The number of recorded conditions can vary, but
     can be determined by the condition's code.  So we have an array
     and its length rather than use a varray.  */
  struct cond_equivalence *cond_equivalences;
  unsigned int max_cond_equivalences;
};
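
/* For instance (illustrative only), if a block ends in

     if (a_1 == 7) ...

   then the outgoing true edge can record the simple equivalence
   LHS == a_1, RHS == 7, and COND_EQUIVALENCES can additionally note
   that the condition "a_1 == 7" has the value true along that edge.  */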

/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static htab_t avail_exprs;
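
/* For example, in a dominated sequence like

     x_2 = a_1 * 4;
     ...
     y_3 = a_1 * 4;

   the first statement makes the expression "a_1 * 4" available with value
   x_2, so the second occurrence can be rewritten as "y_3 = x_2;" (an
   illustrative sketch of the redundancy elimination described above).  */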

/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;
DEF_VEC_P(expr_hash_elt_t);
DEF_VEC_ALLOC_P(expr_hash_elt_t,heap);

static VEC(expr_hash_elt_t,heap) *avail_exprs_stack;
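
/* The unwinding discipline, roughly sketched:

     enter block B :  push a NULL marker onto AVAIL_EXPRS_STACK
     record exprs  :  push each newly entered hash table element
     leave block B :  pop elements and remove them from AVAIL_EXPRS
                      until the NULL marker is reached

   so the hash table only ever reflects the current path in the dominator
   tree (see remove_local_expressions_from_table).  */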

/* Stack of statements we need to rescan during finalization for newly
   exposed variables.

   Statement rescanning must occur after the current block's available
   expressions are removed from AVAIL_EXPRS.  Else we may change the
   hash code for an expression and be unable to find/remove it from
   AVAIL_EXPRS.  */
typedef gimple *gimple_p;
DEF_VEC_P(gimple_p);
DEF_VEC_ALLOC_P(gimple_p,heap);

static VEC(gimple_p,heap) *stmts_to_rescan;

/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};

/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static VEC(tree,heap) *const_and_copies_stack;

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static void optimize_stmt (struct dom_walk_data *,
			   basic_block,
			   gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static hashval_t real_avail_expr_hash (const void *);
static int avail_expr_eq (const void *, const void *);
static void htab_statistics (FILE *, htab_t);
static void record_cond (struct cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static bool eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void dom_thread_across_edge (struct dom_walk_data *, edge);
static void dom_opt_finalize_block (struct dom_walk_data *, basic_block);
static void dom_opt_initialize_block (struct dom_walk_data *, basic_block);
static void propagate_to_outgoing_edges (struct dom_walk_data *, basic_block);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);

/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      expr->type = NULL_TREE;

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn = gimple_call_fn (stmt);

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = (tree *) xcalloc (nargs, sizeof (tree));
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (code == GIMPLE_SWITCH)
    {
      expr->type = TREE_TYPE (gimple_switch_index (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}

/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Compare two hashable_expr structures for equivalence.
   They are considered equivalent when the expressions
   they denote must necessarily be equal.  The logic is intended
   to follow that of operand_equal_p in fold-const.c.  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If exactly one of the types is NULL, the expressions cannot
     be equal.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If the types do not have the same signedness, precision, and mode,
     then we cannot consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      {
        if (expr0->ops.binary.op != expr1->ops.binary.op)
          return false;

        if (operand_equal_p (expr0->ops.binary.opnd0,
                             expr1->ops.binary.opnd0, 0)
            && operand_equal_p (expr0->ops.binary.opnd1,
                                expr1->ops.binary.opnd1, 0))
          return true;

        /* For commutative ops, allow the other order.  */
        return (commutative_tree_code (expr0->ops.binary.op)
                && operand_equal_p (expr0->ops.binary.opnd0,
                                    expr1->ops.binary.opnd1, 0)
                && operand_equal_p (expr0->ops.binary.opnd1,
                                    expr1->ops.binary.opnd0, 0));
      }

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (! operand_equal_p (expr0->ops.call.fn,
                               expr1->ops.call.fn, 0))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}

/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow iterative_hash_expr in tree.c.  */

static hashval_t
iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      val = iterative_hash_expr (expr->ops.single.rhs, val);
      break;

    case EXPR_UNARY:
      val = iterative_hash_object (expr->ops.unary.op, val);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        val += TYPE_UNSIGNED (expr->type);

      val = iterative_hash_expr (expr->ops.unary.opnd, val);
      break;

    case EXPR_BINARY:
      val = iterative_hash_object (expr->ops.binary.op, val);
      if (commutative_tree_code (expr->ops.binary.op))
        val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
                                                expr->ops.binary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.binary.opnd0, val);
          val = iterative_hash_expr (expr->ops.binary.opnd1, val);
        }
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;

        val = iterative_hash_object (code, val);
        val = iterative_hash_expr (expr->ops.call.fn, val);
        for (i = 0; i < expr->ops.call.nargs; i++)
          val = iterative_hash_expr (expr->ops.call.args[i], val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  return val;
}

/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", tree_code_name[element->expr.ops.unary.op]);
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", tree_code_name[element->expr.ops.binary.op]);
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;

        print_generic_expr (stream, element->expr.ops.call.fn, 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, " ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);

  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);

  free (element);
}

/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB (bb)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              if (edge_info->cond_equivalences)
                free (edge_info->cond_equivalences);
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */
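
/* As a rough illustration of the jump threading part (block and SSA
   names are made up): given

     bb1:  if (x_1 == 0) goto bb2; else goto bb3;
     bb2:  ...
     bb4:  if (x_1 == 0) goto bb5; else goto bb6;

   if the path from bb1's true edge reaches bb4 without modifying x_1,
   the equivalence recorded on that edge shows the second test must be
   true, so the path can be threaded directly to bb5, duplicating blocks
   as needed.  */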

static unsigned int
tree_ssa_dominator_optimize (void)
{
  struct dom_walk_data walk_data;
  unsigned int i;

  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = htab_create (1024, real_avail_expr_hash, avail_expr_eq, free_expr_hash_elt);
  avail_exprs_stack = VEC_alloc (expr_hash_elt_t, heap, 20);
  const_and_copies_stack = VEC_alloc (tree, heap, 20);
  stmts_to_rescan = VEC_alloc (gimple_p, heap, 20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  /* Setup callbacks for the generic dominator tree walker.  */
  walk_data.walk_stmts_backward = false;
  walk_data.dom_direction = CDI_DOMINATORS;
  walk_data.initialize_block_local_data = NULL;
  walk_data.before_dom_children_before_stmts = dom_opt_initialize_block;
  walk_data.before_dom_children_walk_stmts = optimize_stmt;
  walk_data.before_dom_children_after_stmts = propagate_to_outgoing_edges;
  walk_data.after_dom_children_before_stmts = NULL;
  walk_data.after_dom_children_walk_stmts = NULL;
  walk_data.after_dom_children_after_stmts = dom_opt_finalize_block;
  /* Right now we only attach a dummy COND_EXPR to the global data pointer.
     When we attach more stuff we'll need to fill this out with a real
     structure.  */
  walk_data.global_data = NULL;
  walk_data.block_local_data_size = 0;
  walk_data.interesting_blocks = NULL;

  /* Now initialize the dominator walker.  */
  init_walk_dominator_tree (&walk_data);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.  */
  loop_optimizer_init (LOOPS_HAVE_SIMPLE_LATCHES);

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB (bb)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK (i);
          if (single_succ_p (bb)
              && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            {
              bitmap_clear_bit (need_eh_cleanup, i);
              bitmap_set_bit (need_eh_cleanup, single_succ (bb)->index);
            }
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_zero (need_eh_cleanup);
    }

  /* Finally, remove everything except invariants in SSA_NAME_VALUE.

     Long term we will be able to let everything in SSA_NAME_VALUE
     persist.  However, for now, we know this is the safe thing to do.  */
  for (i = 0; i < num_ssa_names; i++)
    {
      tree name = ssa_name (i);
      tree value;

      if (!name)
        continue;

      value = SSA_NAME_VALUE (name);
      if (value && !is_gimple_min_invariant (value))
        SSA_NAME_VALUE (name) = NULL;
    }

  statistics_counter_event (cfun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (cfun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (cfun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  htab_delete (avail_exprs);

  /* And finalize the dominator walker.  */
  fini_walk_dominator_tree (&walk_data);

  /* Free allocated bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  VEC_free (expr_hash_elt_t, heap, avail_exprs_stack);
  VEC_free (tree, heap, const_and_copies_stack);
  VEC_free (gimple_p, heap, stmts_to_rescan);

  return 0;
}

static bool
gate_dominator (void)
{
  return flag_tree_dom != 0;
}

struct gimple_opt_pass pass_dominator =
{
 {
  GIMPLE_PASS,
  "dom",				/* name */
  gate_dominator,			/* gate */
  tree_ssa_dominator_optimize,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TREE_SSA_DOMINATOR_OPTS,		/* tv_id */
  PROP_cfg | PROP_ssa | PROP_alias,	/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func
    | TODO_update_ssa
    | TODO_cleanup_cfg
    | TODO_verify_ssa			/* todo_flags_finish */
 }
};


/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */
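
/* For example, a condition written as

     if (4 < b_2)

   may be rewritten (when tree_swap_operands_p says swapping is
   profitable) as "if (b_2 > 4)", so that later lookups only ever see
   one canonical form of the comparison.  (Illustrative sketch.)  */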

static void
canonicalize_comparison (gimple condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}

/* Initialize local stacks for this optimizer and record equivalences
   upon entry to BB.  Equivalences can come from the edge traversed to
   reach BB or they may come from PHI nodes at the start of BB.  */

static void
dom_opt_initialize_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
                          basic_block bb)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, NULL);
  VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);

  record_equivalences_from_incoming_edge (bb);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);
}

/* Pop entries off AVAIL_EXPRS_STACK and remove the corresponding
   expressions from the AVAIL_EXPRS hash table, stopping when the NULL
   marker for the current block is reached.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (VEC_length (expr_hash_elt_t, avail_exprs_stack) > 0)
    {
      struct expr_hash_elt element;
      expr_hash_elt_t victim = VEC_pop (expr_hash_elt_t, avail_exprs_stack);

      if (victim == NULL)
        break;

      element = *victim;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, &element);
        }

      htab_remove_elt_with_hash (avail_exprs, &element, element.hash);
    }
}

/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (VEC_length (tree, const_and_copies_stack) > 0)
    {
      tree prev_value, dest;

      dest = VEC_pop (tree, const_and_copies_stack);

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = VEC_pop (tree, const_and_copies_stack);
      SSA_NAME_VALUE (dest) = prev_value;
    }
}

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}

/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

static void
dom_thread_across_edge (struct dom_walk_data *walk_data, edge e)
{
  if (! walk_data->global_data)
    {
      gimple dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);
      walk_data->global_data = dummy_cond;
    }

  thread_across_edge ((gimple) walk_data->global_data, e, false,
                      &const_and_copies_stack,
                      simplify_stmt_for_jump_threading);
}

/* We have finished processing the dominator children of BB, perform
   any finalization actions in preparation for leaving this node in
   the dominator tree.  */

static void
dom_opt_finalize_block (struct dom_walk_data *walk_data, basic_block bb)
{
  gimple last;

  /* If we have an outgoing edge to a block with multiple incoming and
     outgoing edges, then we may be able to thread the edge, i.e., we
     may be able to statically determine which of the outgoing edges
     will be traversed when the incoming edge from BB is traversed.  */
  if (single_succ_p (bb)
      && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
      && potentially_threadable_block (single_succ (bb)))
    {
      dom_thread_across_edge (walk_data, single_succ_edge (bb));
    }
  else if ((last = last_stmt (bb))
           && gimple_code (last) == GIMPLE_COND
           && EDGE_COUNT (bb->succs) == 2
           && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
           && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
    {
      edge true_edge, false_edge;

      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      /* Only try to thread the edge if it reaches a target block with
         more than one predecessor and more than one successor.  */
      if (potentially_threadable_block (true_edge->dest))
        {
          struct edge_info *edge_info;
          unsigned int i;

          /* Push a marker onto the available expression stack so that we
             unwind any expressions related to the TRUE arm before processing
             the false arm below.  */
          VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, NULL);
          VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);

          edge_info = (struct edge_info *) true_edge->aux;

          /* If we have info associated with this edge, record it into
             our equivalence tables.  */
          if (edge_info)
            {
              struct cond_equivalence *cond_equivalences = edge_info->cond_equivalences;
              tree lhs = edge_info->lhs;
              tree rhs = edge_info->rhs;

              /* If we have a simple NAME = VALUE equivalence, record it.  */
              if (lhs && TREE_CODE (lhs) == SSA_NAME)
                record_const_or_copy (lhs, rhs);

              /* If we have 0 = COND or 1 = COND equivalences, record them
                 into our expression hash tables.  */
              if (cond_equivalences)
                for (i = 0; i < edge_info->max_cond_equivalences; i++)
                  record_cond (&cond_equivalences[i]);
            }

          dom_thread_across_edge (walk_data, true_edge);

          /* And restore the various tables to their state before
             we threaded this edge.  */
          remove_local_expressions_from_table ();
        }

      /* Similarly for the ELSE arm.  */
      if (potentially_threadable_block (false_edge->dest))
        {
          struct edge_info *edge_info;
          unsigned int i;

          VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
          edge_info = (struct edge_info *) false_edge->aux;

          /* If we have info associated with this edge, record it into
             our equivalence tables.  */
          if (edge_info)
            {
              struct cond_equivalence *cond_equivalences = edge_info->cond_equivalences;
              tree lhs = edge_info->lhs;
              tree rhs = edge_info->rhs;

              /* If we have a simple NAME = VALUE equivalence, record it.  */
              if (lhs && TREE_CODE (lhs) == SSA_NAME)
                record_const_or_copy (lhs, rhs);

              /* If we have 0 = COND or 1 = COND equivalences, record them
                 into our expression hash tables.  */
              if (cond_equivalences)
                for (i = 0; i < edge_info->max_cond_equivalences; i++)
                  record_cond (&cond_equivalences[i]);
            }

          /* Now thread the edge.  */
          dom_thread_across_edge (walk_data, false_edge);

          /* No need to remove local expressions from our tables
             or restore vars to their original value as that will
             be done immediately below.  */
        }
    }

  remove_local_expressions_from_table ();
  restore_vars_to_original_value ();

  /* If we queued any statements to rescan in this block, then
     go ahead and rescan them now.  */
  while (VEC_length (gimple_p, stmts_to_rescan) > 0)
    {
      gimple *stmt_p = VEC_last (gimple_p, stmts_to_rescan);
      gimple stmt = *stmt_p;
      basic_block stmt_bb = gimple_bb (stmt);

      if (stmt_bb != bb)
        break;

      VEC_pop (gimple_p, stmts_to_rescan);
      pop_stmt_changes (stmt_p);
    }
}

/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */
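
/* For example (illustrative SSA names), the PHI

     x_3 = PHI <y_5 (e1), y_5 (e2), x_3 (e3)>

   makes x_3 equivalent to y_5: the self-referencing argument is ignored
   and all remaining arguments are equal, so we can record
   SSA_NAME_VALUE (x_3) == y_5.  */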

static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi) && may_propagate_copy (lhs, rhs))
        SSA_NAME_VALUE (lhs) = rhs;
    }
}

/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
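
/* For instance, a loop header with a preheader edge and a latch (back)
   edge has two predecessors, but the back edge is ignored because its
   source is dominated by the header, so the preheader edge is returned
   as the single interesting incoming edge.  */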
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}

/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          struct cond_equivalence *cond_equivalences = edge_info->cond_equivalences;

          if (lhs)
            record_equality (lhs, rhs);

          if (cond_equivalences)
            for (i = 0; i < edge_info->max_cond_equivalences; i++)
              record_cond (&cond_equivalences[i]);
        }
    }
}

/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements: %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, " avail_exprs: ");
  htab_statistics (file, avail_exprs);
}


/* Dump SSA statistics on stderr.  */

void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}


/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, htab_t htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab_size (htab),
           (long) htab_elements (htab),
           htab_collisions (htab));
}


/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (struct cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  void **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = htab_find_slot_with_hash (avail_exprs, (void *)element,
                                   element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = (void *) element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, element);
    }
  else
    free (element);
}

/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           struct cond_equivalence *p)
{
  struct hashable_expr *cond = &p->cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  p->value = boolean_true_node;
}

/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

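/* Concretely, for the comparison "a < b" the code below records, in
   addition to "a < b == true" and its inverse being false, that
   "a <= b" and "a != b" are true; for floating point operands it also
   records "ORDERED_EXPR (a, b)" and "a <> b" (LTGT_EXPR) as true.  */
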
static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          edge_info->max_cond_equivalences = 6;
          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 6);
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences[4]);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences[5]);
        }
      else
        {
          edge_info->max_cond_equivalences = 4;
          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences[2]);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[3]);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          edge_info->max_cond_equivalences = 3;
          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 3);
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences[2]);
        }
      else
        {
          edge_info->max_cond_equivalences = 2;
          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 2);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          edge_info->max_cond_equivalences = 5;
          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 5);
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences[4]);
        }
      else
        {
          edge_info->max_cond_equivalences = 4;
          edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[2]);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[3]);
      break;

    case UNORDERED_EXPR:
      edge_info->max_cond_equivalences = 8;
      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 8);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[2]);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[3]);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[4]);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[5]);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[6]);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[7]);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      edge_info->max_cond_equivalences = 4;
      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences[2]);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[3]);
      break;

    case UNEQ_EXPR:
      edge_info->max_cond_equivalences = 4;
      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[2]);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[3]);
      break;

    case LTGT_EXPR:
      edge_info->max_cond_equivalences = 4;
      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[2]);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences[3]);
      break;

    default:
      edge_info->max_cond_equivalences = 2;
      edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 2);
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &edge_info->cond_equivalences[0].cond);
  edge_info->cond_equivalences[0].value = boolean_true_node;

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &edge_info->cond_equivalences[1].cond);
  edge_info->cond_equivalences[1].value = boolean_false_node;
}

/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  SSA_NAME_VALUE (x) = y;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  VEC_reserve (tree, heap, const_and_copies_stack, 2);
  VEC_quick_push (tree, const_and_copies_stack, prev_x);
  VEC_quick_push (tree, const_and_copies_stack, x);
}

/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return defbb->loop_depth;
}

/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}

/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */
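
/* For example, from "if (x_1 == y_2)" on the true edge we may record
   x_1 as equal to y_2, but if y_2 is the floating point constant 0.0
   and signed zeros are honored we must not, because x_1 could be -0.0:
   the two compare equal without being interchangeable.  (Illustrative
   names.)  */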

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}

/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

static bool
simple_iv_increment_p (gimple stmt)
{
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  if (gimple_assign_rhs_code (stmt) != PLUS_EXPR
      && gimple_assign_rhs_code (stmt) != MINUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);

  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */
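
/* For example (made-up names), if the current table records
   SSA_NAME_VALUE (a_4) == 5 and a successor block starts with

     x_7 = PHI <a_4 (e), ...>

   then the PHI argument associated with edge e can be replaced by 5,
   provided the replacement is valid per may_propagate_copy.  */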

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_stmt_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gimple phi = gsi_stmt (gsi);

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }
    }
}

/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */
|
|
1646
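/* Illustrative examples (hypothetical GIMPLE): for "switch (i_2)", a
   simple case label whose target block is not shared with another case
   records i_2 equal to the case constant on the corresponding edge; for
   "if (p_4 == q_6)" the true edge records the copy p_4 = q_6 and both
   edges record the known truth value of the comparison.  */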
|
|
1647 static void
|
|
1648 record_edge_info (basic_block bb)
|
|
1649 {
|
|
1650 gimple_stmt_iterator gsi = gsi_last_bb (bb);
|
|
1651 struct edge_info *edge_info;
|
|
1652
|
|
1653 if (! gsi_end_p (gsi))
|
|
1654 {
|
|
1655 gimple stmt = gsi_stmt (gsi);
|
|
1656
|
|
1657 if (gimple_code (stmt) == GIMPLE_SWITCH)
|
|
1658 {
|
|
1659 tree index = gimple_switch_index (stmt);
|
|
1660
|
|
1661 if (TREE_CODE (index) == SSA_NAME)
|
|
1662 {
|
|
1663 int i;
|
|
1664 int n_labels = gimple_switch_num_labels (stmt);
|
|
1665 tree *info = XCNEWVEC (tree, last_basic_block);
|
|
1666 edge e;
|
|
1667 edge_iterator ei;
|
|
1668
|
|
1669 for (i = 0; i < n_labels; i++)
|
|
1670 {
|
|
1671 tree label = gimple_switch_label (stmt, i);
|
|
1672 basic_block target_bb = label_to_block (CASE_LABEL (label));
|
|
1673 if (CASE_HIGH (label)
|
|
1674 || !CASE_LOW (label)
|
|
1675 || info[target_bb->index])
|
|
1676 info[target_bb->index] = error_mark_node;
|
|
1677 else
|
|
1678 info[target_bb->index] = label;
|
|
1679 }
|
|
1680
|
|
1681 FOR_EACH_EDGE (e, ei, bb->succs)
|
|
1682 {
|
|
1683 basic_block target_bb = e->dest;
|
|
1684 tree label = info[target_bb->index];
|
|
1685
|
|
1686 if (label != NULL && label != error_mark_node)
|
|
1687 {
|
|
1688 tree x = fold_convert (TREE_TYPE (index), CASE_LOW (label));
|
|
1689 edge_info = allocate_edge_info (e);
|
|
1690 edge_info->lhs = index;
|
|
1691 edge_info->rhs = x;
|
|
1692 }
|
|
1693 }
|
|
1694 free (info);
|
|
1695 }
|
|
1696 }
|
|
1697
|
|
1698 /* A COND_EXPR may create equivalences too. */
|
|
1699 if (gimple_code (stmt) == GIMPLE_COND)
|
|
1700 {
|
|
1701 edge true_edge;
|
|
1702 edge false_edge;
|
|
1703
|
|
1704 tree op0 = gimple_cond_lhs (stmt);
|
|
1705 tree op1 = gimple_cond_rhs (stmt);
|
|
1706 enum tree_code code = gimple_cond_code (stmt);
|
|
1707
|
|
1708 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
|
|
1709
|
|
1710 /* Special case comparing booleans against a constant as we
|
|
1711 know the value of OP0 on both arms of the branch, i.e., we
|
|
1712 can record an equivalence for OP0 rather than COND. */
|
|
1713 if ((code == EQ_EXPR || code == NE_EXPR)
|
|
1714 && TREE_CODE (op0) == SSA_NAME
|
|
1715 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
|
|
1716 && is_gimple_min_invariant (op1))
|
|
1717 {
|
|
1718 if (code == EQ_EXPR)
|
|
1719 {
|
|
1720 edge_info = allocate_edge_info (true_edge);
|
|
1721 edge_info->lhs = op0;
|
|
1722 edge_info->rhs = (integer_zerop (op1)
|
|
1723 ? boolean_false_node
|
|
1724 : boolean_true_node);
|
|
1725
|
|
1726 edge_info = allocate_edge_info (false_edge);
|
|
1727 edge_info->lhs = op0;
|
|
1728 edge_info->rhs = (integer_zerop (op1)
|
|
1729 ? boolean_true_node
|
|
1730 : boolean_false_node);
|
|
1731 }
|
|
1732 else
|
|
1733 {
|
|
1734 edge_info = allocate_edge_info (true_edge);
|
|
1735 edge_info->lhs = op0;
|
|
1736 edge_info->rhs = (integer_zerop (op1)
|
|
1737 ? boolean_true_node
|
|
1738 : boolean_false_node);
|
|
1739
|
|
1740 edge_info = allocate_edge_info (false_edge);
|
|
1741 edge_info->lhs = op0;
|
|
1742 edge_info->rhs = (integer_zerop (op1)
|
|
1743 ? boolean_false_node
|
|
1744 : boolean_true_node);
|
|
1745 }
|
|
1746 }
|
|
1747 else if (is_gimple_min_invariant (op0)
|
|
1748 && (TREE_CODE (op1) == SSA_NAME
|
|
1749 || is_gimple_min_invariant (op1)))
|
|
1750 {
|
|
1751 tree cond = build2 (code, boolean_type_node, op0, op1);
|
|
1752 tree inverted = invert_truthvalue (cond);
|
|
1753 struct edge_info *edge_info;
|
|
1754
|
|
1755 edge_info = allocate_edge_info (true_edge);
|
|
1756 record_conditions (edge_info, cond, inverted);
|
|
1757
|
|
1758 if (code == EQ_EXPR)
|
|
1759 {
|
|
1760 edge_info->lhs = op1;
|
|
1761 edge_info->rhs = op0;
|
|
1762 }
|
|
1763
|
|
1764 edge_info = allocate_edge_info (false_edge);
|
|
1765 record_conditions (edge_info, inverted, cond);
|
|
1766
|
|
1767 if (code == NE_EXPR)
|
|
1768 {
|
|
1769 edge_info->lhs = op1;
|
|
1770 edge_info->rhs = op0;
|
|
1771 }
|
|
1772 }
|
|
1773
|
|
1774 else if (TREE_CODE (op0) == SSA_NAME
|
|
1775 && (is_gimple_min_invariant (op1)
|
|
1776 || TREE_CODE (op1) == SSA_NAME))
|
|
1777 {
|
|
1778 tree cond = build2 (code, boolean_type_node, op0, op1);
|
|
1779 tree inverted = invert_truthvalue (cond);
|
|
1780 struct edge_info *edge_info;
|
|
1781
|
|
1782 edge_info = allocate_edge_info (true_edge);
|
|
1783 record_conditions (edge_info, cond, inverted);
|
|
1784
|
|
1785 if (code == EQ_EXPR)
|
|
1786 {
|
|
1787 edge_info->lhs = op0;
|
|
1788 edge_info->rhs = op1;
|
|
1789 }
|
|
1790
|
|
1791 edge_info = allocate_edge_info (false_edge);
|
|
1792 record_conditions (edge_info, inverted, cond);
|
|
1793
|
|
1794 if (TREE_CODE (cond) == NE_EXPR)
|
|
1795 {
|
|
1796 edge_info->lhs = op0;
|
|
1797 edge_info->rhs = op1;
|
|
1798 }
|
|
1799 }
|
|
1800 }
|
|
1801
|
|
1802 /* ??? TRUTH_NOT_EXPR can create an equivalence too. */
|
|
1803 }
|
|
1804 }
|
|
1805
|
|
1806 /* Propagate information from BB to its outgoing edges.
|
|
1807
|
|
1808 This can include equivalence information implied by control statements
|
|
1809 at the end of BB and const/copy propagation into PHIs in BB's
|
|
1810 successor blocks. */
|
|
1811
|
|
1812 static void
|
|
1813 propagate_to_outgoing_edges (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
|
|
1814 basic_block bb)
|
|
1815 {
|
|
1816 record_edge_info (bb);
|
|
1817 cprop_into_successor_phis (bb);
|
|
1818 }
|
|
1819
|
|
1820 /* Search for redundant computations in STMT. If any are found, then
|
|
1821 replace them with the variable holding the result of the computation.
|
|
1822
|
|
1823 If safe, record this expression into the available expression hash
|
|
1824 table. */
|
|
1825
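/* A minimal sketch of the intended effect (SSA names hypothetical):

     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;   <-- may be replaced by d_4 = a_1;

   provided the first computation is available, i.e. it dominates the
   second and the hashed VUSEs still match.  */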
|
|
1826 static bool
|
|
1827 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
|
|
1828 {
|
|
1829 tree expr_type;
|
|
1830 tree cached_lhs;
|
|
1831 bool insert = true;
|
|
1832 bool retval = false;
|
|
1833 bool assigns_var_p = false;
|
|
1834
|
|
1835 gimple stmt = gsi_stmt (*gsi);
|
|
1836
|
|
1837 tree def = gimple_get_lhs (stmt);
|
|
1838
|
|
1839 /* Certain expressions on the RHS can be optimized away, but can not
|
|
1840 themselves be entered into the hash tables. */
|
|
1841 if (! def
|
|
1842 || TREE_CODE (def) != SSA_NAME
|
|
1843 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
|
|
1844 || !ZERO_SSA_OPERANDS (stmt, SSA_OP_VDEF)
|
|
1845 /* Do not record equivalences for increments of ivs. This would create
|
|
1846 overlapping live ranges for a very questionable gain. */
|
|
1847 || simple_iv_increment_p (stmt))
|
|
1848 insert = false;
|
|
1849
|
|
1850 /* Check if the expression has been computed before. */
|
|
1851 cached_lhs = lookup_avail_expr (stmt, insert);
|
|
1852
|
|
1853 opt_stats.num_exprs_considered++;
|
|
1854
|
|
1855 /* Get the type of the expression we are trying to optimize. */
|
|
1856 if (is_gimple_assign (stmt))
|
|
1857 {
|
|
1858 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
|
|
1859 assigns_var_p = true;
|
|
1860 }
|
|
1861 else if (gimple_code (stmt) == GIMPLE_COND)
|
|
1862 expr_type = boolean_type_node;
|
|
1863 else if (is_gimple_call (stmt))
|
|
1864 {
|
|
1865 gcc_assert (gimple_call_lhs (stmt));
|
|
1866 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
|
|
1867 assigns_var_p = true;
|
|
1868 }
|
|
1869 else if (gimple_code (stmt) == GIMPLE_SWITCH)
|
|
1870 expr_type = TREE_TYPE (gimple_switch_index (stmt));
|
|
1871 else
|
|
1872 gcc_unreachable ();
|
|
1873
|
|
1874 if (!cached_lhs)
|
|
1875 return false;
|
|
1876
|
|
1877 /* It is safe to ignore types here since we have already done
|
|
1878 type checking in the hashing and equality routines. In fact
|
|
1879 type checking here merely gets in the way of constant
|
|
1880 propagation. Also, make sure that it is safe to propagate
|
|
1881 CACHED_LHS into the expression in STMT. */
|
|
1882 if ((TREE_CODE (cached_lhs) != SSA_NAME
|
|
1883 && (assigns_var_p
|
|
1884 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
|
|
1885 || may_propagate_copy_into_stmt (stmt, cached_lhs))
|
|
1886 {
|
|
1887 #if defined ENABLE_CHECKING
|
|
1888 gcc_assert (TREE_CODE (cached_lhs) == SSA_NAME
|
|
1889 || is_gimple_min_invariant (cached_lhs));
|
|
1890 #endif
|
|
1891
|
|
1892 if (dump_file && (dump_flags & TDF_DETAILS))
|
|
1893 {
|
|
1894 fprintf (dump_file, " Replaced redundant expr '");
|
|
1895 print_gimple_expr (dump_file, stmt, 0, dump_flags);
|
|
1896 fprintf (dump_file, "' with '");
|
|
1897 print_generic_expr (dump_file, cached_lhs, dump_flags);
|
|
1898 fprintf (dump_file, "'\n");
|
|
1899 }
|
|
1900
|
|
1901 opt_stats.num_re++;
|
|
1902
|
|
1903 if (TREE_CODE (cached_lhs) == ADDR_EXPR
|
|
1904 || (POINTER_TYPE_P (expr_type)
|
|
1905 && is_gimple_min_invariant (cached_lhs)))
|
|
1906 retval = true;
|
|
1907
|
|
1908 if (assigns_var_p
|
|
1909 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
|
|
1910 cached_lhs = fold_convert (expr_type, cached_lhs);
|
|
1911
|
|
1912 propagate_tree_value_into_stmt (gsi, cached_lhs);
|
|
1913
|
|
1914 /* Since it is always necessary to mark the result as modified,
|
|
1915 perhaps we should move this into propagate_tree_value_into_stmt
|
|
1916 itself. */
|
|
1917 gimple_set_modified (gsi_stmt (*gsi), true);
|
|
1918 }
|
|
1919 return retval;
|
|
1920 }
|
|
1921
|
|
1922 /* Return true if statement GS is an assignment that performs a useless
|
|
1923 type conversion. It is intended to be a tuples analog of function
|
|
1924 tree_ssa_useless_type_conversion. */
|
|
1925
|
|
1926 static bool
|
|
1927 gimple_assign_unary_useless_conversion_p (gimple gs)
|
|
1928 {
|
|
1929 if (is_gimple_assign (gs)
|
|
1930 && (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (gs))
|
|
1931 || gimple_assign_rhs_code (gs) == VIEW_CONVERT_EXPR
|
|
1932 || gimple_assign_rhs_code (gs) == NON_LVALUE_EXPR))
|
|
1933 {
|
|
1934 tree lhs_type = TREE_TYPE (gimple_assign_lhs (gs));
|
|
1935 tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (gs));
|
|
1936 return useless_type_conversion_p (lhs_type, rhs_type);
|
|
1937 }
|
|
1938
|
|
1939 return false;
|
|
1940 }
|
|
1941
|
|
1942 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
|
|
1943 the available expressions table or the const_and_copies table.
|
|
1944 Detect and record those equivalences. */
|
|
1945 /* We handle only very simple copy equivalences here. The heavy
|
|
1946 lifting is done by eliminate_redundant_computations. */
|
|
1947
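/* E.g. (illustrative, hypothetical names): a simple copy "a_4 = b_5"
   just sets SSA_NAME_VALUE (a_4) to b_5, while a store "*p_1 = x_2" is
   entered into the available expression table as the artificial load
   "x_2 = *p_1", so a later load from *p_1 with the same virtual operands
   may be rewritten to use x_2.  */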
|
|
1948 static void
|
|
1949 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
|
|
1950 {
|
|
1951 tree lhs;
|
|
1952 enum tree_code lhs_code;
|
|
1953
|
|
1954 gcc_assert (is_gimple_assign (stmt));
|
|
1955
|
|
1956 lhs = gimple_assign_lhs (stmt);
|
|
1957 lhs_code = TREE_CODE (lhs);
|
|
1958
|
|
1959 if (lhs_code == SSA_NAME
|
|
1960 && (gimple_assign_single_p (stmt)
|
|
1961 || gimple_assign_unary_useless_conversion_p (stmt)))
|
|
1962 {
|
|
1963 tree rhs = gimple_assign_rhs1 (stmt);
|
|
1964
|
|
1965 /* Strip away any useless type conversions. */
|
|
1966 STRIP_USELESS_TYPE_CONVERSION (rhs);
|
|
1967
|
|
1968 /* If the RHS of the assignment is a constant or another variable that
|
|
1969 may be propagated, register it in the CONST_AND_COPIES table. We
|
|
1970 do not need to record unwind data for this, since this is a true
|
|
1971 assignment and not an equivalence inferred from a comparison. All
|
|
1972 uses of this ssa name are dominated by this assignment, so unwinding
|
|
1973 just costs time and space. */
|
|
1974 if (may_optimize_p
|
|
1975 && (TREE_CODE (rhs) == SSA_NAME
|
|
1976 || is_gimple_min_invariant (rhs)))
|
|
1977 {
|
|
1978 if (dump_file && (dump_flags & TDF_DETAILS))
|
|
1979 {
|
|
1980 fprintf (dump_file, "==== ASGN ");
|
|
1981 print_generic_expr (dump_file, lhs, 0);
|
|
1982 fprintf (dump_file, " = ");
|
|
1983 print_generic_expr (dump_file, rhs, 0);
|
|
1984 fprintf (dump_file, "\n");
|
|
1985 }
|
|
1986
|
|
1987 SSA_NAME_VALUE (lhs) = rhs;
|
|
1988 }
|
|
1989 }
|
|
1990
|
|
1991 /* A memory store, even an aliased store, creates a useful
|
|
1992 equivalence. By exchanging the LHS and RHS, creating suitable
|
|
1993 vops and recording the result in the available expression table,
|
|
1994 we may be able to expose more redundant loads. */
|
|
1995 if (!gimple_has_volatile_ops (stmt)
|
|
1996 && gimple_references_memory_p (stmt)
|
|
1997 && gimple_assign_single_p (stmt)
|
|
1998 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
|
|
1999 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
|
|
2000 && !is_gimple_reg (lhs))
|
|
2001 {
|
|
2002 tree rhs = gimple_assign_rhs1 (stmt);
|
|
2003 gimple new_stmt;
|
|
2004
|
|
2005 /* Build a new statement with the RHS and LHS exchanged. */
|
|
2006 if (TREE_CODE (rhs) == SSA_NAME)
|
|
2007 {
|
|
2008 /* NOTE tuples. The call to gimple_build_assign below replaced
|
|
2009 a call to build_gimple_modify_stmt, which did not set the
|
|
2010 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
|
|
2011 may cause an SSA validation failure, as the LHS may be a
|
|
2012 default-initialized name and should have no definition. I'm
|
|
2013 a bit dubious of this, as the artificial statement that we
|
|
2014 generate here may in fact be ill-formed, but it is simply
|
|
2015 used as an internal device in this pass, and never becomes
|
|
2016 part of the CFG. */
|
|
2017 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
|
|
2018 new_stmt = gimple_build_assign (rhs, lhs);
|
|
2019 SSA_NAME_DEF_STMT (rhs) = defstmt;
|
|
2020 }
|
|
2021 else
|
|
2022 new_stmt = gimple_build_assign (rhs, lhs);
|
|
2023
|
|
2024 create_ssa_artificial_load_stmt (new_stmt, stmt, true);
|
|
2025
|
|
2026 /* Finally enter the statement into the available expression
|
|
2027 table. */
|
|
2028 lookup_avail_expr (new_stmt, true);
|
|
2029 }
|
|
2030 }
|
|
2031
|
|
2032 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
|
|
2033 CONST_AND_COPIES. */
|
|
2034
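/* E.g. (hypothetical names): if SSA_NAME_VALUE (x_3) is 42, a use of
   x_3 in "y_5 = x_3 + 1" becomes "y_5 = 42 + 1", subject to the checks
   below on virtual operands, asm statements, volatiles and loop depth.  */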
|
|
2035 static bool
|
|
2036 cprop_operand (gimple stmt, use_operand_p op_p)
|
|
2037 {
|
|
2038 bool may_have_exposed_new_symbols = false;
|
|
2039 tree val;
|
|
2040 tree op = USE_FROM_PTR (op_p);
|
|
2041
|
|
2042 /* If the operand has a known constant value or it is known to be a
|
|
2043 copy of some other variable, use the value or copy stored in
|
|
2044 CONST_AND_COPIES. */
|
|
2045 val = SSA_NAME_VALUE (op);
|
|
2046 if (val && val != op)
|
|
2047 {
|
|
2048 /* Do not change the base variable in the virtual operand
|
|
2049 tables. That would make it impossible to reconstruct
|
|
2050 the renamed virtual operand if we later modify this
|
|
2051 statement. Also only allow the new value to be an SSA_NAME
|
|
2052 for propagation into virtual operands. */
|
|
2053 if (!is_gimple_reg (op)
|
|
2054 && (TREE_CODE (val) != SSA_NAME
|
|
2055 || is_gimple_reg (val)
|
|
2056 || get_virtual_var (val) != get_virtual_var (op)))
|
|
2057 return false;
|
|
2058
|
|
2059 /* Do not replace hard register operands in asm statements. */
|
|
2060 if (gimple_code (stmt) == GIMPLE_ASM
|
|
2061 && !may_propagate_copy_into_asm (op))
|
|
2062 return false;
|
|
2063
|
|
2064 /* Certain operands are not allowed to be copy propagated due
|
|
2065 to their interaction with exception handling and some GCC
|
|
2066 extensions. */
|
|
2067 if (!may_propagate_copy (op, val))
|
|
2068 return false;
|
|
2069
|
|
2070 /* Do not propagate addresses that point to volatiles into memory
|
|
2071 stmts without volatile operands. */
|
|
2072 if (POINTER_TYPE_P (TREE_TYPE (val))
|
|
2073 && TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (val)))
|
|
2074 && gimple_has_mem_ops (stmt)
|
|
2075 && !gimple_has_volatile_ops (stmt))
|
|
2076 return false;
|
|
2077
|
|
2078 /* Do not propagate copies if the propagated value is at a deeper loop
|
|
2079 depth than the propagatee. Otherwise, this may move loop variant
|
|
2080 variables outside of their loops and prevent coalescing
|
|
2081 opportunities. If the value was loop invariant, it will be hoisted
|
|
2082 by LICM and exposed for copy propagation. */
|
|
2083 if (loop_depth_of_name (val) > loop_depth_of_name (op))
|
|
2084 return false;
|
|
2085
|
|
2086 /* Dump details. */
|
|
2087 if (dump_file && (dump_flags & TDF_DETAILS))
|
|
2088 {
|
|
2089 fprintf (dump_file, " Replaced '");
|
|
2090 print_generic_expr (dump_file, op, dump_flags);
|
|
2091 fprintf (dump_file, "' with %s '",
|
|
2092 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
|
|
2093 print_generic_expr (dump_file, val, dump_flags);
|
|
2094 fprintf (dump_file, "'\n");
|
|
2095 }
|
|
2096
|
|
2097 /* If VAL is an ADDR_EXPR or a constant of pointer type, note
|
|
2098 that we may have exposed a new symbol for SSA renaming. */
|
|
2099 if (TREE_CODE (val) == ADDR_EXPR
|
|
2100 || (POINTER_TYPE_P (TREE_TYPE (op))
|
|
2101 && is_gimple_min_invariant (val)))
|
|
2102 may_have_exposed_new_symbols = true;
|
|
2103
|
|
2104 if (TREE_CODE (val) != SSA_NAME)
|
|
2105 opt_stats.num_const_prop++;
|
|
2106 else
|
|
2107 opt_stats.num_copy_prop++;
|
|
2108
|
|
2109 propagate_value (op_p, val);
|
|
2110
|
|
2111 /* And note that we modified this statement. This is now
|
|
2112 safe, even if we changed virtual operands since we will
|
|
2113 rescan the statement and rewrite its operands again. */
|
|
2114 gimple_set_modified (stmt, true);
|
|
2115 }
|
|
2116 return may_have_exposed_new_symbols;
|
|
2117 }
|
|
2118
|
|
2119 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
|
|
2120 known value for that SSA_NAME (or NULL if no value is known).
|
|
2121
|
|
2122 Propagate values from CONST_AND_COPIES into the uses, vuses and
|
|
2123 vdef_ops of STMT. */
|
|
2124
|
|
2125 static bool
|
|
2126 cprop_into_stmt (gimple stmt)
|
|
2127 {
|
|
2128 bool may_have_exposed_new_symbols = false;
|
|
2129 use_operand_p op_p;
|
|
2130 ssa_op_iter iter;
|
|
2131
|
|
2132 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_ALL_USES)
|
|
2133 {
|
|
2134 if (TREE_CODE (USE_FROM_PTR (op_p)) == SSA_NAME)
|
|
2135 may_have_exposed_new_symbols |= cprop_operand (stmt, op_p);
|
|
2136 }
|
|
2137
|
|
2138 return may_have_exposed_new_symbols;
|
|
2139 }
|
|
2140
|
|
2141 /* Optimize the statement pointed to by iterator SI.
|
|
2142
|
|
2143 We try to perform some simplistic global redundancy elimination and
|
|
2144 constant propagation:
|
|
2145
|
|
2146 1- To detect global redundancy, we keep track of expressions that have
|
|
2147 been computed in this block and its dominators. If we find that the
|
|
2148 same expression is computed more than once, we eliminate repeated
|
|
2149 computations by using the target of the first one.
|
|
2150
|
|
2151 2- Constant values and copy assignments. This is used to do very
|
|
2152 simplistic constant and copy propagation. When a constant or copy
|
|
2153 assignment is found, we map the value on the RHS of the assignment to
|
|
2154 the variable in the LHS in the CONST_AND_COPIES table. */
|
|
2155
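/* A compressed illustration of both effects (hypothetical names):

     x_1 = 4;            <-- records x_1 = 4
     y_2 = x_1 + b_3;    <-- copy/const propagation gives y_2 = 4 + b_3
     z_4 = x_1 + b_3;    <-- same propagated form, so rewritten to z_4 = y_2.  */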
|
|
2156 static void
|
|
2157 optimize_stmt (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
|
|
2158 basic_block bb, gimple_stmt_iterator si)
|
|
2159 {
|
|
2160 gimple stmt, old_stmt;
|
|
2161 bool may_optimize_p;
|
|
2162 bool may_have_exposed_new_symbols;
|
|
2163 bool modified_p = false;
|
|
2164
|
|
2165 old_stmt = stmt = gsi_stmt (si);
|
|
2166
|
|
2167 if (gimple_code (stmt) == GIMPLE_COND)
|
|
2168 canonicalize_comparison (stmt);
|
|
2169
|
|
2170 update_stmt_if_modified (stmt);
|
|
2171 opt_stats.num_stmts++;
|
|
2172 push_stmt_changes (gsi_stmt_ptr (&si));
|
|
2173
|
|
2174 if (dump_file && (dump_flags & TDF_DETAILS))
|
|
2175 {
|
|
2176 fprintf (dump_file, "Optimizing statement ");
|
|
2177 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
|
|
2178 }
|
|
2179
|
|
2180 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
|
|
2181 may_have_exposed_new_symbols = cprop_into_stmt (stmt);
|
|
2182
|
|
2183 /* If the statement has been modified with constant replacements,
|
|
2184 fold its RHS before checking for redundant computations. */
|
|
2185 if (gimple_modified_p (stmt))
|
|
2186 {
|
|
2187 tree rhs = NULL;
|
|
2188
|
|
2189 /* Try to fold the statement making sure that STMT is kept
|
|
2190 up to date. */
|
|
2191 if (fold_stmt (&si))
|
|
2192 {
|
|
2193 stmt = gsi_stmt (si);
|
|
2194
|
|
2195 if (dump_file && (dump_flags & TDF_DETAILS))
|
|
2196 {
|
|
2197 fprintf (dump_file, " Folded to: ");
|
|
2198 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
|
|
2199 }
|
|
2200 }
|
|
2201
|
|
2202 /* We only need to consider cases that can yield a gimple operand. */
|
|
2203 if (gimple_assign_single_p (stmt))
|
|
2204 rhs = gimple_assign_rhs1 (stmt);
|
|
2205 else if (gimple_code (stmt) == GIMPLE_GOTO)
|
|
2206 rhs = gimple_goto_dest (stmt);
|
|
2207 else if (gimple_code (stmt) == GIMPLE_SWITCH)
|
|
2208 /* This should never be an ADDR_EXPR. */
|
|
2209 rhs = gimple_switch_index (stmt);
|
|
2210
|
|
2211 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
|
|
2212 recompute_tree_invariant_for_addr_expr (rhs);
|
|
2213
|
|
2214 /* Constant/copy propagation above may change the set of
|
|
2215 virtual operands associated with this statement. Folding
|
|
2216 may remove the need for some virtual operands.
|
|
2217
|
|
2218 Indicate we will need to rescan and rewrite the statement. */
|
|
2219 may_have_exposed_new_symbols = true;
|
|
2220 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
|
|
2221 even if fold_stmt updated the stmt already and thus cleared
|
|
2222 gimple_modified_p flag on it. */
|
|
2223 modified_p = true;
|
|
2224 }
|
|
2225
|
|
2226 /* Check for redundant computations. Do this optimization only for
|
|
2227 assignments, calls with a LHS, conditionals and switches that have no volatile ops. */
|
|
2228 may_optimize_p = (!gimple_has_volatile_ops (stmt)
|
|
2229 && ((is_gimple_assign (stmt)
|
|
2230 && !gimple_rhs_has_side_effects (stmt))
|
|
2231 || (is_gimple_call (stmt)
|
|
2232 && gimple_call_lhs (stmt) != NULL_TREE
|
|
2233 && !gimple_rhs_has_side_effects (stmt))
|
|
2234 || gimple_code (stmt) == GIMPLE_COND
|
|
2235 || gimple_code (stmt) == GIMPLE_SWITCH));
|
|
2236
|
|
2237 if (may_optimize_p)
|
|
2238 {
|
|
2239 may_have_exposed_new_symbols |= eliminate_redundant_computations (&si);
|
|
2240 stmt = gsi_stmt (si);
|
|
2241 }
|
|
2242
|
|
2243 /* Record any additional equivalences created by this statement. */
|
|
2244 if (is_gimple_assign (stmt))
|
|
2245 record_equivalences_from_stmt (stmt, may_optimize_p);
|
|
2246
|
|
2247 /* If STMT is a COND_EXPR and it was modified, then we may know
|
|
2248 where it goes. If that is the case, then mark the CFG as altered.
|
|
2249
|
|
2250 This will cause us to later call remove_unreachable_blocks and
|
|
2251 cleanup_tree_cfg when it is safe to do so. It is not safe to
|
|
2252 clean things up here since removal of edges and such can trigger
|
|
2253 the removal of PHI nodes, which in turn can release SSA_NAMEs to
|
|
2254 the manager.
|
|
2255
|
|
2256 That's all fine and good, except that once SSA_NAMEs are released
|
|
2257 to the manager, we must not call create_ssa_name until all references
|
|
2258 to released SSA_NAMEs have been eliminated.
|
|
2259
|
|
2260 All references to the deleted SSA_NAMEs can not be eliminated until
|
|
2261 we remove unreachable blocks.
|
|
2262
|
|
2263 We can not remove unreachable blocks until after we have completed
|
|
2264 any queued jump threading.
|
|
2265
|
|
2266 We can not complete any queued jump threads until we have taken
|
|
2267 appropriate variables out of SSA form. Taking variables out of
|
|
2268 SSA form can call create_ssa_name and thus we lose.
|
|
2269
|
|
2270 Ultimately I suspect we're going to need to change the interface
|
|
2271 into the SSA_NAME manager. */
|
|
2272 if (gimple_modified_p (stmt) || modified_p)
|
|
2273 {
|
|
2274 tree val = NULL;
|
|
2275
|
|
2276 if (gimple_code (stmt) == GIMPLE_COND)
|
|
2277 val = fold_binary (gimple_cond_code (stmt), boolean_type_node,
|
|
2278 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
|
|
2279 else if (gimple_code (stmt) == GIMPLE_SWITCH)
|
|
2280 val = gimple_switch_index (stmt);
|
|
2281
|
|
2282 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
|
|
2283 cfg_altered = true;
|
|
2284
|
|
2285 /* If we simplified a statement in such a way that it can be shown
|
|
2286 that it cannot trap, update the eh information and the cfg to match. */
|
|
2287 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
|
|
2288 {
|
|
2289 bitmap_set_bit (need_eh_cleanup, bb->index);
|
|
2290 if (dump_file && (dump_flags & TDF_DETAILS))
|
|
2291 fprintf (dump_file, " Flagged to clear EH edges.\n");
|
|
2292 }
|
|
2293 }
|
|
2294
|
|
2295 if (may_have_exposed_new_symbols)
|
|
2296 {
|
|
2297 /* Queue the statement to be re-scanned after all the
|
|
2298 AVAIL_EXPRS have been processed. The change buffer stack for
|
|
2299 all the pushed statements will be processed when this queue
|
|
2300 is emptied. */
|
|
2301 VEC_safe_push (gimple_p, heap, stmts_to_rescan, gsi_stmt_ptr (&si));
|
|
2302 }
|
|
2303 else
|
|
2304 {
|
|
2305 /* Otherwise, just discard the recently pushed change buffer. If we
|
|
2306 do not, the STMTS_TO_RESCAN queue will get out of synch with the
|
|
2307 change buffer stack. */
|
|
2308 discard_stmt_changes (gsi_stmt_ptr (&si));
|
|
2309 }
|
|
2310 }
|
|
2311
|
|
2312 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
|
|
2313 If found, return its LHS. Otherwise insert STMT in the table and
|
|
2314 return NULL_TREE.
|
|
2315
|
|
2316 Also, when an expression is first inserted in the table, it is
|
|
2317 also added to AVAIL_EXPRS_STACK, so that it can be removed when
|
|
2318 we finish processing this block and its children. */
|
|
2319
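/* Typical usage sketch (hypothetical names): the first time
   "t_5 = a_1 * b_2" is seen, lookup_avail_expr (stmt, true) inserts it
   and returns NULL_TREE; when an equal expression is looked up later in
   a dominated block, the call returns t_5 (or its recorded value), which
   the caller substitutes for the redundant computation.  */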
|
|
2320 static tree
|
|
2321 lookup_avail_expr (gimple stmt, bool insert)
|
|
2322 {
|
|
2323 void **slot;
|
|
2324 tree lhs;
|
|
2325 tree temp;
|
|
2326 struct expr_hash_elt *element = XNEW (struct expr_hash_elt);
|
|
2327
|
|
2328 /* Get LHS of assignment or call, else NULL_TREE. */
|
|
2329 lhs = gimple_get_lhs (stmt);
|
|
2330
|
|
2331 initialize_hash_element (stmt, lhs, element);
|
|
2332
|
|
2333 if (dump_file && (dump_flags & TDF_DETAILS))
|
|
2334 {
|
|
2335 fprintf (dump_file, "LKUP ");
|
|
2336 print_expr_hash_elt (dump_file, element);
|
|
2337 }
|
|
2338
|
|
2339 /* Don't bother remembering constant assignments and copy operations.
|
|
2340 Constants and copy operations are handled by the constant/copy propagator
|
|
2341 in optimize_stmt. */
|
|
2342 if (element->expr.kind == EXPR_SINGLE
|
|
2343 && (TREE_CODE (element->expr.ops.single.rhs) == SSA_NAME
|
|
2344 || is_gimple_min_invariant (element->expr.ops.single.rhs)))
|
|
2345 {
|
|
2346 free (element);
|
|
2347 return NULL_TREE;
|
|
2348 }
|
|
2349
|
|
2350 /* Finally try to find the expression in the main expression hash table. */
|
|
2351 slot = htab_find_slot_with_hash (avail_exprs, element, element->hash,
|
|
2352 (insert ? INSERT : NO_INSERT));
|
|
2353 if (slot == NULL)
|
|
2354 {
|
|
2355 free (element);
|
|
2356 return NULL_TREE;
|
|
2357 }
|
|
2358
|
|
2359 if (*slot == NULL)
|
|
2360 {
|
|
2361 *slot = (void *) element;
|
|
2362
|
|
2363 if (dump_file && (dump_flags & TDF_DETAILS))
|
|
2364 {
|
|
2365 fprintf (dump_file, "2>>> ");
|
|
2366 print_expr_hash_elt (dump_file, element);
|
|
2367 }
|
|
2368
|
|
2369 VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, element);
|
|
2370 return NULL_TREE;
|
|
2371 }
|
|
2372
|
|
2373 /* Extract the LHS of the assignment so that it can be used as the current
|
|
2374 definition of another variable. */
|
|
2375 lhs = ((struct expr_hash_elt *)*slot)->lhs;
|
|
2376
|
|
2377 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
|
|
2378 use the value from the const_and_copies table. */
|
|
2379 if (TREE_CODE (lhs) == SSA_NAME)
|
|
2380 {
|
|
2381 temp = SSA_NAME_VALUE (lhs);
|
|
2382 if (temp)
|
|
2383 lhs = temp;
|
|
2384 }
|
|
2385
|
|
2386 free (element);
|
|
2387
|
|
2388 if (dump_file && (dump_flags & TDF_DETAILS))
|
|
2389 {
|
|
2390 fprintf (dump_file, "FIND: ");
|
|
2391 print_generic_expr (dump_file, lhs, 0);
|
|
2392 fprintf (dump_file, "\n");
|
|
2393 }
|
|
2394
|
|
2395 return lhs;
|
|
2396 }
|
|
2397
|
|
2398 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
|
|
2399 for expressions using the code of the expression and the SSA numbers of
|
|
2400 its operands. */
|
|
2401
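/* Illustrative consequence (hypothetical names): two loads "x_1 = *p_2"
   separated by a store through p_2 have different VUSEs and therefore
   different hashes, so the second load is not considered redundant with
   the first.  */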
|
|
2402 static hashval_t
|
|
2403 avail_expr_hash (const void *p)
|
|
2404 {
|
|
2405 gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
|
|
2406 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
|
|
2407 tree vuse;
|
|
2408 ssa_op_iter iter;
|
|
2409 hashval_t val = 0;
|
|
2410
|
|
2411 val = iterative_hash_hashable_expr (expr, val);
|
|
2412
|
|
2413 /* If the hash table entry is not associated with a statement, then we
|
|
2414 can just hash the expression and not worry about virtual operands
|
|
2415 and such. */
|
|
2416 if (!stmt)
|
|
2417 return val;
|
|
2418
|
|
2419 /* Add the SSA version numbers of every vuse operand. This is important
|
|
2420 because compound variables like arrays are not renamed in the
|
|
2421 operands. Rather, the rename is done on the virtual variable
|
|
2422 representing all the elements of the array. */
|
|
2423 FOR_EACH_SSA_TREE_OPERAND (vuse, stmt, iter, SSA_OP_VUSE)
|
|
2424 val = iterative_hash_expr (vuse, val);
|
|
2425
|
|
2426 return val;
|
|
2427 }
|
|
2428
|
|
2429 static hashval_t
|
|
2430 real_avail_expr_hash (const void *p)
|
|
2431 {
|
|
2432 return ((const struct expr_hash_elt *)p)->hash;
|
|
2433 }
|
|
2434
|
|
2435 static int
|
|
2436 avail_expr_eq (const void *p1, const void *p2)
|
|
2437 {
|
|
2438 gimple stmt1 = ((const struct expr_hash_elt *)p1)->stmt;
|
|
2439 const struct hashable_expr *expr1 = &((const struct expr_hash_elt *)p1)->expr;
|
|
2440 const struct expr_hash_elt *stamp1 = ((const struct expr_hash_elt *)p1)->stamp;
|
|
2441 gimple stmt2 = ((const struct expr_hash_elt *)p2)->stmt;
|
|
2442 const struct hashable_expr *expr2 = &((const struct expr_hash_elt *)p2)->expr;
|
|
2443 const struct expr_hash_elt *stamp2 = ((const struct expr_hash_elt *)p2)->stamp;
|
|
2444
|
|
2445 /* This case should apply only when removing entries from the table. */
|
|
2446 if (stamp1 == stamp2)
|
|
2447 return true;
|
|
2448
|
|
2449 /* FIXME tuples:
|
|
2450 We add stmts to a hash table and then modify them. To detect the case
|
|
2451 that we modify a stmt and then search for it, we assume that the hash
|
|
2452 is always modified by that change.
|
|
2453 We have to fully check why this doesn't happen on trunk or rewrite
|
|
2454 this in a more reliable (and easier to understand) way. */
|
|
2455 if (((const struct expr_hash_elt *)p1)->hash
|
|
2456 != ((const struct expr_hash_elt *)p2)->hash)
|
|
2457 return false;
|
|
2458
|
|
2459 /* In case of a collision, both RHS have to be identical and have the
|
|
2460 same VUSE operands. */
|
|
2461 if (hashable_expr_equal_p (expr1, expr2)
|
|
2462 && types_compatible_p (expr1->type, expr2->type))
|
|
2463 {
|
|
2464 /* Note that STMT1 and/or STMT2 may be NULL. */
|
|
2465 bool ret = compare_ssa_operands_equal (stmt1, stmt2, SSA_OP_VUSE);
|
|
2466 return ret;
|
|
2467 }
|
|
2468
|
|
2469 return false;
|
|
2470 }
|
|
2471
|
|
2472 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
|
|
2473 up degenerate PHIs created by or exposed by jump threading. */
|
|
2474
|
|
2475 /* Given PHI, return its RHS if the PHI is a degenerate, otherwise return
|
|
2476 NULL. */
|
|
2477
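/* For instance (hypothetical names): x_3 = PHI <7, 7, x_3> is degenerate
   with value 7, whereas x_3 = PHI <7, y_5> is not and yields NULL.  */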
|
|
2478 static tree
|
|
2479 degenerate_phi_result (gimple phi)
|
|
2480 {
|
|
2481 tree lhs = gimple_phi_result (phi);
|
|
2482 tree val = NULL;
|
|
2483 size_t i;
|
|
2484
|
|
2485 /* Ignoring arguments which are the same as LHS, if all the remaining
|
|
2486 arguments are the same, then the PHI is a degenerate and has the
|
|
2487 value of that common argument. */
|
|
2488 for (i = 0; i < gimple_phi_num_args (phi); i++)
|
|
2489 {
|
|
2490 tree arg = gimple_phi_arg_def (phi, i);
|
|
2491
|
|
2492 if (arg == lhs)
|
|
2493 continue;
|
|
2494 else if (!val)
|
|
2495 val = arg;
|
|
2496 else if (!operand_equal_p (arg, val, 0))
|
|
2497 break;
|
|
2498 }
|
|
2499 return (i == gimple_phi_num_args (phi) ? val : NULL);
|
|
2500 }
|
|
2501
|
|
2502 /* Given a statement STMT, which is either a PHI node or an assignment,
|
|
2503 remove it from the IL. */
|
|
2504
|
|
2505 static void
|
|
2506 remove_stmt_or_phi (gimple stmt)
|
|
2507 {
|
|
2508 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
|
|
2509
|
|
2510 if (gimple_code (stmt) == GIMPLE_PHI)
|
|
2511 remove_phi_node (&gsi, true);
|
|
2512 else
|
|
2513 {
|
|
2514 gsi_remove (&gsi, true);
|
|
2515 release_defs (stmt);
|
|
2516 }
|
|
2517 }
|
|
2518
|
|
2519 /* Given a statement STMT, which is either a PHI node or an assignment,
|
|
2520 return the "rhs" of the node; in the case of a non-degenerate
|
|
2521 PHI, NULL is returned. */
|
|
2522
|
|
2523 static tree
|
|
2524 get_rhs_or_phi_arg (gimple stmt)
|
|
2525 {
|
|
2526 if (gimple_code (stmt) == GIMPLE_PHI)
|
|
2527 return degenerate_phi_result (stmt);
|
|
2528 else if (gimple_assign_single_p (stmt))
|
|
2529 return gimple_assign_rhs1 (stmt);
|
|
2530 else
|
|
2531 gcc_unreachable ();
|
|
2532 }
|
|
2533
|
|
2534
|
|
2535 /* Given a statement STMT, which is either a PHI node or an assignment,
|
|
2536 return the "lhs" of the node. */
|
|
2537
|
|
2538 static tree
|
|
2539 get_lhs_or_phi_result (gimple stmt)
|
|
2540 {
|
|
2541 if (gimple_code (stmt) == GIMPLE_PHI)
|
|
2542 return gimple_phi_result (stmt);
|
|
2543 else if (is_gimple_assign (stmt))
|
|
2544 return gimple_assign_lhs (stmt);
|
|
2545 else
|
|
2546 gcc_unreachable ();
|
|
2547 }
|
|
2548
|
|
2549 /* Propagate RHS into all uses of LHS (when possible).
|
|
2550
|
|
2551 RHS and LHS are derived from STMT, which is passed in solely so
|
|
2552 that we can remove it if propagation is successful.
|
|
2553
|
|
2554 When propagating into a PHI node or into a statement which turns
|
|
2555 into a trivial copy or constant initialization, set the
|
|
2556 appropriate bit in INTERESTING_NAMEs so that we will visit those
|
|
2557 nodes as well in an effort to pick up secondary optimization
|
|
2558 opportunities. */
|
|
2559
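/* Sketch of the common case (hypothetical names): given "x_2 = 0" with
   uses "if (x_2 != 0)" and "y_3 = PHI <x_2, ...>", every use is replaced
   by 0; the condition may then have a single taken edge (the others are
   removed), the PHI result is marked interesting, and the defining
   statement is removed once no uses of x_2 remain.  */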
|
|
2560 static void
|
|
2561 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
|
|
2562 {
|
|
2563 /* First verify that propagation is valid and isn't going to move a
|
|
2564 loop variant variable outside its loop. */
|
|
2565 if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
|
|
2566 && (TREE_CODE (rhs) != SSA_NAME
|
|
2567 || ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs))
|
|
2568 && may_propagate_copy (lhs, rhs)
|
|
2569 && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
|
|
2570 {
|
|
2571 use_operand_p use_p;
|
|
2572 imm_use_iterator iter;
|
|
2573 gimple use_stmt;
|
|
2574 bool all = true;
|
|
2575
|
|
2576 /* Dump details. */
|
|
2577 if (dump_file && (dump_flags & TDF_DETAILS))
|
|
2578 {
|
|
2579 fprintf (dump_file, " Replacing '");
|
|
2580 print_generic_expr (dump_file, lhs, dump_flags);
|
|
2581 fprintf (dump_file, "' with %s '",
|
|
2582 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
|
|
2583 print_generic_expr (dump_file, rhs, dump_flags);
|
|
2584 fprintf (dump_file, "'\n");
|
|
2585 }
|
|
2586
|
|
2587 /* Walk over every use of LHS and try to replace the use with RHS.
|
|
2588 At this point the only reason why such a propagation would not
|
|
2589 be successful would be if the use occurs in an ASM_EXPR. */
|
|
2590 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
|
|
2591 {
|
|
2592
|
|
2593 /* It's not always safe to propagate into an ASM_EXPR. */
|
|
2594 if (gimple_code (use_stmt) == GIMPLE_ASM
|
|
2595 && ! may_propagate_copy_into_asm (lhs))
|
|
2596 {
|
|
2597 all = false;
|
|
2598 continue;
|
|
2599 }
|
|
2600
|
|
2601 /* Dump details. */
|
|
2602 if (dump_file && (dump_flags & TDF_DETAILS))
|
|
2603 {
|
|
2604 fprintf (dump_file, " Original statement:");
|
|
2605 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
|
|
2606 }
|
|
2607
|
|
2608 push_stmt_changes (&use_stmt);
|
|
2609
|
|
2610 /* Propagate the RHS into this use of the LHS. */
|
|
2611 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
|
|
2612 propagate_value (use_p, rhs);
|
|
2613
|
|
2614 /* Special cases to avoid useless calls into the folding
|
|
2615 routines, operand scanning, etc.
|
|
2616
|
|
2617 First, propagation into a PHI may cause the PHI to become
|
|
2618 a degenerate, so mark the PHI as interesting. No other
|
|
2619 actions are necessary.
|
|
2620
|
|
2621 Second, if we're propagating a virtual operand and the
|
|
2622 propagation does not change the underlying _DECL node for
|
|
2623 the virtual operand, then no further actions are necessary. */
|
|
2624 if (gimple_code (use_stmt) == GIMPLE_PHI
|
|
2625 || (! is_gimple_reg (lhs)
|
|
2626 && TREE_CODE (rhs) == SSA_NAME
|
|
2627 && SSA_NAME_VAR (lhs) == SSA_NAME_VAR (rhs)))
|
|
2628 {
|
|
2629 /* Dump details. */
|
|
2630 if (dump_file && (dump_flags & TDF_DETAILS))
|
|
2631 {
|
|
2632 fprintf (dump_file, " Updated statement:");
|
|
2633 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
|
|
2634 }
|
|
2635
|
|
2636 /* Propagation into a PHI may expose new degenerate PHIs,
|
|
2637 so mark the result of the PHI as interesting. */
|
|
2638 if (gimple_code (use_stmt) == GIMPLE_PHI)
|
|
2639 {
|
|
2640 tree result = get_lhs_or_phi_result (use_stmt);
|
|
2641 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
|
|
2642 }
|
|
2643
|
|
2644 discard_stmt_changes (&use_stmt);
|
|
2645 continue;
|
|
2646 }
|
|
2647
|
|
2648 /* From this point onward we are propagating into a
|
|
2649 real statement. Folding may (or may not) be possible,
|
|
2650 we may expose new operands, expose dead EH edges,
|
|
2651 etc. */
|
|
2652 /* NOTE tuples. In the tuples world, fold_stmt_inplace
|
|
2653 cannot fold a call that simplifies to a constant,
|
|
2654 because the GIMPLE_CALL must be replaced by a
|
|
2655 GIMPLE_ASSIGN, and there is no way to effect such a
|
|
2656 transformation in-place. We might want to consider
|
|
2657 using the more general fold_stmt here. */
|
|
2658 fold_stmt_inplace (use_stmt);
|
|
2659
|
|
2660 /* Sometimes propagation can expose new operands to the
|
|
2661 renamer. Note this will call update_stmt at the
|
|
2662 appropriate time. */
|
|
2663 pop_stmt_changes (&use_stmt);
|
|
2664
|
|
2665 /* Dump details. */
|
|
2666 if (dump_file && (dump_flags & TDF_DETAILS))
|
|
2667 {
|
|
2668 fprintf (dump_file, " Updated statement:");
|
|
2669 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
|
|
2670 }
|
|
2671
|
|
2672 /* If we replaced a variable index with a constant, then
|
|
2673 we would need to update the invariant flag for ADDR_EXPRs. */
|
|
2674 if (gimple_assign_single_p (use_stmt)
|
|
2675 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
|
|
2676 recompute_tree_invariant_for_addr_expr
|
|
2677 (gimple_assign_rhs1 (use_stmt));
|
|
2678
|
|
2679 /* If we cleaned up EH information from the statement,
|
|
2680 mark its containing block as needing EH cleanups. */
|
|
2681 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
|
|
2682 {
|
|
2683 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
|
|
2684 if (dump_file && (dump_flags & TDF_DETAILS))
|
|
2685 fprintf (dump_file, " Flagged to clear EH edges.\n");
|
|
2686 }
|
|
2687
|
|
2688 /* Propagation may expose new trivial copy/constant propagation
|
|
2689 opportunities. */
|
|
2690 if (gimple_assign_single_p (use_stmt)
|
|
2691 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
|
|
2692 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
|
|
2693 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
|
|
2694 {
|
|
2695 tree result = get_lhs_or_phi_result (use_stmt);
|
|
2696 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
|
|
2697 }
|
|
2698
|
|
2699 /* Propagation into these nodes may make certain edges in
|
|
2700 the CFG unexecutable. We want to identify them, as PHI nodes
|
|
2701 at the destinations of those unexecutable edges may become
|
|
2702 degenerate. */
|
|
2703 else if (gimple_code (use_stmt) == GIMPLE_COND
|
|
2704 || gimple_code (use_stmt) == GIMPLE_SWITCH
|
|
2705 || gimple_code (use_stmt) == GIMPLE_GOTO)
|
|
2706 {
|
|
2707 tree val;
|
|
2708
|
|
2709 if (gimple_code (use_stmt) == GIMPLE_COND)
|
|
2710 val = fold_binary (gimple_cond_code (use_stmt),
|
|
2711 boolean_type_node,
|
|
2712 gimple_cond_lhs (use_stmt),
|
|
2713 gimple_cond_rhs (use_stmt));
|
|
2714 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
|
|
2715 val = gimple_switch_index (use_stmt);
|
|
2716 else
|
|
2717 val = gimple_goto_dest (use_stmt);
|
|
2718
|
|
2719 if (val && is_gimple_min_invariant (val))
|
|
2720 {
|
|
2721 basic_block bb = gimple_bb (use_stmt);
|
|
2722 edge te = find_taken_edge (bb, val);
|
|
2723 edge_iterator ei;
|
|
2724 edge e;
|
|
2725 gimple_stmt_iterator gsi, psi;
|
|
2726
|
|
2727 /* Remove all outgoing edges except TE. */
|
|
2728 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
|
|
2729 {
|
|
2730 if (e != te)
|
|
2731 {
|
|
2732 /* Mark all the PHI nodes at the destination of
|
|
2733 the unexecutable edge as interesting. */
|
|
2734 for (psi = gsi_start_phis (e->dest);
|
|
2735 !gsi_end_p (psi);
|
|
2736 gsi_next (&psi))
|
|
2737 {
|
|
2738 gimple phi = gsi_stmt (psi);
|
|
2739
|
|
2740 tree result = gimple_phi_result (phi);
|
|
2741 int version = SSA_NAME_VERSION (result);
|
|
2742
|
|
2743 bitmap_set_bit (interesting_names, version);
|
|
2744 }
|
|
2745
|
|
2746 te->probability += e->probability;
|
|
2747
|
|
2748 te->count += e->count;
|
|
2749 remove_edge (e);
|
|
2750 cfg_altered = true;
|
|
2751 }
|
|
2752 else
|
|
2753 ei_next (&ei);
|
|
2754 }
|
|
2755
|
|
2756 gsi = gsi_last_bb (gimple_bb (use_stmt));
|
|
2757 gsi_remove (&gsi, true);
|
|
2758
|
|
2759 /* And fixup the flags on the single remaining edge. */
|
|
2760 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
|
|
2761 te->flags &= ~EDGE_ABNORMAL;
|
|
2762 te->flags |= EDGE_FALLTHRU;
|
|
2763 if (te->probability > REG_BR_PROB_BASE)
|
|
2764 te->probability = REG_BR_PROB_BASE;
|
|
2765 }
|
|
2766 }
|
|
2767 }
|
|
2768
|
|
2769 /* Ensure there is nothing else to do. */
|
|
2770 gcc_assert (!all || has_zero_uses (lhs));
|
|
2771
|
|
2772 /* If we were able to propagate away all uses of LHS, then
|
|
2773 we can remove STMT. */
|
|
2774 if (all)
|
|
2775 remove_stmt_or_phi (stmt);
|
|
2776 }
|
|
2777 }
|
|
2778
|
|
2779 /* STMT is either a PHI node (potentially a degenerate PHI node) or
|
|
2780 a statement that is a trivial copy or constant initialization.
|
|
2781
|
|
2782 Attempt to eliminate STMT by propagating its RHS into all uses of
|
|
2783 its LHS. This may in turn set new bits in INTERESTING_NAMES
|
|
2784 for nodes we want to revisit later.
|
|
2785
|
|
2786 All exit paths should clear INTERESTING_NAMES for the result
|
|
2787 of STMT. */
|
|
2788
|
|
2789 static void
|
|
2790 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
|
|
2791 {
|
|
2792 tree lhs = get_lhs_or_phi_result (stmt);
|
|
2793 tree rhs;
|
|
2794 int version = SSA_NAME_VERSION (lhs);
|
|
2795
|
|
2796 /* If the LHS of this statement or PHI has no uses, then we can
|
|
2797 just eliminate it. This can occur if, for example, the PHI
|
|
2798 was created by block duplication due to threading and its only
|
|
2799 use was in the conditional at the end of the block which was
|
|
2800 deleted. */
|
|
2801 if (has_zero_uses (lhs))
|
|
2802 {
|
|
2803 bitmap_clear_bit (interesting_names, version);
|
|
2804 remove_stmt_or_phi (stmt);
|
|
2805 return;
|
|
2806 }
|
|
2807
|
|
2808 /* Get the RHS of the assignment or PHI node if the PHI is a
|
|
2809 degenerate. */
|
|
2810 rhs = get_rhs_or_phi_arg (stmt);
|
|
2811 if (!rhs)
|
|
2812 {
|
|
2813 bitmap_clear_bit (interesting_names, version);
|
|
2814 return;
|
|
2815 }
|
|
2816
|
|
2817 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
|
|
2818
|
|
2819 /* Note that STMT may well have been deleted by now, so do
|
|
2820 not access it; instead use the saved version # to clear
|
|
2821 STMT's entry in the worklist. */
|
|
2822 bitmap_clear_bit (interesting_names, version);
|
|
2823 }
|
|
2824
|
|
2825 /* The first phase in degenerate PHI elimination.
|
|
2826
|
|
2827 Eliminate the degenerate PHIs in BB, then recurse on the
|
|
2828 dominator children of BB. */
|
|
2829
|
|
2830 static void
|
|
2831 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
|
|
2832 {
|
|
2833 gimple_stmt_iterator gsi;
|
|
2834 basic_block son;
|
|
2835
|
|
2836 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
|
|
2837 {
|
|
2838 gimple phi = gsi_stmt (gsi);
|
|
2839
|
|
2840 eliminate_const_or_copy (phi, interesting_names);
|
|
2841 }
|
|
2842
|
|
2843 /* Recurse into the dominator children of BB. */
|
|
2844 for (son = first_dom_son (CDI_DOMINATORS, bb);
|
|
2845 son;
|
|
2846 son = next_dom_son (CDI_DOMINATORS, son))
|
|
2847 eliminate_degenerate_phis_1 (son, interesting_names);
|
|
2848 }
|
|
2849
|
|
2850
|
|
2851 /* A very simple pass to eliminate degenerate PHI nodes from the
|
|
2852 IL. This is meant to be fast enough to be run several
|
|
2853 times in the optimization pipeline.
|
|
2854
|
|
2855 Certain optimizations, particularly those which duplicate blocks
|
|
2856 or remove edges from the CFG can create or expose PHIs which are
|
|
2857 trivial copies or constant initializations.
|
|
2858
|
|
2859 While we could pick up these optimizations in DOM or with the
|
|
2860 combination of copy-prop and CCP, those solutions are far too
|
|
2861 heavy-weight for our needs.
|
|
2862
|
|
2863 This implementation has two phases so that we can efficiently
|
|
2864 eliminate the first order degenerate PHIs and second order
|
|
2865 degenerate PHIs.
|
|
2866
|
|
2867 The first phase performs a dominator walk to identify and eliminate
|
|
2868 the vast majority of the degenerate PHIs. When a degenerate PHI
|
|
2869 is identified and eliminated any affected statements or PHIs
|
|
2870 are put on a worklist.
|
|
2871
|
|
2872 The second phase eliminates degenerate PHIs and trivial copies
|
|
2873 or constant initializations using the worklist. This is how we
|
|
2874 pick up the secondary optimization opportunities with minimal
|
|
2875 cost. */
|
|
2876
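/* Illustrative second-order case (hypothetical names): eliminating
   a_1 = PHI <5, 5> rewrites its use in b_2 = PHI <a_1, 5> into
   b_2 = PHI <5, 5>, which the worklist phase then eliminates as well.  */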
|
|
2877 static unsigned int
|
|
2878 eliminate_degenerate_phis (void)
|
|
2879 {
|
|
2880 bitmap interesting_names;
|
|
2881 bitmap interesting_names1;
|
|
2882
|
|
2883 /* Bitmap of blocks which need EH information updated. We can not
|
|
2884 update it on-the-fly as doing so invalidates the dominator tree. */
|
|
2885 need_eh_cleanup = BITMAP_ALLOC (NULL);
|
|
2886
|
|
2887 /* INTERESTING_NAMES is effectively our worklist, indexed by
|
|
2888 SSA_NAME_VERSION.
|
|
2889
|
|
2890 A set bit indicates that the statement or PHI node which
|
|
2891 defines the SSA_NAME should be (re)examined to determine if
|
|
2892 it has become a degenerate PHI or trivial const/copy propagation
|
|
2893 opportunity.
|
|
2894
|
|
2895 Experiments have shown we generally get better compilation
|
|
2896 time behavior with bitmaps rather than sbitmaps. */
|
|
2897 interesting_names = BITMAP_ALLOC (NULL);
|
|
2898 interesting_names1 = BITMAP_ALLOC (NULL);
|
|
2899
|
|
2900 calculate_dominance_info (CDI_DOMINATORS);
|
|
2901 cfg_altered = false;
|
|
2902
|
|
2903 /* First phase. Eliminate degenerate PHIs via a dominator
|
|
2904 walk of the CFG.
|
|
2905
|
|
2906 Experiments have indicated that we generally get better
|
|
2907 compile-time behavior by visiting blocks in the first
|
|
2908 phase in dominator order. Presumably this is because walking
|
|
2909 in dominator order leaves fewer PHIs for later examination
|
|
2910 by the worklist phase. */
|
|
2911 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR, interesting_names);
|
|
2912
|
|
2913 /* Second phase. Eliminate second order degenerate PHIs as well
|
|
2914 as trivial copies or constant initializations identified by
|
|
2915 the first phase or this phase. Basically we keep iterating
|
|
2916 until our set of INTERESTING_NAMEs is empty. */
|
|
2917 while (!bitmap_empty_p (interesting_names))
|
|
2918 {
|
|
2919 unsigned int i;
|
|
2920 bitmap_iterator bi;
|
|
2921
|
|
2922 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
|
|
2923 changed during the loop. Copy it to another bitmap and
|
|
2924 use that. */
|
|
2925 bitmap_copy (interesting_names1, interesting_names);
|
|
2926
|
|
2927 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
|
|
2928 {
|
|
2929 tree name = ssa_name (i);
|
|
2930
|
|
2931 /* Ignore SSA_NAMEs that have been released because
|
|
2932 their defining statement was deleted (unreachable). */
|
|
2933 if (name)
|
|
2934 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
|
|
2935 interesting_names);
|
|
2936 }
|
|
2937 }
|
|
2938
|
|
2939 if (cfg_altered)
|
|
2940 free_dominance_info (CDI_DOMINATORS);
|
|
2941
|
|
2942 /* Propagation of const and copies may make some EH edges dead. Purge
|
|
2943 such edges from the CFG as needed. */
|
|
2944 if (!bitmap_empty_p (need_eh_cleanup))
|
|
2945 {
|
|
2946 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
|
|
2947 BITMAP_FREE (need_eh_cleanup);
|
|
2948 }
|
|
2949
|
|
2950 BITMAP_FREE (interesting_names);
|
|
2951 BITMAP_FREE (interesting_names1);
|
|
2952 return 0;
|
|
2953 }
|
|
2954
|
|
2955 struct gimple_opt_pass pass_phi_only_cprop =
|
|
2956 {
|
|
2957 {
|
|
2958 GIMPLE_PASS,
|
|
2959 "phicprop", /* name */
|
|
2960 gate_dominator, /* gate */
|
|
2961 eliminate_degenerate_phis, /* execute */
|
|
2962 NULL, /* sub */
|
|
2963 NULL, /* next */
|
|
2964 0, /* static_pass_number */
|
|
2965 TV_TREE_PHI_CPROP, /* tv_id */
|
|
2966 PROP_cfg | PROP_ssa | PROP_alias, /* properties_required */
|
|
2967 0, /* properties_provided */
|
|
2968 0, /* properties_destroyed */
|
|
2969 0, /* todo_flags_start */
|
|
2970 TODO_cleanup_cfg
|
|
2971 | TODO_dump_func
|
|
2972 | TODO_ggc_collect
|
|
2973 | TODO_verify_ssa
|
|
2974 | TODO_verify_stmts
|
|
2975 | TODO_update_ssa /* todo_flags_finish */
|
|
2976 }
|
|
2977 };
|