gcc/tree-ssa-threadedge.c @ 0:a06113de4d67 (CbC_gcc repository)

description: first commit
author:      kent <kent@cr.ie.u-ryukyu.ac.jp>
date:        Fri, 17 Jul 2009 14:47:48 +0900
parents:     (none)
children:    77e2b8dfacca
/* SSA Jump Threading
   Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Jeff Law <law@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "rtl.h"
#include "tm_p.h"
#include "ggc.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "output.h"
#include "expr.h"
#include "function.h"
#include "diagnostic.h"
#include "timevar.h"
#include "tree-dump.h"
#include "tree-flow.h"
#include "domwalk.h"
#include "real.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "langhooks.h"
#include "params.h"

/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Return TRUE if we may be able to thread an incoming edge into
   BB to an outgoing edge from BB.  Return FALSE otherwise.  */

bool
potentially_threadable_block (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* If BB has a single successor or a single predecessor, then
     there is no threading opportunity.  */
  if (single_succ_p (bb) || single_pred_p (bb))
    return false;

  /* If BB does not end with a conditional, switch or computed goto,
     then there is no threading opportunity.  */
  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi)
      || ! gsi_stmt (gsi)
      || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
          && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
    return false;

  return true;
}
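
/* A minimal illustrative sketch (hypothetical example, not GCC
   internals) of a source-level shape the test above admits: the join
   block in the middle of JUMP_THREAD_EXAMPLE has two predecessors and
   two successors and ends in a conditional whose outcome is already
   known along each incoming edge, so the threader can copy the join
   block and wire each incoming edge straight to the correct arm.  */

static int
jump_thread_example (int x)
{
  int flag = 0;
  if (x > 10)
    flag = 1;
  /* Join block: FLAG is 1 along one incoming edge and 0 along the
     other, so "if (flag)" is decidable per-edge at compile time.  */
  if (flag)
    return 1;
  return 0;
}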

/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple stmt)
{
  imm_use_iterator imm_iter;
  gimple use_stmt;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
    {
      use_stmt = USE_STMT (use_p);
      if (use_stmt != stmt
          && gimple_assign_single_p (use_stmt)
          && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
          && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
          && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
        {
          return gimple_assign_lhs (use_stmt);
        }
    }
  return op;
}
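
/* For example (hypothetical GIMPLE sketch): VRP may have inserted

       x_7 = ASSERT_EXPR <x_3, x_3 > 0>

   in a block dominating BB.  The lookup above then returns x_7 in
   place of x_3, so later simplification can exploit the range fact
   attached to x_7.  */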

/* We record temporary equivalences created by PHI nodes or
   statements within the target block.  Doing so allows us to
   identify more jump threading opportunities, even in blocks
   with side effects.

   We keep track of those temporary equivalences in a stack
   structure so that we can unwind them when we're done processing
   a particular edge.  This routine handles unwinding the data
   structures.  */

static void
remove_temporary_equivalences (VEC(tree, heap) **stack)
{
  while (VEC_length (tree, *stack) > 0)
    {
      tree prev_value, dest;

      dest = VEC_pop (tree, *stack);

      /* A NULL value indicates we should stop unwinding, otherwise
         pop off the next entry as they're recorded in pairs.  */
      if (dest == NULL)
        break;

      prev_value = VEC_pop (tree, *stack);
      SSA_NAME_VALUE (dest) = prev_value;
    }
}

/* Record a temporary equivalence, saving enough information so that
   we can restore the state of recorded equivalences when we're
   done processing the current edge.  */

static void
record_temporary_equivalence (tree x, tree y, VEC(tree, heap) **stack)
{
  tree prev_x = SSA_NAME_VALUE (x);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      y = tmp ? tmp : y;
    }

  SSA_NAME_VALUE (x) = y;
  VEC_reserve (tree, heap, *stack, 2);
  VEC_quick_push (tree, *stack, prev_x);
  VEC_quick_push (tree, *stack, x);
}
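
/* A minimal sketch of the stack discipline assumed here: after the
   caller pushes a NULL marker and we record equivalences for x1 and
   x2, the stack (top at the right) looks like

       ... NULL prev_x1 x1 prev_x2 x2

   remove_temporary_equivalences then pops (x2, prev_x2) and
   (x1, prev_x1), restoring each SSA_NAME_VALUE, and stops once it
   consumes the NULL marker.  */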

/* Record temporary equivalences created by PHIs at the target of the
   edge E.  Record unwind information for the equivalences onto STACK.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

static bool
record_temporary_equivalences_from_phis (edge e, VEC(tree, heap) **stack)
{
  gimple_stmt_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
         and it is set by a PHI in E->dest, then we can not thread
         through E->dest.  */
      if (src != dst
          && TREE_CODE (src) == SSA_NAME
          && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
          && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
        return false;
      /* We consider any non-virtual PHI as a statement since it
         could result in a constant assignment or copy operation.  */
      if (is_gimple_reg (dst))
        stmt_count++;

      record_temporary_equivalence (dst, src, stack);
    }
  return true;
}
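
/* A hypothetical GIMPLE sketch of the rejected case above: suppose
   E->dest is a loop header reached via backedge E, with

       a_1 = PHI <a_0 (entry), b_2 (E)>
       b_2 = PHI <b_0 (entry), b_3 (E)>

   The argument of A_1 along E is B_2, which is itself defined by a
   PHI in E->dest.  Because PHIs evaluate in parallel, the value of
   B_2 flowing in along E is the previous iteration's value, so
   recording A_1 = B_2 as a simple equivalence would be wrong; we
   refuse to thread instead.  */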

/* Fold the RHS of an assignment statement and return it as a tree.
   May return NULL_TREE if no simplification is possible.  */

static tree
fold_assignment_stmt (gimple stmt)
{
  enum tree_code subcode = gimple_assign_rhs_code (stmt);

  switch (get_gimple_rhs_class (subcode))
    {
    case GIMPLE_SINGLE_RHS:
      {
        tree rhs = gimple_assign_rhs1 (stmt);

        if (TREE_CODE (rhs) == COND_EXPR)
          {
            /* Sadly, we have to handle conditional assignments specially
               here, because fold expects all the operands of an expression
               to be folded before the expression itself is folded, but we
               can't just substitute the folded condition here.  */
            tree cond = fold (COND_EXPR_COND (rhs));
            if (cond == boolean_true_node)
              rhs = COND_EXPR_THEN (rhs);
            else if (cond == boolean_false_node)
              rhs = COND_EXPR_ELSE (rhs);
          }

        return fold (rhs);
      }
      break;
    case GIMPLE_UNARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        return fold_unary (subcode, TREE_TYPE (lhs), op0);
      }
      break;
    case GIMPLE_BINARY_RHS:
      {
        tree lhs = gimple_assign_lhs (stmt);
        tree op0 = gimple_assign_rhs1 (stmt);
        tree op1 = gimple_assign_rhs2 (stmt);
        return fold_binary (subcode, TREE_TYPE (lhs), op0, op1);
      }
      break;
    default:
      gcc_unreachable ();
    }
}
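
/* For example (hypothetical): if temporary copy propagation has turned
   the statement "x_5 = a_1 + b_2" into "x_5 = 3 + 4", the
   GIMPLE_BINARY_RHS case above folds the RHS to the invariant 7, which
   the caller can then record as a context sensitive equivalence for
   x_5.  */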

/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   statements using pass specific information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.  */

static gimple
record_temporary_equivalences_from_stmts_at_dest (edge e,
                                                  VEC(tree, heap) **stack,
                                                  tree (*simplify) (gimple,
                                                                    gimple))
{
  gimple stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS);

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree cached_lhs = NULL;

      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP || gimple_code (stmt) == GIMPLE_LABEL)
        continue;

      /* If the statement has volatile operands, then we assume we
         can not thread through this block.  This is overly
         conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM && gimple_asm_volatile_p (stmt))
        return NULL;

      /* If duplicating this block is going to cause too much code
         expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
        return NULL;

      /* If this is not a statement that sets an SSA_NAME to a new
         value, then do not try to simplify this statement as it will
         not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
           || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
          && (gimple_code (stmt) != GIMPLE_CALL
              || gimple_call_lhs (stmt) == NULL_TREE
              || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
        continue;

      /* The result of __builtin_object_size depends on all the arguments
         of a phi node.  Temporarily using only one edge produces invalid
         results.  For example

         if (x < 6)
           goto l;
         else
           goto l;

         l:
         r = PHI <&w[2].a[1](2), &a.a[6](3)>
         __builtin_object_size (r, 0)

         The result of __builtin_object_size is defined to be the maximum of
         remaining bytes.  If we use only one edge on the phi, the result will
         change to be the remaining bytes for the corresponding phi argument.

         Similarly for __builtin_constant_p:

         r = PHI <1(2), 2(3)>
         __builtin_constant_p (r)

         Both PHI arguments are constant, but x ? 1 : 2 is still not
         constant.  */

      if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl
              && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
                  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
            continue;
        }
      /* At this point we have a statement which assigns an RHS to an
         SSA_VAR on the LHS.  We want to try and simplify this statement
         to expose more context sensitive equivalences which in turn may
         allow us to simplify the condition at the end of the block.

         Handle simple copy operations as well as implied copies from
         ASSERT_EXPRs.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
        cached_lhs = gimple_assign_rhs1 (stmt);
      else if (gimple_assign_single_p (stmt)
               && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
        cached_lhs = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0);
      else
        {
          /* A statement that is not a trivial copy or ASSERT_EXPR.
             We're going to temporarily copy propagate the operands
             and see if that allows us to simplify this statement.  */
          tree *copy;
          ssa_op_iter iter;
          use_operand_p use_p;
          unsigned int num, i = 0;

          num = NUM_SSA_OPERANDS (stmt, (SSA_OP_USE | SSA_OP_VUSE));
          copy = XCNEWVEC (tree, num);

          /* Make a copy of the uses & vuses into USES_COPY, then cprop into
             the operands.  */
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            {
              tree tmp = NULL;
              tree use = USE_FROM_PTR (use_p);

              copy[i++] = use;
              if (TREE_CODE (use) == SSA_NAME)
                tmp = SSA_NAME_VALUE (use);
              if (tmp)
                SET_USE (use_p, tmp);
            }

          /* Try to fold/lookup the new expression.  Inserting the
             expression into the hash table is unlikely to help.  */
          if (is_gimple_call (stmt))
            cached_lhs = fold_call_stmt (stmt, false);
          else
            cached_lhs = fold_assignment_stmt (stmt);

          if (!cached_lhs
              || (TREE_CODE (cached_lhs) != SSA_NAME
                  && !is_gimple_min_invariant (cached_lhs)))
            cached_lhs = (*simplify) (stmt, stmt);

          /* Restore the statement's original uses/defs.  */
          i = 0;
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE | SSA_OP_VUSE)
            SET_USE (use_p, copy[i++]);

          free (copy);
        }

      /* Record the context sensitive equivalence if we were able
         to simplify this statement.  */
      if (cached_lhs
          && (TREE_CODE (cached_lhs) == SSA_NAME
              || is_gimple_min_invariant (cached_lhs)))
        record_temporary_equivalence (gimple_get_lhs (stmt), cached_lhs, stack);
    }
  return stmt;
}

/* Simplify the control statement at the end of the block E->dest.

   To avoid allocating memory unnecessarily, a scratch GIMPLE_COND
   is available to use/clobber in DUMMY_COND.

   Use SIMPLIFY (a pointer to a callback function) to further simplify
   a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  */

static tree
simplify_control_stmt_condition (edge e,
                                 gimple stmt,
                                 gimple dummy_cond,
                                 tree (*simplify) (gimple, gimple),
                                 bool handle_dominating_asserts)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  */
      if (TREE_CODE (op0) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op0);
          if (tmp)
            op0 = tmp;
        }

      if (TREE_CODE (op1) == SSA_NAME)
        {
          tree tmp = SSA_NAME_VALUE (op1);
          if (tmp)
            op1 = tmp;
        }

      if (handle_dominating_asserts)
        {
          /* Now see if the operand was consumed by an ASSERT_EXPR
             which dominates E->src.  If so, we want to replace the
             operand with the LHS of the ASSERT_EXPR.  */
          if (TREE_CODE (op0) == SSA_NAME)
            op0 = lhs_of_dominating_assert (op0, e->src, stmt);

          if (TREE_CODE (op1) == SSA_NAME)
            op1 = lhs_of_dominating_assert (op1, e->src, stmt);
        }

      /* We may need to canonicalize the comparison.  For
         example, op0 might be a constant while op1 is an
         SSA_NAME.  Failure to canonicalize will cause us to
         miss threading opportunities.  */
      if (tree_swap_operands_p (op0, op1, false))
        {
          tree tmp;
          cond_code = swap_tree_comparison (cond_code);
          tmp = op0;
          op0 = op1;
          op1 = tmp;
        }

      /* Stuff the operator and operands into our dummy conditional
         expression.  */
      gimple_cond_set_code (dummy_cond, cond_code);
      gimple_cond_set_lhs (dummy_cond, op0);
      gimple_cond_set_rhs (dummy_cond, op1);
      /* We absolutely do not care about any type conversions; we only
         care about a zero/nonzero value.  */
      fold_defer_overflow_warnings ();

      cached_lhs = fold_binary (cond_code, boolean_type_node, op0, op1);
      if (cached_lhs)
        while (CONVERT_EXPR_P (cached_lhs))
          cached_lhs = TREE_OPERAND (cached_lhs, 0);

      fold_undefer_overflow_warnings ((cached_lhs
                                       && is_gimple_min_invariant (cached_lhs)),
                                      stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

      /* If we have not simplified the condition down to an invariant,
         then use the pass specific callback to simplify the condition.  */
      if (!cached_lhs
          || !is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (dummy_cond, stmt);

      return cached_lhs;
    }

  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (stmt);
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

         It is possible to get loops in the SSA_NAME_VALUE chains
         (consider threading the backedge of a loop where we have
         a loop invariant SSA_NAME used in the condition).  */
      if (cached_lhs
          && TREE_CODE (cached_lhs) == SSA_NAME
          && SSA_NAME_VALUE (cached_lhs))
        cached_lhs = SSA_NAME_VALUE (cached_lhs);

      /* If we're dominated by a suitable ASSERT_EXPR, then
         update CACHED_LHS appropriately.  */
      if (handle_dominating_asserts && TREE_CODE (cached_lhs) == SSA_NAME)
        cached_lhs = lhs_of_dominating_assert (cached_lhs, e->src, stmt);

      /* If we haven't simplified to an invariant yet, then use the
         pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
        cached_lhs = (*simplify) (stmt, stmt);
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}

/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   Note it is quite common for the first block inside a loop to
   end with a conditional which is either always true or always
   false when reached via the loop backedge.  Thus we do not want
   to blindly disable threading across a loop backedge.

   DUMMY_COND is a shared cond_expr used by condition simplification as scratch,
   to avoid allocating memory.

   HANDLE_DOMINATING_ASSERTS is true if we should try to replace operands of
   the simplified condition with left-hand sides of ASSERT_EXPRs they are
   used in.

   STACK is used to undo temporary equivalences created during the walk of
   E->dest.

   SIMPLIFY is a pass-specific function used to simplify statements.  */

void
thread_across_edge (gimple dummy_cond,
                    edge e,
                    bool handle_dominating_asserts,
                    VEC(tree, heap) **stack,
                    tree (*simplify) (gimple, gimple))
{
  gimple stmt;

  /* If E is a backedge, then we want to verify that the COND_EXPR,
     SWITCH_EXPR or GOTO_EXPR at the end of e->dest is not affected
     by any statements in e->dest.  If it is affected, then it is not
     safe to thread this edge.  */
  if (e->flags & EDGE_DFS_BACK)
    {
      ssa_op_iter iter;
      use_operand_p use_p;
      gimple last = gsi_stmt (gsi_last_bb (e->dest));

      FOR_EACH_SSA_USE_OPERAND (use_p, last, iter, SSA_OP_USE | SSA_OP_VUSE)
        {
          tree use = USE_FROM_PTR (use_p);

          if (TREE_CODE (use) == SSA_NAME
              && gimple_code (SSA_NAME_DEF_STMT (use)) != GIMPLE_PHI
              && gimple_bb (SSA_NAME_DEF_STMT (use)) == e->dest)
            goto fail;
        }
    }

  stmt_count = 0;

  /* PHIs create temporary equivalences.  */
  if (!record_temporary_equivalences_from_phis (e, stack))
    goto fail;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  stmt = record_temporary_equivalences_from_stmts_at_dest (e, stack, simplify);
  if (!stmt)
    goto fail;

  /* If we stopped at a GIMPLE_COND, GIMPLE_SWITCH or computed
     GIMPLE_GOTO, see if we know which arm will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt, dummy_cond, simplify,
                                              handle_dominating_asserts);

      if (cond && is_gimple_min_invariant (cond))
        {
          edge taken_edge = find_taken_edge (e->dest, cond);
          basic_block dest = (taken_edge ? taken_edge->dest : NULL);

          if (dest == e->dest)
            goto fail;

          remove_temporary_equivalences (stack);
          register_jump_thread (e, taken_edge);
        }
    }

 fail:
  remove_temporary_equivalences (stack);
}
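
/* A rough sketch (with hypothetical names, modeled on how a client pass
   such as DOM is assumed to drive this entry point): the pass builds
   one scratch GIMPLE_COND up front, pushes a NULL unwind marker onto
   its equivalence stack, and then calls thread_across_edge for each
   candidate edge with its own simplifier callback:

       static VEC(tree, heap) *stack;

       gimple dummy_cond = gimple_build_cond (NE_EXPR, integer_zero_node,
                                              integer_zero_node, NULL, NULL);
       VEC_safe_push (tree, heap, stack, NULL_TREE);
       thread_across_edge (dummy_cond, e, false, &stack,
                           my_simplify_callback);

   Registered threads are later realized by the jump threading updater
   (see tree-ssa-threadupdate.c).  */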