CbC_gcc: annotate gcc/tree-ssa-threadupdate.c @ 67:f6334be47118

changeset: update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
author:    nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
date:      Tue, 22 Mar 2011 17:18:12 +0900
parents:   b7f97abdc517
children:  04ced10e8804
/* Thread edges through blocks and update the control flow and SSA graphs.
   Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010 Free Software Foundation,
   Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "output.h"
#include "function.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "tree-pass.h"
#include "cfgloop.h"

/* Given a block B, update the CFG and SSA graph to reflect redirecting
   one or more in-edges to B to instead reach the destination of an
   out-edge from B while preserving any side effects in B.

   i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
   side effects of executing B.

     1. Make a copy of B (including its outgoing edges and statements).  Call
        the copy B'.  Note B' has no incoming edges or PHIs at this time.

     2. Remove the control statement at the end of B' and all outgoing edges
        except B'->C.

     3. Add a new argument to each PHI in C with the same value as the existing
        argument associated with edge B->C.  Associate the new PHI arguments
        with the edge B'->C.

     4. For each PHI in B, find or create a PHI in B' with an identical
        PHI_RESULT.  Add an argument to the PHI in B' which has the same
        value as the PHI in B associated with the edge A->B.  Associate
        the new argument in the PHI in B' with the edge A->B.

     5. Change the edge A->B to A->B'.

        5a. This automatically deletes any PHI arguments associated with the
            edge A->B in B.

        5b. This automatically associates each new argument added in step 4
            with the edge A->B'.

     6. Repeat for other incoming edges into B.

     7. Put the duplicated resources in B and all the B' blocks into SSA form.

   Note that block duplication can be minimized by first collecting the
   set of unique destination blocks that the incoming edges should
   be threaded to.  Block duplication can be further minimized by using
   B instead of creating B' for one destination if all edges into B are
   going to be threaded to a successor of B.

   We further reduce the number of edges and statements we create by
   not copying all the outgoing edges and the control statement in
   step #1.  We instead create a template block without the outgoing
   edges and duplicate the template.  */
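
/* A schematic illustration of the transformation above (a sketch, not
   from the original sources): suppose A1 reaches C through B and A2
   reaches D through B, where B ends in a conditional.

        A1  A2                    A1   A2
         \  /                     |    |
          B        becomes        B    B'
         / \                      |    |
        C   D                     C    D

   B' is a copy of B carrying B's side effects; the conditionals at the
   ends of B and B' are gone, and B itself is reused for one of the
   destinations because every in-edge was threaded.  */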


/* Steps #5 and #6 of the above algorithm are best implemented by walking
   all the incoming edges which thread to the same destination edge at
   the same time.  That avoids lots of table lookups to get information
   for the destination edge.

   To realize that implementation we create a list of incoming edges
   which thread to the same outgoing edge.  Thus to implement steps
   #5 and #6 we traverse our hash table of outgoing edge information.
   For each entry we walk the list of incoming edges which thread to
   the current outgoing edge.  */

struct el
{
  edge e;
  struct el *next;
};

/* Main data structure recording information regarding B's duplicate
   blocks.  */

/* We need to efficiently record the unique thread destinations of this
   block and specific information associated with those destinations.  We
   may have many incoming edges threaded to the same outgoing edge.  This
   can be naturally implemented with a hash table.  */

struct redirection_data
{
  /* A duplicate of B with the trailing control statement removed and which
     targets a single successor of B.  */
  basic_block dup_block;

  /* An outgoing edge from B.  DUP_BLOCK will have OUTGOING_EDGE->dest as
     its single successor.  */
  edge outgoing_edge;

  /* A list of incoming edges which we want to thread to
     OUTGOING_EDGE->dest.  */
  struct el *incoming_edges;

  /* Flag indicating whether or not we should create a duplicate block
     for this thread destination.  This is only true if we are threading
     all incoming edges and thus are using BB itself as a duplicate block.  */
  bool do_not_duplicate;
};
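
/* Schematically, once every threadable incoming edge has been recorded,
   the hash table holds one redirection_data entry per unique outgoing
   edge, each chaining its incoming edges through struct el, e.g.:

     B->C : incoming_edges = { A1->B, A2->B }   (served by dup_block B')
     B->D : incoming_edges = { A3->B }          (or by B itself when
                                                 do_not_duplicate is set)  */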

/* Main data structure to hold information for duplicates of BB.  */
static htab_t redirection_data;

/* Data structure of information to pass to hash table traversal routines.  */
struct local_info
{
  /* The current block we are working on.  */
  basic_block bb;

  /* A template copy of BB with no outgoing edges or control statement that
     we use for creating copies.  */
  basic_block template_block;

  /* TRUE if we thread one or more jumps, FALSE otherwise.  */
  bool jumps_threaded;
};

/* Passes which use the jump threading code register jump threading
   opportunities as they are discovered.  We keep the registered
   jump threading opportunities in this vector as edge pairs
   (original_edge, target_edge).  */
static VEC(edge,heap) *threaded_edges;


/* Jump threading statistics.  */

struct thread_stats_d
{
  unsigned long num_threaded_edges;
};

struct thread_stats_d thread_stats;


/* Remove the last statement in block BB if it is a control statement.
   Also remove all outgoing edges except the edge which reaches DEST_BB.
   If DEST_BB is NULL, then remove all outgoing edges.  */

static void
remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
{
  gimple_stmt_iterator gsi;
  edge e;
  edge_iterator ei;

  gsi = gsi_last_bb (bb);

  /* If the duplicate ends with a control statement, then remove it.

     Note that if we are duplicating the template block rather than the
     original basic block, then the duplicate might not have any real
     statements in it.  */
  if (!gsi_end_p (gsi)
      && gsi_stmt (gsi)
      && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
          || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
          || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
    gsi_remove (&gsi, true);

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (e->dest != dest_bb)
        remove_edge (e);
      else
        ei_next (&ei);
    }
}

/* Create a duplicate of BB which only reaches the destination of the edge
   stored in RD.  Record the duplicate block in RD.  */

static void
create_block_for_threading (basic_block bb, struct redirection_data *rd)
{
  /* We can use the generic block duplication code and simply remove
     the stuff we do not need.  */
  rd->dup_block = duplicate_block (bb, NULL, NULL);

  /* Zero out the profile, since the block is unreachable for now.  */
  rd->dup_block->frequency = 0;
  rd->dup_block->count = 0;

  /* The call to duplicate_block will copy everything, including the
     useless COND_EXPR or SWITCH_EXPR at the end of BB.  We just remove
     the useless COND_EXPR or SWITCH_EXPR here rather than having a
     specialized block copier.  We also remove all outgoing edges
     from the duplicate block.  The appropriate edge will be created
     later.  */
  remove_ctrl_stmt_and_useless_edges (rd->dup_block, NULL);
}

/* Hashing and equality routines for our hash table.  */
static hashval_t
redirection_data_hash (const void *p)
{
  edge e = ((const struct redirection_data *)p)->outgoing_edge;
  return e->dest->index;
}

static int
redirection_data_eq (const void *p1, const void *p2)
{
  edge e1 = ((const struct redirection_data *)p1)->outgoing_edge;
  edge e2 = ((const struct redirection_data *)p2)->outgoing_edge;

  return e1 == e2;
}
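
/* Note that redirection_data_hash keys on the destination block's index,
   so two distinct outgoing edges that happen to share a destination hash
   to the same bucket; redirection_data_eq then distinguishes them by
   comparing the edge pointers themselves.  */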

/* Given an outgoing edge E lookup and return its entry in our hash table.

   If INSERT is true, then we insert the entry into the hash table if
   it is not already present.  INCOMING_EDGE is added to the list of incoming
   edges associated with E in the hash table.  */

static struct redirection_data *
lookup_redirection_data (edge e, edge incoming_edge, enum insert_option insert)
{
  void **slot;
  struct redirection_data *elt;

  /* Build a hash table element so we can see if E is already
     in the table.  */
  elt = XNEW (struct redirection_data);
  elt->outgoing_edge = e;
  elt->dup_block = NULL;
  elt->do_not_duplicate = false;
  elt->incoming_edges = NULL;

  slot = htab_find_slot (redirection_data, elt, insert);

  /* This will only happen if INSERT is false and the entry is not
     in the hash table.  */
  if (slot == NULL)
    {
      free (elt);
      return NULL;
    }

  /* This will only happen if E was not in the hash table and
     INSERT is true.  */
  if (*slot == NULL)
    {
      *slot = (void *)elt;
      elt->incoming_edges = XNEW (struct el);
      elt->incoming_edges->e = incoming_edge;
      elt->incoming_edges->next = NULL;
      return elt;
    }
  /* E was in the hash table.  */
  else
    {
      /* Free ELT as we do not need it anymore, we will extract the
         relevant entry from the hash table itself.  */
      free (elt);

      /* Get the entry stored in the hash table.  */
      elt = (struct redirection_data *) *slot;

      /* If insertion was requested, then we need to add INCOMING_EDGE
         to the list of incoming edges associated with E.  */
      if (insert)
        {
          struct el *el = XNEW (struct el);
          el->next = elt->incoming_edges;
          el->e = incoming_edge;
          elt->incoming_edges = el;
        }

      return elt;
    }
}
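
/* Both calling conventions appear in thread_block below: recording a
   request uses lookup_redirection_data (e2, e, INSERT) to add E to E2's
   incoming-edge list, while the do_not_duplicate marking uses
   lookup_redirection_data (e, NULL, NO_INSERT) to fetch an existing
   entry without modifying the table.  */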

/* Given a duplicate block and its single destination (both stored
   in RD), create an edge between the duplicate and its single
   destination.

   Add an additional argument to any PHI nodes at the single
   destination.  */

static void
create_edge_and_update_destination_phis (struct redirection_data *rd)
{
  edge e = make_edge (rd->dup_block, rd->outgoing_edge->dest, EDGE_FALLTHRU);
  gimple_stmt_iterator gsi;

  rescan_loop_exit (e, true, false);
  e->probability = REG_BR_PROB_BASE;
  e->count = rd->dup_block->count;
  e->aux = rd->outgoing_edge->aux;

  /* If there are any PHI nodes at the destination of the outgoing edge
     from the duplicate block, then we will need to add a new argument
     to them.  The argument should have the same value as the argument
     associated with the outgoing edge stored in RD.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      source_location locus;
      int indx = rd->outgoing_edge->dest_idx;

      locus = gimple_phi_arg_location (phi, indx);
      add_phi_arg (phi, gimple_phi_arg_def (phi, indx), e, locus);
    }
}

/* Hash table traversal callback routine to create duplicate blocks.  */

static int
create_duplicates (void **slot, void *data)
{
  struct redirection_data *rd = (struct redirection_data *) *slot;
  struct local_info *local_info = (struct local_info *)data;

  /* If this entry should not have a duplicate created, then there's
     nothing to do.  */
  if (rd->do_not_duplicate)
    return 1;

  /* Create a template block if we have not done so already.  Otherwise
     use the template to create a new block.  */
  if (local_info->template_block == NULL)
    {
      create_block_for_threading (local_info->bb, rd);
      local_info->template_block = rd->dup_block;

      /* We do not create any outgoing edges for the template.  We will
         take care of that in a later traversal.  That way we do not
         create edges that are going to just be deleted.  */
    }
  else
    {
      create_block_for_threading (local_info->template_block, rd);

      /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
         block.  */
      create_edge_and_update_destination_phis (rd);
    }

  /* Keep walking the hash table.  */
  return 1;
}

/* We did not create any outgoing edges for the template block during
   block creation.  This hash table traversal callback creates the
   outgoing edge for the template block.  */

static int
fixup_template_block (void **slot, void *data)
{
  struct redirection_data *rd = (struct redirection_data *) *slot;
  struct local_info *local_info = (struct local_info *)data;

  /* If this is the template block, then create its outgoing edges
     and halt the hash table traversal.  */
  if (rd->dup_block && rd->dup_block == local_info->template_block)
    {
      create_edge_and_update_destination_phis (rd);
      return 0;
    }

  return 1;
}

/* Hash table traversal callback to redirect each incoming edge
   associated with this hash table element to its new destination.  */

static int
redirect_edges (void **slot, void *data)
{
  struct redirection_data *rd = (struct redirection_data *) *slot;
  struct local_info *local_info = (struct local_info *)data;
  struct el *next, *el;

  /* Walk over all the incoming edges associated with this
     hash table entry.  */
  for (el = rd->incoming_edges; el; el = next)
    {
      edge e = el->e;

      /* Go ahead and free this element from the list.  Doing this now
         avoids the need for another list walk when we destroy the hash
         table.  */
      next = el->next;
      free (el);

      /* Go ahead and clear E->aux.  It's not needed anymore and failure
         to clear it will cause all kinds of unpleasant problems later.  */
      e->aux = NULL;

      thread_stats.num_threaded_edges++;

      if (rd->dup_block)
        {
          edge e2;

          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
                     e->src->index, e->dest->index, rd->dup_block->index);

          rd->dup_block->count += e->count;
          rd->dup_block->frequency += EDGE_FREQUENCY (e);
          EDGE_SUCC (rd->dup_block, 0)->count += e->count;
          /* Redirect the incoming edge to the appropriate duplicate
             block.  */
          e2 = redirect_edge_and_branch (e, rd->dup_block);
          gcc_assert (e == e2);
          flush_pending_stmts (e2);
        }
      else
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
                     e->src->index, e->dest->index, local_info->bb->index);

          /* We are using BB as the duplicate.  Remove the unnecessary
             outgoing edges and statements from BB.  */
          remove_ctrl_stmt_and_useless_edges (local_info->bb,
                                              rd->outgoing_edge->dest);

          /* Fixup the flags on the single remaining edge.  */
          single_succ_edge (local_info->bb)->flags
            &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
          single_succ_edge (local_info->bb)->flags |= EDGE_FALLTHRU;

          /* And adjust count and frequency on BB.  */
          local_info->bb->count = e->count;
          local_info->bb->frequency = EDGE_FREQUENCY (e);
        }
    }

  /* Indicate that we actually threaded one or more jumps.  */
  if (rd->incoming_edges)
    local_info->jumps_threaded = true;

  return 1;
}

/* Return true if this block has no executable statements other than
   a simple ctrl flow instruction.  When the number of outgoing edges
   is one, this is equivalent to a "forwarder" block.  */

static bool
redirection_block_p (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Advance to the first executable statement.  */
  gsi = gsi_start_bb (bb);
  while (!gsi_end_p (gsi)
         && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
             || is_gimple_debug (gsi_stmt (gsi))
             || gimple_nop_p (gsi_stmt (gsi))))
    gsi_next (&gsi);

  /* Check if this is an empty block.  */
  if (gsi_end_p (gsi))
    return true;

  /* Test that we've reached the terminating control statement.  */
  return gsi_stmt (gsi)
         && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
             || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
             || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
}
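
/* For example, a block whose only real statement is the terminating
   conditional, such as (in GIMPLE dump form; an illustrative sketch):

     <bb 4>:
     if (x_1 > 5)
       goto <bb 5>;
     else
       goto <bb 6>;

   satisfies redirection_block_p; labels, debug statements and nops
   preceding the conditional do not disqualify it.  */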

/* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
   is reached via one or more specific incoming edges, we know which
   outgoing edge from BB will be traversed.

   We want to redirect those incoming edges to the target of the
   appropriate outgoing edge.  Doing so avoids a conditional branch
   and may expose new optimization opportunities.  Note that we have
   to update dominator tree and SSA graph after such changes.

   The key to keeping the SSA graph update manageable is to duplicate
   the side effects occurring in BB so that those side effects still
   occur on the paths which bypass BB after redirecting edges.

   We accomplish this by creating duplicates of BB and arranging for
   the duplicates to unconditionally pass control to one specific
   successor of BB.  We then revector the incoming edges into BB to
   the appropriate duplicate of BB.

   If NOLOOP_ONLY is true, we only perform the threading as long as it
   does not affect the structure of the loops in a nontrivial way.  */

static bool
thread_block (basic_block bb, bool noloop_only)
{
  /* E is an incoming edge into BB that we may or may not want to
     redirect to a duplicate of BB.  */
  edge e, e2;
  edge_iterator ei;
  struct local_info local_info;
  struct loop *loop = bb->loop_father;

  /* ALL indicates whether or not all incoming edges into BB should
     be threaded to a duplicate of BB.  */
  bool all = true;

  /* To avoid scanning a linear array for the element we need we instead
     use a hash table.  For normal code there should be no noticeable
     difference.  However, if we have a block with a large number of
     incoming and outgoing edges such linear searches can get expensive.  */
  redirection_data = htab_create (EDGE_COUNT (bb->succs),
                                  redirection_data_hash,
                                  redirection_data_eq,
                                  free);

  /* If we thread the latch of the loop to its exit, the loop ceases to
     exist.  Make sure we do not restrict ourselves in order to preserve
     this loop.  */
  if (loop->header == bb)
    {
      e = loop_latch_edge (loop);
      e2 = (edge) e->aux;

      if (e2 && loop_exit_edge_p (loop, e2))
        {
          loop->header = NULL;
          loop->latch = NULL;
        }
    }

  /* Record each unique threaded destination into a hash table for
     efficient lookups.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      e2 = (edge) e->aux;

      if (!e2
          /* If NOLOOP_ONLY is true, we only allow threading through the
             header of a loop to exit edges.  */
          || (noloop_only
              && bb == bb->loop_father->header
              && !loop_exit_edge_p (bb->loop_father, e2)))
        {
          all = false;
          continue;
        }

      update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
                                       e->count, (edge) e->aux);

      /* Insert the outgoing edge into the hash table if it is not
         already in the hash table.  */
      lookup_redirection_data (e2, e, INSERT);
    }

  /* If we are going to thread all incoming edges to an outgoing edge, then
     BB will become unreachable.  Rather than just throwing it away, use
     it for one of the duplicates.  Mark the first incoming edge with the
     DO_NOT_DUPLICATE attribute.  */
  if (all)
    {
      edge e = (edge) EDGE_PRED (bb, 0)->aux;
      lookup_redirection_data (e, NULL, NO_INSERT)->do_not_duplicate = true;
    }

  /* We do not update dominance info.  */
  free_dominance_info (CDI_DOMINATORS);

  /* Now create duplicates of BB.

     Note that for a block with a high outgoing degree we can waste
     a lot of time and memory creating and destroying useless edges.

     So we first duplicate BB and remove the control structure at the
     tail of the duplicate as well as all outgoing edges from the
     duplicate.  We then use that duplicate block as a template for
     the rest of the duplicates.  */
  local_info.template_block = NULL;
  local_info.bb = bb;
  local_info.jumps_threaded = false;
  htab_traverse (redirection_data, create_duplicates, &local_info);

  /* The template does not have an outgoing edge.  Create that outgoing
     edge and update PHI nodes at the edge's target as necessary.

     We do this after creating all the duplicates to avoid creating
     unnecessary edges.  */
  htab_traverse (redirection_data, fixup_template_block, &local_info);

  /* The hash table traversals above created the duplicate blocks (and the
     statements within the duplicate blocks).  This loop creates PHI nodes for
     the duplicated blocks and redirects the incoming edges into BB to reach
     the duplicates of BB.  */
  htab_traverse (redirection_data, redirect_edges, &local_info);

  /* Done with this block.  Clear REDIRECTION_DATA.  */
  htab_delete (redirection_data);
  redirection_data = NULL;

  /* Indicate to our caller whether or not any jumps were threaded.  */
  return local_info.jumps_threaded;
}

/* Threads edge E through E->dest to the edge E->aux.  Returns the copy
   of E->dest created during threading, or E->dest if it was not necessary
   to copy it (E is its single predecessor).  */

static basic_block
thread_single_edge (edge e)
{
  basic_block bb = e->dest;
  edge eto = (edge) e->aux;
  struct redirection_data rd;

  e->aux = NULL;

  thread_stats.num_threaded_edges++;

  if (single_pred_p (bb))
    {
      /* If BB has just a single predecessor, we should only remove the
         control statements at its end, and successors except for ETO.  */
      remove_ctrl_stmt_and_useless_edges (bb, eto->dest);

      /* And fixup the flags on the single remaining edge.  */
      eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
      eto->flags |= EDGE_FALLTHRU;

      return bb;
    }

  /* Otherwise, we need to create a copy.  */
  update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);

  rd.outgoing_edge = eto;

  create_block_for_threading (bb, &rd);
  create_edge_and_update_destination_phis (&rd);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
             e->src->index, e->dest->index, rd.dup_block->index);

  rd.dup_block->count = e->count;
  rd.dup_block->frequency = EDGE_FREQUENCY (e);
  single_succ_edge (rd.dup_block)->count = e->count;
  redirect_edge_and_branch (e, rd.dup_block);
  flush_pending_stmts (e);

  return rd.dup_block;
}

/* Callback for dfs_enumerate_from.  Returns true if BB is different
   from STOP and DBDS_CE_STOP.  */

static basic_block dbds_ce_stop;
static bool
dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
{
  return (bb != (const_basic_block) stop
          && bb != dbds_ce_stop);
}

/* Evaluates the dominance relationship of latch of the LOOP and BB, and
   returns the state.  */

enum bb_dom_status
{
  /* BB does not dominate latch of the LOOP.  */
  DOMST_NONDOMINATING,
  /* The LOOP is broken (there is no path from the header to its latch).  */
  DOMST_LOOP_BROKEN,
  /* BB dominates the latch of the LOOP.  */
  DOMST_DOMINATING
};

static enum bb_dom_status
determine_bb_domination_status (struct loop *loop, basic_block bb)
{
  basic_block *bblocks;
  unsigned nblocks, i;
  bool bb_reachable = false;
  edge_iterator ei;
  edge e;

#ifdef ENABLE_CHECKING
  /* This function assumes BB is a successor of LOOP->header.  */
  {
    bool ok = false;

    FOR_EACH_EDGE (e, ei, bb->preds)
      {
        if (e->src == loop->header)
          {
            ok = true;
            break;
          }
      }

    gcc_assert (ok);
  }
#endif

  if (bb == loop->latch)
    return DOMST_DOMINATING;

  /* Check that BB dominates LOOP->latch, and that it is back-reachable
     from it.  */

  bblocks = XCNEWVEC (basic_block, loop->num_nodes);
  dbds_ce_stop = loop->header;
  nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
                                bblocks, loop->num_nodes, bb);
  for (i = 0; i < nblocks; i++)
    FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
      {
        if (e->src == loop->header)
          {
            free (bblocks);
            return DOMST_NONDOMINATING;
          }
        if (e->src == bb)
          bb_reachable = true;
      }

  free (bblocks);
  return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
}
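
/* A worked illustration (hypothetical CFG, not from the original
   sources): let H be the loop header and L its latch.

     H -> B -> L          B dominates L and L is back-reachable from B,
                          so the status of B is DOMST_DOMINATING.

     H -> B1 -> L
     H -> B2 -> L         neither B1 nor B2 alone dominates L: the DFS
                          backwards from L finds a predecessor edge out
                          of H that bypasses the queried block, so each
                          gets DOMST_NONDOMINATING.  */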

/* Thread jumps through the header of LOOP.  Returns true if cfg changes.
   If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
   to the inside of the loop.  */

static bool
thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
{
  basic_block header = loop->header;
  edge e, tgt_edge, latch = loop_latch_edge (loop);
  edge_iterator ei;
  basic_block tgt_bb, atgt_bb;
  enum bb_dom_status domst;

  /* We have already threaded through headers to exits, so all the threading
     requests now are to the inside of the loop.  We need to avoid creating
     irreducible regions (i.e., loops with more than one entry block), and
     also loops with several latch edges, or new subloops of the loop
     (although there are cases where it might be appropriate, it is difficult
     to decide, and doing it wrongly may confuse other optimizers).

     We could handle more general cases here.  However, the intention is to
     preserve some information about the loop, which is impossible if its
     structure changes significantly, in a way that is not well understood.
     Thus we only handle a few important special cases, in which also updating
     of the loop-carried information should be feasible:

     1) Propagation of latch edge to a block that dominates the latch block
        of a loop.  This aims to handle the following idiom:

        first = 1;
        while (1)
          {
            if (first)
              initialize;
            first = 0;
            body;
          }

        After threading the latch edge, this becomes

        first = 1;
        if (first)
          initialize;
        while (1)
          {
            first = 0;
            body;
          }

        The original header of the loop is moved out of it, and we may thread
        the remaining edges through it without further constraints.

     2) All entry edges are propagated to a single basic block that dominates
        the latch block of the loop.  This aims to handle the following idiom
        (normally created for "for" loops):

        i = 0;
        while (1)
          {
            if (i >= 100)
              break;
            body;
            i++;
          }

        This becomes

        i = 0;
        while (1)
          {
            body;
            i++;
            if (i >= 100)
              break;
          }
     */

  /* Threading through the header won't improve the code if the header has just
     one successor.  */
  if (single_succ_p (header))
    goto fail;

  if (latch->aux)
    {
      tgt_edge = (edge) latch->aux;
      tgt_bb = tgt_edge->dest;
    }
  else if (!may_peel_loop_headers
           && !redirection_block_p (loop->header))
    goto fail;
  else
    {
      tgt_bb = NULL;
      tgt_edge = NULL;
      FOR_EACH_EDGE (e, ei, header->preds)
        {
          if (!e->aux)
            {
              if (e == latch)
                continue;

              /* If the latch is not threaded, and there is a header
                 edge that is not threaded, we would create a loop
                 with multiple entries.  */
              goto fail;
            }

          tgt_edge = (edge) e->aux;
          atgt_bb = tgt_edge->dest;
          if (!tgt_bb)
            tgt_bb = atgt_bb;
          /* Two targets of threading would make us create a loop
             with multiple entries.  */
          else if (tgt_bb != atgt_bb)
            goto fail;
        }

      if (!tgt_bb)
        {
          /* There are no threading requests.  */
          return false;
        }

      /* Redirecting to the empty loop latch is useless.  */
      if (tgt_bb == loop->latch
          && empty_block_p (loop->latch))
        goto fail;
    }

  /* The target block must dominate the loop latch, otherwise we would be
     creating a subloop.  */
  domst = determine_bb_domination_status (loop, tgt_bb);
  if (domst == DOMST_NONDOMINATING)
    goto fail;
  if (domst == DOMST_LOOP_BROKEN)
    {
      /* If the loop ceased to exist, mark it as such, and thread through its
         original header.  */
      loop->header = NULL;
      loop->latch = NULL;
      return thread_block (header, false);
    }

  if (tgt_bb->loop_father->header == tgt_bb)
    {
      /* If the target of the threading is a header of a subloop, we need
         to create a preheader for it, so that the headers of the two loops
         do not merge.  */
      if (EDGE_COUNT (tgt_bb->preds) > 2)
        {
          tgt_bb = create_preheader (tgt_bb->loop_father, 0);
          gcc_assert (tgt_bb != NULL);
        }
      else
        tgt_bb = split_edge (tgt_edge);
    }

  if (latch->aux)
    {
      /* First handle the case when the latch edge is redirected.  */
      loop->latch = thread_single_edge (latch);
      gcc_assert (single_succ (loop->latch) == tgt_bb);
      loop->header = tgt_bb;

      /* Thread the remaining edges through the former header.  */
      thread_block (header, false);
    }
  else
    {
      basic_block new_preheader;

      /* Now consider the case when entry edges are redirected to the new
         entry block.  Remember one entry edge, so that we can find the new
         preheader (its destination after threading).  */
      FOR_EACH_EDGE (e, ei, header->preds)
        {
          if (e->aux)
            break;
        }

      /* The duplicate of the header is the new preheader of the loop.  Ensure
         that it is placed correctly in the loop hierarchy.  */
      set_loop_copy (loop, loop_outer (loop));

      thread_block (header, false);
      set_loop_copy (loop, NULL);
      new_preheader = e->dest;

      /* Create the new latch block.  This is always necessary, as the latch
         must have only a single successor, but the original header had at
         least two successors.  */
      loop->latch = NULL;
      mfb_kj_edge = single_succ_edge (new_preheader);
      loop->header = mfb_kj_edge->dest;
      latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
      loop->header = latch->dest;
      loop->latch = latch->src;
    }

  return true;

fail:
  /* We failed to thread anything.  Cancel the requests.  */
  FOR_EACH_EDGE (e, ei, header->preds)
    {
      e->aux = NULL;
    }
  return false;
}

/* Walk through the registered jump threads and convert them into a
   form convenient for this pass.

   Any block which has incoming edges threaded to outgoing edges
   will have its entry in THREADED_BLOCKS set.

   Any threaded edge will have its new outgoing edge stored in the
   original edge's AUX field.

   This form avoids the need to walk all the edges in the CFG to
   discover blocks which need processing and avoids unnecessary
   hash table lookups to map from threaded edge to new target.  */

static void
mark_threaded_blocks (bitmap threaded_blocks)
{
  unsigned int i;
  bitmap_iterator bi;
  bitmap tmp = BITMAP_ALLOC (NULL);
  basic_block bb;
  edge e;
  edge_iterator ei;

  for (i = 0; i < VEC_length (edge, threaded_edges); i += 2)
    {
      edge e = VEC_index (edge, threaded_edges, i);
      edge e2 = VEC_index (edge, threaded_edges, i + 1);

      e->aux = e2;
      bitmap_set_bit (tmp, e->dest->index);
    }

  /* If optimizing for size, only thread through a block if we don't have
     to duplicate it or it's an otherwise empty redirection block.  */
  if (optimize_function_for_size_p (cfun))
    {
      EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
        {
          bb = BASIC_BLOCK (i);
          if (EDGE_COUNT (bb->preds) > 1
              && !redirection_block_p (bb))
            {
              FOR_EACH_EDGE (e, ei, bb->preds)
                e->aux = NULL;
            }
          else
            bitmap_set_bit (threaded_blocks, i);
        }
    }
  else
    bitmap_copy (threaded_blocks, tmp);

  BITMAP_FREE (tmp);
}


/* Walk through all blocks and thread incoming edges to the appropriate
   outgoing edge for each edge pair recorded in THREADED_EDGES.

   It is the caller's responsibility to fix the dominance information
   and rewrite duplicated SSA_NAMEs back into SSA form.

   If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
   loop headers if it does not simplify the loop.

   Returns true if one or more edges were threaded, false otherwise.  */

bool
thread_through_all_blocks (bool may_peel_loop_headers)
{
  bool retval = false;
  unsigned int i;
  bitmap_iterator bi;
  bitmap threaded_blocks;
  struct loop *loop;
  loop_iterator li;

  /* We must know about loops in order to preserve them.  */
  gcc_assert (current_loops != NULL);

  if (threaded_edges == NULL)
    return false;

  threaded_blocks = BITMAP_ALLOC (NULL);
  memset (&thread_stats, 0, sizeof (thread_stats));

  mark_threaded_blocks (threaded_blocks);

  initialize_original_copy_tables ();

  /* First perform the threading requests that do not affect
     loop structure.  */
  EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
    {
      basic_block bb = BASIC_BLOCK (i);

      if (EDGE_COUNT (bb->preds) > 0)
        retval |= thread_block (bb, true);
    }

  /* Then perform the threading through loop headers.  We start with the
     innermost loop, so that the changes in cfg we perform won't affect
     further threading.  */
  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      if (!loop->header
          || !bitmap_bit_p (threaded_blocks, loop->header->index))
        continue;

      retval |= thread_through_loop_header (loop, may_peel_loop_headers);
    }

  statistics_counter_event (cfun, "Jumps threaded",
                            thread_stats.num_threaded_edges);

  free_original_copy_tables ();

  BITMAP_FREE (threaded_blocks);
  threaded_blocks = NULL;
  VEC_free (edge, heap, threaded_edges);
  threaded_edges = NULL;

  if (retval)
    loops_state_set (LOOPS_NEED_FIXUP);

  return retval;
}

/* Register a jump threading opportunity.  We queue up all the jump
   threading opportunities discovered by a pass and update the CFG
   and SSA form all at once.

   E is the edge we can thread, E2 is the new target edge, i.e., we
   are effectively recording that E->dest can be changed to E2->dest
   after fixing the SSA graph.  */

void
register_jump_thread (edge e, edge e2)
{
  if (threaded_edges == NULL)
    threaded_edges = VEC_alloc (edge, heap, 10);

  VEC_safe_push (edge, heap, threaded_edges, e);
  VEC_safe_push (edge, heap, threaded_edges, e2);
}
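
/* A minimal usage sketch (hypothetical client code, not part of this
   file): a pass queues requests with register_jump_thread as its
   analysis discovers them, then applies them all in one batch.
   find_thread_target below is a made-up analysis hook, and the SSA
   update call reflects the caller obligation documented above
   thread_through_all_blocks.  */
#if 0
static void
example_client_pass (void)
{
  basic_block bb;

  FOR_EACH_BB (bb)
    {
      edge e, taken;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->preds)
        if ((taken = find_thread_target (e)) != NULL)
          /* Record that E may bypass E->dest in favor of TAKEN->dest.  */
          register_jump_thread (e, taken);
    }

  /* Update the CFG for every queued (edge, target-edge) pair; the
     caller must then fix dominance info and rewrite SSA names.  */
  if (thread_through_all_blocks (false))
    update_ssa (TODO_update_ssa);
}
#endif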