Mercurial > hg > CbC > CbC_gcc
annotate gcc/cfg.c @ 120:f93fa5091070
fix conv1.c
author | mir3636 |
---|---|
date | Thu, 08 Mar 2018 14:53:42 +0900 |
parents | 04ced10e8804 |
children | 84e7813d76e9 |
rev | line source |
---|---|
0 | 1 /* Control flow graph manipulation code for GNU compiler. |
111 | 2 Copyright (C) 1987-2017 Free Software Foundation, Inc. |
0 | 3 |
4 This file is part of GCC. | |
5 | |
6 GCC is free software; you can redistribute it and/or modify it under | |
7 the terms of the GNU General Public License as published by the Free | |
8 Software Foundation; either version 3, or (at your option) any later | |
9 version. | |
10 | |
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
14 for more details. | |
15 | |
16 You should have received a copy of the GNU General Public License | |
17 along with GCC; see the file COPYING3. If not see | |
18 <http://www.gnu.org/licenses/>. */ | |
19 | |
20 /* This file contains low level functions to manipulate the CFG and | |
21 analyze it. All other modules should not transform the data structure | |
22 directly and use abstraction instead. The file is supposed to be | |
23 ordered bottom-up and should not contain any code dependent on a | |
24 particular intermediate language (RTL or trees). | |
25 | |
26 Available functionality: | |
27 - Initialization/deallocation | |
28 init_flow, clear_edges | |
29 - Low level basic block manipulation | |
30 alloc_block, expunge_block | |
31 - Edge manipulation | |
32 make_edge, make_single_succ_edge, cached_make_edge, remove_edge | |
33 - Low level edge redirection (without updating instruction chain) | |
34 redirect_edge_succ, redirect_edge_succ_nodup, redirect_edge_pred | |
35 - Dumping and debugging | |
36 dump_flow_info, debug_flow_info, dump_edge_info | |
37 - Allocation of AUX fields for basic blocks | |
38 alloc_aux_for_blocks, free_aux_for_blocks, alloc_aux_for_block | |
39 - clear_bb_flags | |
40 - Consistency checking | |
41 verify_flow_info | |
42 - Dumping and debugging | |
43 print_rtl_with_bb, dump_bb, debug_bb, debug_bb_n | |
111 | 44 |
45 TODO: Document these "Available functionality" functions in the files | |
46 that implement them. | |
0 | 47 */ |
48 | |
49 #include "config.h" | |
50 #include "system.h" | |
51 #include "coretypes.h" | |
111 | 52 #include "backend.h" |
0 | 53 #include "hard-reg-set.h" |
111 | 54 #include "tree.h" |
55 #include "cfghooks.h" | |
0 | 56 #include "df.h" |
111 | 57 #include "cfganal.h" |
58 #include "cfgloop.h" /* FIXME: For struct loop. */ | |
59 #include "dumpfile.h" | |
0 | 60 |
61 | |
62 | |
63 /* Called once at initialization time. */ | |
64 | |
65 void | |
66 init_flow (struct function *the_fun) | |
67 { | |
68 if (!the_fun->cfg) | |
111 | 69 the_fun->cfg = ggc_cleared_alloc<control_flow_graph> (); |
70 n_edges_for_fn (the_fun) = 0; | |
71 ENTRY_BLOCK_PTR_FOR_FN (the_fun) | |
72 = alloc_block (); | |
73 ENTRY_BLOCK_PTR_FOR_FN (the_fun)->index = ENTRY_BLOCK; | |
74 EXIT_BLOCK_PTR_FOR_FN (the_fun) | |
75 = alloc_block (); | |
76 EXIT_BLOCK_PTR_FOR_FN (the_fun)->index = EXIT_BLOCK; | |
77 ENTRY_BLOCK_PTR_FOR_FN (the_fun)->next_bb | |
78 = EXIT_BLOCK_PTR_FOR_FN (the_fun); | |
79 EXIT_BLOCK_PTR_FOR_FN (the_fun)->prev_bb | |
80 = ENTRY_BLOCK_PTR_FOR_FN (the_fun); | |
0 | 81 } |
82 | |
83 /* Helper function for remove_edge and clear_edges. Frees edge structure | |
111 | 84 without actually removing it from the pred/succ arrays. */ |
0 | 85 |
86 static void | |
111 | 87 free_edge (function *fn, edge e) |
0 | 88 { |
111 | 89 n_edges_for_fn (fn)--; |
0 | 90 ggc_free (e); |
91 } | |
92 | |
93 /* Free the memory associated with the edge structures. */ | |
94 | |
95 void | |
111 | 96 clear_edges (struct function *fn) |
0 | 97 { |
98 basic_block bb; | |
99 edge e; | |
100 edge_iterator ei; | |
101 | |
111 | 102 FOR_EACH_BB_FN (bb, fn) |
0 | 103 { |
104 FOR_EACH_EDGE (e, ei, bb->succs) | |
111 | 105 free_edge (fn, e); |
106 vec_safe_truncate (bb->succs, 0); | |
107 vec_safe_truncate (bb->preds, 0); | |
0 | 108 } |
109 | |
111 | 110 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (fn)->succs) |
111 free_edge (fn, e); | |
112 vec_safe_truncate (EXIT_BLOCK_PTR_FOR_FN (fn)->preds, 0); | |
113 vec_safe_truncate (ENTRY_BLOCK_PTR_FOR_FN (fn)->succs, 0); | |
0 | 114 |
111 | 115 gcc_assert (!n_edges_for_fn (fn)); |
0 | 116 } |
117 | |
118 /* Allocate memory for basic_block. */ | |
119 | |
120 basic_block | |
121 alloc_block (void) | |
122 { | |
123 basic_block bb; | |
111 | 124 bb = ggc_cleared_alloc<basic_block_def> (); |
125 bb->count = profile_count::uninitialized (); | |
0 | 126 return bb; |
127 } | |
128 | |
129 /* Link block B to chain after AFTER. */ | |
130 void | |
131 link_block (basic_block b, basic_block after) | |
132 { | |
133 b->next_bb = after->next_bb; | |
134 b->prev_bb = after; | |
135 after->next_bb = b; | |
136 b->next_bb->prev_bb = b; | |
137 } | |
138 | |
139 /* Unlink block B from chain. */ | |
140 void | |
141 unlink_block (basic_block b) | |
142 { | |
143 b->next_bb->prev_bb = b->prev_bb; | |
144 b->prev_bb->next_bb = b->next_bb; | |
145 b->prev_bb = NULL; | |
146 b->next_bb = NULL; | |
147 } | |
148 | |
149 /* Sequentially order blocks and compact the arrays. */ | |
150 void | |
151 compact_blocks (void) | |
152 { | |
153 int i; | |
154 | |
111 | 155 SET_BASIC_BLOCK_FOR_FN (cfun, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (cfun)); |
156 SET_BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (cfun)); | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
157 |
0 | 158 if (df) |
159 df_compact_blocks (); | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
160 else |
0 | 161 { |
162 basic_block bb; | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
163 |
0 | 164 i = NUM_FIXED_BLOCKS; |
111 | 165 FOR_EACH_BB_FN (bb, cfun) |
0 | 166 { |
111 | 167 SET_BASIC_BLOCK_FOR_FN (cfun, i, bb); |
0 | 168 bb->index = i; |
169 i++; | |
170 } | |
111 | 171 gcc_assert (i == n_basic_blocks_for_fn (cfun)); |
0 | 172 |
111 | 173 for (; i < last_basic_block_for_fn (cfun); i++) |
174 SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL); | |
0 | 175 } |
111 | 176 last_basic_block_for_fn (cfun) = n_basic_blocks_for_fn (cfun); |
0 | 177 } |
178 | |
179 /* Remove block B from the basic block array. */ | |
180 | |
181 void | |
182 expunge_block (basic_block b) | |
183 { | |
184 unlink_block (b); | |
111 | 185 SET_BASIC_BLOCK_FOR_FN (cfun, b->index, NULL); |
186 n_basic_blocks_for_fn (cfun)--; | |
0 | 187 /* We should be able to ggc_free here, but we are not. |
188 The dead SSA_NAMES are left pointing to dead statements that are pointing | |
189 to dead basic blocks making garbage collector to die. | |
190 We should be able to release all dead SSA_NAMES and at the same time we should | |
191 clear out BB pointer of dead statements consistently. */ | |
192 } | |
193 | |
194 /* Connect E to E->src. */ | |
195 | |
196 static inline void | |
197 connect_src (edge e) | |
198 { | |
111 | 199 vec_safe_push (e->src->succs, e); |
0 | 200 df_mark_solutions_dirty (); |
201 } | |
202 | |
203 /* Connect E to E->dest. */ | |
204 | |
205 static inline void | |
206 connect_dest (edge e) | |
207 { | |
208 basic_block dest = e->dest; | |
111 | 209 vec_safe_push (dest->preds, e); |
0 | 210 e->dest_idx = EDGE_COUNT (dest->preds) - 1; |
211 df_mark_solutions_dirty (); | |
212 } | |
213 | |
214 /* Disconnect edge E from E->src. */ | |
215 | |
216 static inline void | |
217 disconnect_src (edge e) | |
218 { | |
219 basic_block src = e->src; | |
220 edge_iterator ei; | |
221 edge tmp; | |
222 | |
223 for (ei = ei_start (src->succs); (tmp = ei_safe_edge (ei)); ) | |
224 { | |
225 if (tmp == e) | |
226 { | |
111 | 227 src->succs->unordered_remove (ei.index); |
228 df_mark_solutions_dirty (); | |
0 | 229 return; |
230 } | |
231 else | |
232 ei_next (&ei); | |
233 } | |
234 | |
235 gcc_unreachable (); | |
236 } | |
237 | |
238 /* Disconnect edge E from E->dest. */ | |
239 | |
240 static inline void | |
241 disconnect_dest (edge e) | |
242 { | |
243 basic_block dest = e->dest; | |
244 unsigned int dest_idx = e->dest_idx; | |
245 | |
111 | 246 dest->preds->unordered_remove (dest_idx); |
0 | 247 |
248 /* If we removed an edge in the middle of the edge vector, we need | |
249 to update dest_idx of the edge that moved into the "hole". */ | |
250 if (dest_idx < EDGE_COUNT (dest->preds)) | |
251 EDGE_PRED (dest, dest_idx)->dest_idx = dest_idx; | |
252 df_mark_solutions_dirty (); | |
253 } | |
254 | |
255 /* Create an edge connecting SRC and DEST with flags FLAGS. Return newly | |
256 created edge. Use this only if you are sure that this edge can't | |
257 possibly already exist. */ | |
258 | |
259 edge | |
260 unchecked_make_edge (basic_block src, basic_block dst, int flags) | |
261 { | |
262 edge e; | |
111 | 263 e = ggc_cleared_alloc<edge_def> (); |
264 n_edges_for_fn (cfun)++; | |
0 | 265 |
111 | 266 e->probability = profile_probability::uninitialized (); |
0 | 267 e->src = src; |
268 e->dest = dst; | |
269 e->flags = flags; | |
270 | |
271 connect_src (e); | |
272 connect_dest (e); | |
273 | |
274 execute_on_growing_pred (e); | |
275 return e; | |
276 } | |
277 | |
278 /* Create an edge connecting SRC and DST with FLAGS optionally using | |
279 edge cache CACHE. Return the new edge, NULL if already exist. */ | |
280 | |
281 edge | |
282 cached_make_edge (sbitmap edge_cache, basic_block src, basic_block dst, int flags) | |
283 { | |
284 if (edge_cache == NULL | |
111 | 285 || src == ENTRY_BLOCK_PTR_FOR_FN (cfun) |
286 || dst == EXIT_BLOCK_PTR_FOR_FN (cfun)) | |
0 | 287 return make_edge (src, dst, flags); |
288 | |
289 /* Does the requested edge already exist? */ | |
111 | 290 if (! bitmap_bit_p (edge_cache, dst->index)) |
0 | 291 { |
292 /* The edge does not exist. Create one and update the | |
293 cache. */ | |
111 | 294 bitmap_set_bit (edge_cache, dst->index); |
0 | 295 return unchecked_make_edge (src, dst, flags); |
296 } | |
297 | |
298 /* At this point, we know that the requested edge exists. Adjust | |
299 flags if necessary. */ | |
300 if (flags) | |
301 { | |
302 edge e = find_edge (src, dst); | |
303 e->flags |= flags; | |
304 } | |
305 | |
306 return NULL; | |
307 } | |
308 | |
309 /* Create an edge connecting SRC and DEST with flags FLAGS. Return newly | |
310 created edge or NULL if already exist. */ | |
311 | |
312 edge | |
313 make_edge (basic_block src, basic_block dest, int flags) | |
314 { | |
315 edge e = find_edge (src, dest); | |
316 | |
317 /* Make sure we don't add duplicate edges. */ | |
318 if (e) | |
319 { | |
320 e->flags |= flags; | |
321 return NULL; | |
322 } | |
323 | |
324 return unchecked_make_edge (src, dest, flags); | |
325 } | |
326 | |
327 /* Create an edge connecting SRC to DEST and set probability by knowing | |
328 that it is the single edge leaving SRC. */ | |
329 | |
330 edge | |
331 make_single_succ_edge (basic_block src, basic_block dest, int flags) | |
332 { | |
333 edge e = make_edge (src, dest, flags); | |
334 | |
111 | 335 e->probability = profile_probability::always (); |
0 | 336 return e; |
337 } | |
338 | |
339 /* This function will remove an edge from the flow graph. */ | |
340 | |
341 void | |
342 remove_edge_raw (edge e) | |
343 { | |
344 remove_predictions_associated_with_edge (e); | |
345 execute_on_shrinking_pred (e); | |
346 | |
347 disconnect_src (e); | |
348 disconnect_dest (e); | |
349 | |
111 | 350 free_edge (cfun, e); |
0 | 351 } |
352 | |
353 /* Redirect an edge's successor from one block to another. */ | |
354 | |
355 void | |
356 redirect_edge_succ (edge e, basic_block new_succ) | |
357 { | |
358 execute_on_shrinking_pred (e); | |
359 | |
360 disconnect_dest (e); | |
361 | |
362 e->dest = new_succ; | |
363 | |
364 /* Reconnect the edge to the new successor block. */ | |
365 connect_dest (e); | |
366 | |
367 execute_on_growing_pred (e); | |
368 } | |
369 | |
370 /* Redirect an edge's predecessor from one block to another. */ | |
371 | |
372 void | |
373 redirect_edge_pred (edge e, basic_block new_pred) | |
374 { | |
375 disconnect_src (e); | |
376 | |
377 e->src = new_pred; | |
378 | |
379 /* Reconnect the edge to the new predecessor block. */ | |
380 connect_src (e); | |
381 } | |
382 | |
111 | 383 /* Clear all basic block flags that do not have to be preserved. */ |
0 | 384 void |
385 clear_bb_flags (void) | |
386 { | |
387 basic_block bb; | |
388 | |
111 | 389 FOR_ALL_BB_FN (bb, cfun) |
390 bb->flags &= BB_FLAGS_TO_PRESERVE; | |
0 | 391 } |
392 | |
393 /* Check the consistency of profile information. We can't do that | |
394 in verify_flow_info, as the counts may get invalid for incompletely | |
395 solved graphs, later eliminating of conditionals or roundoff errors. | |
396 It is still practical to have them reported for debugging of simple | |
397 testcases. */ | |
111 | 398 static void |
399 check_bb_profile (basic_block bb, FILE * file, int indent) | |
0 | 400 { |
401 edge e; | |
402 edge_iterator ei; | |
111 | 403 struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl); |
404 char *s_indent = (char *) alloca ((size_t) indent + 1); | |
405 memset ((void *) s_indent, ' ', (size_t) indent); | |
406 s_indent[indent] = '\0'; | |
0 | 407 |
111 | 408 if (profile_status_for_fn (fun) == PROFILE_ABSENT) |
0 | 409 return; |
410 | |
111 | 411 if (bb != EXIT_BLOCK_PTR_FOR_FN (fun)) |
0 | 412 { |
111 | 413 bool found = false; |
414 profile_probability sum = profile_probability::never (); | |
415 int isum = 0; | |
416 | |
0 | 417 FOR_EACH_EDGE (e, ei, bb->succs) |
111 | 418 { |
419 if (!(e->flags & (EDGE_EH | EDGE_FAKE))) | |
420 found = true; | |
421 sum += e->probability; | |
422 if (e->probability.initialized_p ()) | |
423 isum += e->probability.to_reg_br_prob_base (); | |
424 } | |
425 /* Only report mismatches for non-EH control flow. If there are only EH | |
426 edges it means that the BB ends by noreturn call. Here the control | |
427 flow may just terminate. */ | |
428 if (found) | |
429 { | |
430 if (sum.differs_from_p (profile_probability::always ())) | |
431 { | |
432 fprintf (file, | |
433 ";; %sInvalid sum of outgoing probabilities ", | |
434 s_indent); | |
435 sum.dump (file); | |
436 fprintf (file, "\n"); | |
437 } | |
438 /* Probabilities caps to 100% and thus the previous test will never | |
439 fire if the sum of probabilities is too large. */ | |
440 else if (isum > REG_BR_PROB_BASE + 100) | |
441 { | |
442 fprintf (file, | |
443 ";; %sInvalid sum of outgoing probabilities %.1f%%\n", | |
444 s_indent, isum * 100.0 / REG_BR_PROB_BASE); | |
445 } | |
446 } | |
0 | 447 } |
111 | 448 if (bb != ENTRY_BLOCK_PTR_FOR_FN (fun)) |
0 | 449 { |
111 | 450 int sum = 0; |
0 | 451 FOR_EACH_EDGE (e, ei, bb->preds) |
452 sum += EDGE_FREQUENCY (e); | |
453 if (abs (sum - bb->frequency) > 100) | |
454 fprintf (file, | |
111 | 455 ";; %sInvalid sum of incoming frequencies %i, should be %i\n", |
456 s_indent, sum, bb->frequency); | |
457 } | |
458 if (BB_PARTITION (bb) == BB_COLD_PARTITION) | |
459 { | |
460 /* Warn about inconsistencies in the partitioning that are | |
461 currently caused by profile insanities created via optimization. */ | |
462 if (!probably_never_executed_bb_p (fun, bb)) | |
463 fprintf (file, ";; %sBlock in cold partition with hot count\n", | |
464 s_indent); | |
0 | 465 FOR_EACH_EDGE (e, ei, bb->preds) |
111 | 466 { |
467 if (!probably_never_executed_edge_p (fun, e)) | |
468 fprintf (file, | |
469 ";; %sBlock in cold partition with incoming hot edge\n", | |
470 s_indent); | |
471 } | |
0 | 472 } |
473 } | |
474 | |
475 void | |
111 | 476 dump_edge_info (FILE *file, edge e, dump_flags_t flags, int do_succ) |
0 | 477 { |
478 basic_block side = (do_succ ? e->dest : e->src); | |
111 | 479 bool do_details = false; |
480 | |
481 if ((flags & TDF_DETAILS) != 0 | |
482 && (flags & TDF_SLIM) == 0) | |
483 do_details = true; | |
484 | |
485 if (side->index == ENTRY_BLOCK) | |
0 | 486 fputs (" ENTRY", file); |
111 | 487 else if (side->index == EXIT_BLOCK) |
0 | 488 fputs (" EXIT", file); |
489 else | |
490 fprintf (file, " %d", side->index); | |
491 | |
111 | 492 if (e->probability.initialized_p () && do_details) |
493 { | |
494 fprintf (file, " ["); | |
495 e->probability.dump (file); | |
496 fprintf (file, "] "); | |
497 } | |
0 | 498 |
111 | 499 if (e->count ().initialized_p () && do_details) |
0 | 500 { |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
501 fputs (" count:", file); |
111 | 502 e->count ().dump (file); |
0 | 503 } |
504 | |
111 | 505 if (e->flags && do_details) |
0 | 506 { |
111 | 507 static const char * const bitnames[] = |
508 { | |
509 #define DEF_EDGE_FLAG(NAME,IDX) #NAME , | |
510 #include "cfg-flags.def" | |
511 NULL | |
512 #undef DEF_EDGE_FLAG | |
513 }; | |
514 bool comma = false; | |
0 | 515 int i, flags = e->flags; |
516 | |
111 | 517 gcc_assert (e->flags <= EDGE_ALL_FLAGS); |
0 | 518 fputs (" (", file); |
519 for (i = 0; flags; i++) | |
520 if (flags & (1 << i)) | |
521 { | |
522 flags &= ~(1 << i); | |
523 | |
524 if (comma) | |
525 fputc (',', file); | |
111 | 526 fputs (bitnames[i], file); |
527 comma = true; | |
0 | 528 } |
529 | |
530 fputc (')', file); | |
531 } | |
532 } | |
111 | 533 |
534 DEBUG_FUNCTION void | |
535 debug (edge_def &ref) | |
536 { | |
537 /* FIXME (crowl): Is this desireable? */ | |
538 dump_edge_info (stderr, &ref, 0, false); | |
539 dump_edge_info (stderr, &ref, 0, true); | |
540 } | |
541 | |
542 DEBUG_FUNCTION void | |
543 debug (edge_def *ptr) | |
544 { | |
545 if (ptr) | |
546 debug (*ptr); | |
547 else | |
548 fprintf (stderr, "<nil>\n"); | |
549 } | |
0 | 550 |
551 /* Simple routines to easily allocate AUX fields of basic blocks. */ | |
552 | |
553 static struct obstack block_aux_obstack; | |
554 static void *first_block_aux_obj = 0; | |
555 static struct obstack edge_aux_obstack; | |
556 static void *first_edge_aux_obj = 0; | |
557 | |
558 /* Allocate a memory block of SIZE as BB->aux. The obstack must | |
559 be first initialized by alloc_aux_for_blocks. */ | |
560 | |
67
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
561 static void |
0 | 562 alloc_aux_for_block (basic_block bb, int size) |
563 { | |
564 /* Verify that aux field is clear. */ | |
565 gcc_assert (!bb->aux && first_block_aux_obj); | |
566 bb->aux = obstack_alloc (&block_aux_obstack, size); | |
567 memset (bb->aux, 0, size); | |
568 } | |
569 | |
570 /* Initialize the block_aux_obstack and if SIZE is nonzero, call | |
571 alloc_aux_for_block for each basic block. */ | |
572 | |
573 void | |
574 alloc_aux_for_blocks (int size) | |
575 { | |
576 static int initialized; | |
577 | |
578 if (!initialized) | |
579 { | |
580 gcc_obstack_init (&block_aux_obstack); | |
581 initialized = 1; | |
582 } | |
583 else | |
584 /* Check whether AUX data are still allocated. */ | |
585 gcc_assert (!first_block_aux_obj); | |
586 | |
587 first_block_aux_obj = obstack_alloc (&block_aux_obstack, 0); | |
588 if (size) | |
589 { | |
590 basic_block bb; | |
591 | |
111 | 592 FOR_ALL_BB_FN (bb, cfun) |
0 | 593 alloc_aux_for_block (bb, size); |
594 } | |
595 } | |
596 | |
597 /* Clear AUX pointers of all blocks. */ | |
598 | |
599 void | |
600 clear_aux_for_blocks (void) | |
601 { | |
602 basic_block bb; | |
603 | |
111 | 604 FOR_ALL_BB_FN (bb, cfun) |
0 | 605 bb->aux = NULL; |
606 } | |
607 | |
608 /* Free data allocated in block_aux_obstack and clear AUX pointers | |
609 of all blocks. */ | |
610 | |
611 void | |
612 free_aux_for_blocks (void) | |
613 { | |
614 gcc_assert (first_block_aux_obj); | |
615 obstack_free (&block_aux_obstack, first_block_aux_obj); | |
616 first_block_aux_obj = NULL; | |
617 | |
618 clear_aux_for_blocks (); | |
619 } | |
620 | |
111 | 621 /* Allocate a memory edge of SIZE as E->aux. The obstack must |
0 | 622 be first initialized by alloc_aux_for_edges. */ |
623 | |
111 | 624 void |
0 | 625 alloc_aux_for_edge (edge e, int size) |
626 { | |
627 /* Verify that aux field is clear. */ | |
628 gcc_assert (!e->aux && first_edge_aux_obj); | |
629 e->aux = obstack_alloc (&edge_aux_obstack, size); | |
630 memset (e->aux, 0, size); | |
631 } | |
632 | |
633 /* Initialize the edge_aux_obstack and if SIZE is nonzero, call | |
634 alloc_aux_for_edge for each basic edge. */ | |
635 | |
636 void | |
637 alloc_aux_for_edges (int size) | |
638 { | |
639 static int initialized; | |
640 | |
641 if (!initialized) | |
642 { | |
643 gcc_obstack_init (&edge_aux_obstack); | |
644 initialized = 1; | |
645 } | |
646 else | |
647 /* Check whether AUX data are still allocated. */ | |
648 gcc_assert (!first_edge_aux_obj); | |
649 | |
650 first_edge_aux_obj = obstack_alloc (&edge_aux_obstack, 0); | |
651 if (size) | |
652 { | |
653 basic_block bb; | |
654 | |
111 | 655 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), |
656 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb) | |
0 | 657 { |
658 edge e; | |
659 edge_iterator ei; | |
660 | |
661 FOR_EACH_EDGE (e, ei, bb->succs) | |
662 alloc_aux_for_edge (e, size); | |
663 } | |
664 } | |
665 } | |
666 | |
667 /* Clear AUX pointers of all edges. */ | |
668 | |
669 void | |
670 clear_aux_for_edges (void) | |
671 { | |
672 basic_block bb; | |
673 edge e; | |
674 | |
111 | 675 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), |
676 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb) | |
0 | 677 { |
678 edge_iterator ei; | |
679 FOR_EACH_EDGE (e, ei, bb->succs) | |
680 e->aux = NULL; | |
681 } | |
682 } | |
683 | |
684 /* Free data allocated in edge_aux_obstack and clear AUX pointers | |
685 of all edges. */ | |
686 | |
687 void | |
688 free_aux_for_edges (void) | |
689 { | |
690 gcc_assert (first_edge_aux_obj); | |
691 obstack_free (&edge_aux_obstack, first_edge_aux_obj); | |
692 first_edge_aux_obj = NULL; | |
693 | |
694 clear_aux_for_edges (); | |
695 } | |
696 | |
67
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
697 DEBUG_FUNCTION void |
0 | 698 debug_bb (basic_block bb) |
699 { | |
111 | 700 dump_bb (stderr, bb, 0, dump_flags); |
0 | 701 } |
702 | |
67
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
63
diff
changeset
|
703 DEBUG_FUNCTION basic_block |
0 | 704 debug_bb_n (int n) |
705 { | |
111 | 706 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, n); |
707 debug_bb (bb); | |
0 | 708 return bb; |
709 } | |
710 | |
111 | 711 /* Dumps cfg related information about basic block BB to OUTF. |
712 If HEADER is true, dump things that appear before the instructions | |
713 contained in BB. If FOOTER is true, dump things that appear after. | |
714 Flags are the TDF_* masks as documented in dumpfile.h. | |
715 NB: With TDF_DETAILS, it is assumed that cfun is available, so | |
716 that maybe_hot_bb_p and probably_never_executed_bb_p don't ICE. */ | |
0 | 717 |
111 | 718 void |
719 dump_bb_info (FILE *outf, basic_block bb, int indent, dump_flags_t flags, | |
720 bool do_header, bool do_footer) | |
0 | 721 { |
722 edge_iterator ei; | |
111 | 723 edge e; |
0 | 724 static const char * const bb_bitnames[] = |
725 { | |
111 | 726 #define DEF_BASIC_BLOCK_FLAG(NAME,IDX) #NAME , |
727 #include "cfg-flags.def" | |
728 NULL | |
729 #undef DEF_BASIC_BLOCK_FLAG | |
0 | 730 }; |
731 const unsigned n_bitnames = sizeof (bb_bitnames) / sizeof (char *); | |
111 | 732 bool first; |
733 char *s_indent = (char *) alloca ((size_t) indent + 1); | |
734 memset ((void *) s_indent, ' ', (size_t) indent); | |
735 s_indent[indent] = '\0'; | |
736 | |
737 gcc_assert (bb->flags <= BB_ALL_FLAGS); | |
738 | |
739 if (do_header) | |
740 { | |
741 unsigned i; | |
742 | |
743 fputs (";; ", outf); | |
744 fprintf (outf, "%sbasic block %d, loop depth %d", | |
745 s_indent, bb->index, bb_loop_depth (bb)); | |
746 if (flags & TDF_DETAILS) | |
747 { | |
748 struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl); | |
749 if (bb->count.initialized_p ()) | |
750 { | |
751 fputs (", count ", outf); | |
752 bb->count.dump (outf); | |
753 } | |
754 fprintf (outf, ", freq %i", bb->frequency); | |
755 if (maybe_hot_bb_p (fun, bb)) | |
756 fputs (", maybe hot", outf); | |
757 if (probably_never_executed_bb_p (fun, bb)) | |
758 fputs (", probably never executed", outf); | |
759 } | |
760 fputc ('\n', outf); | |
761 | |
762 if (flags & TDF_DETAILS) | |
763 { | |
764 check_bb_profile (bb, outf, indent); | |
765 fputs (";; ", outf); | |
766 fprintf (outf, "%s prev block ", s_indent); | |
767 if (bb->prev_bb) | |
768 fprintf (outf, "%d", bb->prev_bb->index); | |
769 else | |
770 fprintf (outf, "(nil)"); | |
771 fprintf (outf, ", next block "); | |
772 if (bb->next_bb) | |
773 fprintf (outf, "%d", bb->next_bb->index); | |
774 else | |
775 fprintf (outf, "(nil)"); | |
0 | 776 |
111 | 777 fputs (", flags:", outf); |
778 first = true; | |
779 for (i = 0; i < n_bitnames; i++) | |
780 if (bb->flags & (1 << i)) | |
781 { | |
782 if (first) | |
783 fputs (" (", outf); | |
784 else | |
785 fputs (", ", outf); | |
786 first = false; | |
787 fputs (bb_bitnames[i], outf); | |
788 } | |
789 if (!first) | |
790 fputc (')', outf); | |
791 fputc ('\n', outf); | |
792 } | |
0 | 793 |
111 | 794 fputs (";; ", outf); |
795 fprintf (outf, "%s pred: ", s_indent); | |
796 first = true; | |
797 FOR_EACH_EDGE (e, ei, bb->preds) | |
798 { | |
799 if (! first) | |
800 { | |
801 fputs (";; ", outf); | |
802 fprintf (outf, "%s ", s_indent); | |
803 } | |
804 first = false; | |
805 dump_edge_info (outf, e, flags, 0); | |
806 fputc ('\n', outf); | |
807 } | |
808 if (first) | |
809 fputc ('\n', outf); | |
810 } | |
0 | 811 |
111 | 812 if (do_footer) |
813 { | |
814 fputs (";; ", outf); | |
815 fprintf (outf, "%s succ: ", s_indent); | |
816 first = true; | |
817 FOR_EACH_EDGE (e, ei, bb->succs) | |
818 { | |
819 if (! first) | |
820 { | |
821 fputs (";; ", outf); | |
822 fprintf (outf, "%s ", s_indent); | |
823 } | |
824 first = false; | |
825 dump_edge_info (outf, e, flags, 1); | |
826 fputc ('\n', outf); | |
827 } | |
828 if (first) | |
829 fputc ('\n', outf); | |
830 } | |
0 | 831 } |
832 | |
833 /* Dumps a brief description of cfg to FILE. */ | |
834 | |
835 void | |
111 | 836 brief_dump_cfg (FILE *file, dump_flags_t flags) |
0 | 837 { |
838 basic_block bb; | |
839 | |
111 | 840 FOR_EACH_BB_FN (bb, cfun) |
0 | 841 { |
111 | 842 dump_bb_info (file, bb, 0, flags & TDF_DETAILS, true, true); |
0 | 843 } |
844 } | |
845 | |
846 /* An edge originally destinating BB of FREQUENCY and COUNT has been proved to | |
847 leave the block by TAKEN_EDGE. Update profile of BB such that edge E can be | |
848 redirected to destination of TAKEN_EDGE. | |
849 | |
850 This function may leave the profile inconsistent in the case TAKEN_EDGE | |
851 frequency or count is believed to be lower than FREQUENCY or COUNT | |
852 respectively. */ | |
853 void | |
854 update_bb_profile_for_threading (basic_block bb, int edge_frequency, | |
111 | 855 profile_count count, edge taken_edge) |
0 | 856 { |
857 edge c; | |
111 | 858 profile_probability prob; |
0 | 859 edge_iterator ei; |
860 | |
111 | 861 if (bb->count < count) |
0 | 862 { |
863 if (dump_file) | |
864 fprintf (dump_file, "bb %i count became negative after threading", | |
865 bb->index); | |
866 } | |
111 | 867 bb->count -= count; |
868 | |
869 bb->frequency -= edge_frequency; | |
870 if (bb->frequency < 0) | |
871 bb->frequency = 0; | |
0 | 872 |
873 /* Compute the probability of TAKEN_EDGE being reached via threaded edge. | |
874 Watch for overflows. */ | |
875 if (bb->frequency) | |
111 | 876 /* FIXME: We should get edge frequency as count. */ |
877 prob = profile_probability::probability_in_gcov_type | |
878 (edge_frequency, bb->frequency); | |
0 | 879 else |
111 | 880 prob = profile_probability::never (); |
0 | 881 if (prob > taken_edge->probability) |
882 { | |
883 if (dump_file) | |
111 | 884 { |
885 fprintf (dump_file, "Jump threading proved probability of edge " | |
886 "%i->%i too small (it is ", | |
887 taken_edge->src->index, taken_edge->dest->index); | |
888 taken_edge->probability.dump (dump_file); | |
889 fprintf (dump_file, " should be "); | |
890 prob.dump (dump_file); | |
891 fprintf (dump_file, ")\n"); | |
892 } | |
893 prob = taken_edge->probability.apply_scale (6, 8); | |
0 | 894 } |
895 | |
896 /* Now rescale the probabilities. */ | |
897 taken_edge->probability -= prob; | |
111 | 898 prob = prob.invert (); |
899 if (prob == profile_probability::never ()) | |
0 | 900 { |
901 if (dump_file) | |
902 fprintf (dump_file, "Edge frequencies of bb %i has been reset, " | |
903 "frequency of block should end up being 0, it is %i\n", | |
904 bb->index, bb->frequency); | |
111 | 905 EDGE_SUCC (bb, 0)->probability = profile_probability::guessed_always (); |
0 | 906 ei = ei_start (bb->succs); |
907 ei_next (&ei); | |
908 for (; (c = ei_safe_edge (ei)); ei_next (&ei)) | |
111 | 909 c->probability = profile_probability::guessed_never (); |
0 | 910 } |
111 | 911 else if (!(prob == profile_probability::always ())) |
0 | 912 { |
913 FOR_EACH_EDGE (c, ei, bb->succs) | |
111 | 914 c->probability /= prob; |
0 | 915 } |
916 | |
917 gcc_assert (bb == taken_edge->src); | |
918 } | |
919 | |
920 /* Multiply all frequencies of basic blocks in array BBS of length NBBS | |
921 by NUM/DEN, in int arithmetic. May lose some accuracy. */ | |
922 void | |
923 scale_bbs_frequencies_int (basic_block *bbs, int nbbs, int num, int den) | |
924 { | |
925 int i; | |
926 if (num < 0) | |
927 num = 0; | |
928 | |
929 /* Scale NUM and DEN to avoid overflows. Frequencies are in order of | |
930 10^4, if we make DEN <= 10^3, we can afford to upscale by 100 | |
931 and still safely fit in int during calculations. */ | |
932 if (den > 1000) | |
933 { | |
934 if (num > 1000000) | |
935 return; | |
936 | |
937 num = RDIV (1000 * num, den); | |
938 den = 1000; | |
939 } | |
940 if (num > 100 * den) | |
941 return; | |
942 | |
943 for (i = 0; i < nbbs; i++) | |
944 { | |
945 bbs[i]->frequency = RDIV (bbs[i]->frequency * num, den); | |
946 /* Make sure the frequencies do not grow over BB_FREQ_MAX. */ | |
947 if (bbs[i]->frequency > BB_FREQ_MAX) | |
948 bbs[i]->frequency = BB_FREQ_MAX; | |
111 | 949 bbs[i]->count = bbs[i]->count.apply_scale (num, den); |
0 | 950 } |
951 } | |
952 | |
953 /* numbers smaller than this value are safe to multiply without getting | |
954 64bit overflow. */ | |
111 | 955 #define MAX_SAFE_MULTIPLIER (1 << (sizeof (int64_t) * 4 - 1)) |
0 | 956 |
957 /* Multiply all frequencies of basic blocks in array BBS of length NBBS | |
958 by NUM/DEN, in gcov_type arithmetic. More accurate than previous | |
959 function but considerably slower. */ | |
960 void | |
961 scale_bbs_frequencies_gcov_type (basic_block *bbs, int nbbs, gcov_type num, | |
962 gcov_type den) | |
963 { | |
964 int i; | |
965 gcov_type fraction = RDIV (num * 65536, den); | |
966 | |
967 gcc_assert (fraction >= 0); | |
968 | |
969 if (num < MAX_SAFE_MULTIPLIER) | |
970 for (i = 0; i < nbbs; i++) | |
971 { | |
972 bbs[i]->frequency = RDIV (bbs[i]->frequency * num, den); | |
973 if (bbs[i]->count <= MAX_SAFE_MULTIPLIER) | |
111 | 974 bbs[i]->count = bbs[i]->count.apply_scale (num, den); |
0 | 975 else |
111 | 976 bbs[i]->count = bbs[i]->count.apply_scale (fraction, 65536); |
0 | 977 } |
978 else | |
979 for (i = 0; i < nbbs; i++) | |
980 { | |
981 if (sizeof (gcov_type) > sizeof (int)) | |
982 bbs[i]->frequency = RDIV (bbs[i]->frequency * num, den); | |
983 else | |
984 bbs[i]->frequency = RDIV (bbs[i]->frequency * fraction, 65536); | |
111 | 985 bbs[i]->count = bbs[i]->count.apply_scale (fraction, 65536); |
0 | 986 } |
987 } | |
988 | |
111 | 989 /* Multiply all frequencies of basic blocks in array BBS of length NBBS |
990 by NUM/DEN, in profile_count arithmetic. More accurate than previous | |
991 function but considerably slower. */ | |
992 void | |
993 scale_bbs_frequencies_profile_count (basic_block *bbs, int nbbs, | |
994 profile_count num, profile_count den) | |
995 { | |
996 int i; | |
997 | |
998 for (i = 0; i < nbbs; i++) | |
999 { | |
1000 bbs[i]->frequency = RDIV (bbs[i]->frequency * num.to_gcov_type (), | |
1001 den.to_gcov_type ()); | |
1002 bbs[i]->count = bbs[i]->count.apply_scale (num, den); | |
1003 } | |
1004 } | |
0 | 1005 |
/* Scale the frequencies and profile counts of all basic blocks in the
   array BBS of length NBBS by the probability P: frequencies via
   profile_probability::apply, counts via apply_probability.  */
void
scale_bbs_frequencies (basic_block *bbs, int nbbs,
		       profile_probability p)
{
  int i;

  for (i = 0; i < nbbs; i++)
    {
      bbs[i]->frequency = p.apply (bbs[i]->frequency);
      bbs[i]->count = bbs[i]->count.apply_probability (p);
    }
}
1021 | |
/* Helper types for hash tables.  */

struct htab_bb_copy_original_entry
{
  /* Index of the block (or loop) we are attaching info to; this is
     the hash key.  */
  int index1;
  /* Index of original or copy (depending on the hashtable).  */
  int index2;
};
1031 | |
/* Hash traits for the block/loop copy tables: entries are hashed and
   compared on INDEX1 alone; INDEX2 is payload.  */

struct bb_copy_hasher : nofree_ptr_hash <htab_bb_copy_original_entry>
{
  static inline hashval_t hash (const htab_bb_copy_original_entry *);
  static inline bool equal (const htab_bb_copy_original_entry *existing,
			    const htab_bb_copy_original_entry * candidate);
};
0 | 1038 |
111 | 1039 inline hashval_t |
1040 bb_copy_hasher::hash (const htab_bb_copy_original_entry *data) | |
1041 { | |
0 | 1042 return data->index1; |
1043 } | |
111 | 1044 |
1045 inline bool | |
1046 bb_copy_hasher::equal (const htab_bb_copy_original_entry *data, | |
1047 const htab_bb_copy_original_entry *data2) | |
0 | 1048 { |
1049 return data->index1 == data2->index1; | |
1050 } | |
1051 | |
/* Data structures used to maintain mapping between basic blocks and
   copies.  */
static hash_table<bb_copy_hasher> *bb_original;
static hash_table<bb_copy_hasher> *bb_copy;

/* And between loops and copies.  */
static hash_table<bb_copy_hasher> *loop_copy;
/* Allocator backing the entries of the three tables above.  Its
   non-NULLness also doubles as the "tables are initialized" flag
   (see original_copy_tables_initialized_p).  */
static object_allocator<htab_bb_copy_original_entry> *original_copy_bb_pool;
1060 | |
0 | 1061 /* Initialize the data structures to maintain mapping between blocks |
1062 and its copies. */ | |
1063 void | |
1064 initialize_original_copy_tables (void) | |
1065 { | |
111 | 1066 original_copy_bb_pool = new object_allocator<htab_bb_copy_original_entry> |
1067 ("original_copy"); | |
1068 bb_original = new hash_table<bb_copy_hasher> (10); | |
1069 bb_copy = new hash_table<bb_copy_hasher> (10); | |
1070 loop_copy = new hash_table<bb_copy_hasher> (10); | |
1071 } | |
1072 | |
1073 /* Reset the data structures to maintain mapping between blocks and | |
1074 its copies. */ | |
1075 | |
1076 void | |
1077 reset_original_copy_tables (void) | |
1078 { | |
1079 gcc_assert (original_copy_bb_pool); | |
1080 bb_original->empty (); | |
1081 bb_copy->empty (); | |
1082 loop_copy->empty (); | |
0 | 1083 } |
1084 | |
1085 /* Free the data structures to maintain mapping between blocks and | |
1086 its copies. */ | |
1087 void | |
1088 free_original_copy_tables (void) | |
1089 { | |
1090 gcc_assert (original_copy_bb_pool); | |
111 | 1091 delete bb_copy; |
0 | 1092 bb_copy = NULL; |
111 | 1093 delete bb_original; |
0 | 1094 bb_original = NULL; |
111 | 1095 delete loop_copy; |
0 | 1096 loop_copy = NULL; |
111 | 1097 delete original_copy_bb_pool; |
0 | 1098 original_copy_bb_pool = NULL; |
1099 } | |
1100 | |
111 | 1101 /* Return true iff we have had a call to initialize_original_copy_tables |
1102 without a corresponding call to free_original_copy_tables. */ | |
1103 | |
1104 bool | |
1105 original_copy_tables_initialized_p (void) | |
1106 { | |
1107 return original_copy_bb_pool != NULL; | |
1108 } | |
1109 | |
0 | 1110 /* Removes the value associated with OBJ from table TAB. */ |
1111 | |
1112 static void | |
111 | 1113 copy_original_table_clear (hash_table<bb_copy_hasher> *tab, unsigned obj) |
0 | 1114 { |
111 | 1115 htab_bb_copy_original_entry **slot; |
0 | 1116 struct htab_bb_copy_original_entry key, *elt; |
1117 | |
1118 if (!original_copy_bb_pool) | |
1119 return; | |
1120 | |
1121 key.index1 = obj; | |
111 | 1122 slot = tab->find_slot (&key, NO_INSERT); |
0 | 1123 if (!slot) |
1124 return; | |
1125 | |
111 | 1126 elt = *slot; |
1127 tab->clear_slot (slot); | |
1128 original_copy_bb_pool->remove (elt); | |
0 | 1129 } |
1130 | |
1131 /* Sets the value associated with OBJ in table TAB to VAL. | |
1132 Do nothing when data structures are not initialized. */ | |
1133 | |
1134 static void | |
111 | 1135 copy_original_table_set (hash_table<bb_copy_hasher> *tab, |
1136 unsigned obj, unsigned val) | |
0 | 1137 { |
1138 struct htab_bb_copy_original_entry **slot; | |
1139 struct htab_bb_copy_original_entry key; | |
1140 | |
1141 if (!original_copy_bb_pool) | |
1142 return; | |
1143 | |
1144 key.index1 = obj; | |
111 | 1145 slot = tab->find_slot (&key, INSERT); |
0 | 1146 if (!*slot) |
1147 { | |
111 | 1148 *slot = original_copy_bb_pool->allocate (); |
0 | 1149 (*slot)->index1 = obj; |
1150 } | |
1151 (*slot)->index2 = val; | |
1152 } | |
1153 | |
1154 /* Set original for basic block. Do nothing when data structures are not | |
1155 initialized so passes not needing this don't need to care. */ | |
1156 void | |
1157 set_bb_original (basic_block bb, basic_block original) | |
1158 { | |
1159 copy_original_table_set (bb_original, bb->index, original->index); | |
1160 } | |
1161 | |
1162 /* Get the original basic block. */ | |
1163 basic_block | |
1164 get_bb_original (basic_block bb) | |
1165 { | |
1166 struct htab_bb_copy_original_entry *entry; | |
1167 struct htab_bb_copy_original_entry key; | |
1168 | |
1169 gcc_assert (original_copy_bb_pool); | |
1170 | |
1171 key.index1 = bb->index; | |
111 | 1172 entry = bb_original->find (&key); |
0 | 1173 if (entry) |
111 | 1174 return BASIC_BLOCK_FOR_FN (cfun, entry->index2); |
0 | 1175 else |
1176 return NULL; | |
1177 } | |
1178 | |
1179 /* Set copy for basic block. Do nothing when data structures are not | |
1180 initialized so passes not needing this don't need to care. */ | |
1181 void | |
1182 set_bb_copy (basic_block bb, basic_block copy) | |
1183 { | |
1184 copy_original_table_set (bb_copy, bb->index, copy->index); | |
1185 } | |
1186 | |
1187 /* Get the copy of basic block. */ | |
1188 basic_block | |
1189 get_bb_copy (basic_block bb) | |
1190 { | |
1191 struct htab_bb_copy_original_entry *entry; | |
1192 struct htab_bb_copy_original_entry key; | |
1193 | |
1194 gcc_assert (original_copy_bb_pool); | |
1195 | |
1196 key.index1 = bb->index; | |
111 | 1197 entry = bb_copy->find (&key); |
0 | 1198 if (entry) |
111 | 1199 return BASIC_BLOCK_FOR_FN (cfun, entry->index2); |
0 | 1200 else |
1201 return NULL; | |
1202 } | |
1203 | |
1204 /* Set copy for LOOP to COPY. Do nothing when data structures are not | |
1205 initialized so passes not needing this don't need to care. */ | |
1206 | |
1207 void | |
1208 set_loop_copy (struct loop *loop, struct loop *copy) | |
1209 { | |
1210 if (!copy) | |
1211 copy_original_table_clear (loop_copy, loop->num); | |
1212 else | |
1213 copy_original_table_set (loop_copy, loop->num, copy->num); | |
1214 } | |
1215 | |
1216 /* Get the copy of LOOP. */ | |
1217 | |
1218 struct loop * | |
1219 get_loop_copy (struct loop *loop) | |
1220 { | |
1221 struct htab_bb_copy_original_entry *entry; | |
1222 struct htab_bb_copy_original_entry key; | |
1223 | |
1224 gcc_assert (original_copy_bb_pool); | |
1225 | |
1226 key.index1 = loop->num; | |
111 | 1227 entry = loop_copy->find (&key); |
0 | 1228 if (entry) |
111 | 1229 return get_loop (cfun, entry->index2); |
0 | 1230 else |
1231 return NULL; | |
1232 } |