comparison gcc/bb-reorder.c @ 0:a06113de4d67
first commit
author:   kent <kent@cr.ie.u-ryukyu.ac.jp>
date:     Fri, 17 Jul 2009 14:47:48 +0900
parents:
children: 77e2b8dfacca
comparing -1:000000000000 with 0:a06113de4d67
1 /* Basic block reordering routines for the GNU compiler. | |
2 Copyright (C) 2000, 2002, 2003, 2004, 2005, 2006, 2007, 2008 | |
3 Free Software Foundation, Inc. | |
4 | |
5 This file is part of GCC. | |
6 | |
7 GCC is free software; you can redistribute it and/or modify it | |
8 under the terms of the GNU General Public License as published by | |
9 the Free Software Foundation; either version 3, or (at your option) | |
10 any later version. | |
11 | |
12 GCC is distributed in the hope that it will be useful, but WITHOUT | |
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | |
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public | |
15 License for more details. | |
16 | |
17 You should have received a copy of the GNU General Public License | |
18 along with GCC; see the file COPYING3. If not see | |
19 <http://www.gnu.org/licenses/>. */ | |
20 | |
21 /* This (greedy) algorithm constructs traces in several rounds. | |
22 The construction starts from "seeds". The seed for the first round | |
23 is the entry point of the function. When there is more than one seed, | |
24 the one with the lowest key in the heap is selected first | |
25 (see function bb_to_key). Then the algorithm repeatedly adds the most | |
26 probable successor to the end of a trace. Finally it connects the traces. | |
27 | |
28 There are two parameters: Branch Threshold and Exec Threshold. | |
29 If the probability of the edge to a successor of the current basic block is | |
30 lower than Branch Threshold, or the frequency of the successor is lower than | |
31 Exec Threshold, the successor becomes a seed in one of the next rounds. | |
32 Each round uses lower values of these parameters than the previous one. | |
33 The last round has to have these parameters set to zero | |
34 so that the remaining blocks are picked up. | |
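
   For illustration, the driver loop in find_traces below re-scales both
   thresholds from the tables further down on every round (a sketch of
   code that appears verbatim later in this file):

     for (i = 0; i < number_of_rounds; i++)
       find_traces_1_round (REG_BR_PROB_BASE * branch_threshold[i] / 1000,
                            max_entry_frequency * exec_threshold[i] / 1000,
                            count_threshold, traces, n_traces, i, &heap,
                            number_of_rounds);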
35 | |
36 The algorithm selects the most probable successor from all unvisited | |
37 successors and successors that have been added to this trace. | |
38 The other successors (those that have not been "sent" to the next round) | |
39 become seeds for the current round, and the secondary traces start in them. | |
40 If the successor has not been visited in this trace, it is added to the trace | |
41 (however, there is some heuristic for simple branches). | |
42 If the successor has been visited in this trace, a loop has been found. | |
43 If the loop has many iterations, the loop is rotated so that the | |
44 source block of the most probable edge going out of the loop | |
45 is the last block of the trace. | |
46 If the loop has few iterations and there is no edge from the last block of | |
47 the loop going out of the loop, the loop header is duplicated. | |
48 Finally, the construction of the trace is terminated. | |
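
   For example (an illustrative case, not taken from real profile data):
   if a trace contains the loop A -> B -> C -> A and the most frequent
   edge leaving the loop starts in B, rotate_loop reorders the loop part
   of the trace to C -> A -> B, so that B, the source of the hot exit,
   ends the trace.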
49 | |
50 When connecting traces, the algorithm first checks whether there is an edge | |
51 from the last block of one trace to the first block of another trace. | |
52 When there are still some unconnected traces, it checks whether there exists | |
53 a basic block BB such that BB is a successor of the last bb of one trace | |
54 and BB is a predecessor of the first block of another trace. In this case, | |
55 BB is duplicated and the traces are connected through this duplicate. | |
56 The remaining traces are simply connected, so there will be a jump to the | |
57 beginning of each remaining trace. | |
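
   Schematically, for a trace ending in block X and a trace starting in
   block Y (a summary of connect_traces below):

     1) an edge X -> Y exists:        append the second trace directly;
     2) X -> BB and BB -> Y exist:    duplicate BB between the two traces;
     3) otherwise:                    an unconditional jump X -> Y is
                                      emitted when the layout is fixed up.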
58 | |
59 | |
60 References: | |
61 | |
62 "Software Trace Cache" | |
63 A. Ramirez, J. Larriba-Pey, C. Navarro, J. Torrellas and M. Valero; 1999 | |
64 http://citeseer.nj.nec.com/15361.html | |
65 | |
66 */ | |
67 | |
68 #include "config.h" | |
69 #include "system.h" | |
70 #include "coretypes.h" | |
71 #include "tm.h" | |
72 #include "rtl.h" | |
73 #include "regs.h" | |
74 #include "flags.h" | |
75 #include "timevar.h" | |
76 #include "output.h" | |
77 #include "cfglayout.h" | |
78 #include "fibheap.h" | |
79 #include "target.h" | |
80 #include "function.h" | |
81 #include "tm_p.h" | |
82 #include "obstack.h" | |
83 #include "expr.h" | |
84 #include "params.h" | |
85 #include "toplev.h" | |
86 #include "tree-pass.h" | |
87 #include "df.h" | |
88 | |
89 #ifndef HAVE_conditional_execution | |
90 #define HAVE_conditional_execution 0 | |
91 #endif | |
92 | |
93 /* The number of rounds. In most cases there will only be 4 rounds, but | |
94 when partitioning hot and cold basic blocks into separate sections of | |
95 the .o file there will be an extra round. */ | |
96 #define N_ROUNDS 5 | |
97 | |
98 /* Stubs in case we don't have a return insn. | |
99 We have to check at runtime too, not only at compile time. */ | |
100 | |
101 #ifndef HAVE_return | |
102 #define HAVE_return 0 | |
103 #define gen_return() NULL_RTX | |
104 #endif | |
105 | |
106 | |
107 /* Branch thresholds in thousandths (per mille) of the REG_BR_PROB_BASE. */ | |
108 static int branch_threshold[N_ROUNDS] = {400, 200, 100, 0, 0}; | |
109 | |
110 /* Exec thresholds in thousandths (per mille) of the frequency of bb 0. */ | |
111 static int exec_threshold[N_ROUNDS] = {500, 200, 50, 0, 0}; | |
112 | |
113 /* If an edge's frequency is lower than DUPLICATION_THRESHOLD per mille of the | |
114 entry block's frequency, the edge destination is not duplicated while connecting traces. */ | |
115 #define DUPLICATION_THRESHOLD 100 | |
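
/* A worked example of the per-mille scaling (illustrative; it assumes
   REG_BR_PROB_BASE is 10000, its customary value in this tree): in round 0,
   branch_threshold[0] == 400 yields a branch threshold of
   10000 * 400 / 1000 == 4000, so edges taken with probability below 40%
   do not extend a trace, and exec_threshold[0] == 500 rejects successors
   whose frequency is below half that of the hottest entry block.  */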
116 | |
117 /* Length of unconditional jump instruction. */ | |
118 static int uncond_jump_length; | |
119 | |
120 /* Structure to hold needed information for each basic block. */ | |
121 typedef struct bbro_basic_block_data_def | |
122 { | |
123 /* Which trace is the bb start of (-1 means it is not a start of a trace). */ | |
124 int start_of_trace; | |
125 | |
126 /* Which trace is the bb end of (-1 means it is not an end of a trace). */ | |
127 int end_of_trace; | |
128 | |
129 /* Which trace is the bb in? */ | |
130 int in_trace; | |
131 | |
132 /* Which heap is BB in (if any)? */ | |
133 fibheap_t heap; | |
134 | |
135 /* Which heap node is BB in (if any)? */ | |
136 fibnode_t node; | |
137 } bbro_basic_block_data; | |
138 | |
139 /* The current size of the following dynamic array. */ | |
140 static int array_size; | |
141 | |
142 /* The array which holds needed information for basic blocks. */ | |
143 static bbro_basic_block_data *bbd; | |
144 | |
145 /* To avoid frequent reallocation, the size of the arrays is greater than | |
146 needed; the number of elements is (not less than) 1.25 * size_wanted. */ | |
147 #define GET_ARRAY_SIZE(X) ((((X) / 4) + 1) * 5) | |
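
/* For example, GET_ARRAY_SIZE (10) == ((10 / 4) + 1) * 5 == 15 and
   GET_ARRAY_SIZE (100) == 130, i.e. at least 1.25 times the requested
   size in both cases.  */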
148 | |
149 /* Free the memory and set the pointer to NULL. */ | |
150 #define FREE(P) (gcc_assert (P), free (P), P = 0) | |
151 | |
152 /* Structure for holding information about a trace. */ | |
153 struct trace | |
154 { | |
155 /* First and last basic block of the trace. */ | |
156 basic_block first, last; | |
157 | |
158 /* The round of the STC creation which this trace was found in. */ | |
159 int round; | |
160 | |
161 /* The length (i.e. the number of basic blocks) of the trace. */ | |
162 int length; | |
163 }; | |
164 | |
165 /* Maximum frequency and count of one of the entry blocks. */ | |
166 static int max_entry_frequency; | |
167 static gcov_type max_entry_count; | |
168 | |
169 /* Local function prototypes. */ | |
170 static void find_traces (int *, struct trace *); | |
171 static basic_block rotate_loop (edge, struct trace *, int); | |
172 static void mark_bb_visited (basic_block, int); | |
173 static void find_traces_1_round (int, int, gcov_type, struct trace *, int *, | |
174 int, fibheap_t *, int); | |
175 static basic_block copy_bb (basic_block, edge, basic_block, int); | |
176 static fibheapkey_t bb_to_key (basic_block); | |
177 static bool better_edge_p (const_basic_block, const_edge, int, int, int, int, const_edge); | |
178 static void connect_traces (int, struct trace *); | |
179 static bool copy_bb_p (const_basic_block, int); | |
180 static int get_uncond_jump_length (void); | |
181 static bool push_to_next_round_p (const_basic_block, int, int, int, gcov_type); | |
182 static void find_rarely_executed_basic_blocks_and_crossing_edges (edge **, | |
183 int *, | |
184 int *); | |
185 static void add_labels_and_missing_jumps (edge *, int); | |
186 static void add_reg_crossing_jump_notes (void); | |
187 static void fix_up_fall_thru_edges (void); | |
188 static void fix_edges_for_rarely_executed_code (edge *, int); | |
189 static void fix_crossing_conditional_branches (void); | |
190 static void fix_crossing_unconditional_branches (void); | |
191 | |
192 /* Check to see if bb should be pushed into the next round of trace | |
193 collections or not. Reasons for pushing the block forward are: 1) the | |
194 block is cold, we are doing partitioning, and there will be | |
195 another round (cold partition blocks are not supposed to be | |
196 collected into traces until the very last round); or 2) there will | |
197 be another round, and the basic block is not "hot enough" for the | |
198 current round of trace collection. */ | |
199 | |
200 static bool | |
201 push_to_next_round_p (const_basic_block bb, int round, int number_of_rounds, | |
202 int exec_th, gcov_type count_th) | |
203 { | |
204 bool there_exists_another_round; | |
205 bool block_not_hot_enough; | |
206 | |
207 there_exists_another_round = round < number_of_rounds - 1; | |
208 | |
209 block_not_hot_enough = (bb->frequency < exec_th | |
210 || bb->count < count_th | |
211 || probably_never_executed_bb_p (bb)); | |
212 | |
213 if (there_exists_another_round | |
214 && block_not_hot_enough) | |
215 return true; | |
216 else | |
217 return false; | |
218 } | |
219 | |
220 /* Find the traces for Software Trace Cache. Chain each trace through | |
221 RBI()->next. Store the number of traces in *N_TRACES and the | |
222 descriptions of the traces in TRACES. */ | |
223 | |
224 static void | |
225 find_traces (int *n_traces, struct trace *traces) | |
226 { | |
227 int i; | |
228 int number_of_rounds; | |
229 edge e; | |
230 edge_iterator ei; | |
231 fibheap_t heap; | |
232 | |
233 /* Add one extra round of trace collection when partitioning hot/cold | |
234 basic blocks into separate sections. The last round is for all the | |
235 cold blocks (and ONLY the cold blocks). */ | |
236 | |
237 number_of_rounds = N_ROUNDS - 1; | |
238 | |
239 /* Insert entry points of function into heap. */ | |
240 heap = fibheap_new (); | |
241 max_entry_frequency = 0; | |
242 max_entry_count = 0; | |
243 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) | |
244 { | |
245 bbd[e->dest->index].heap = heap; | |
246 bbd[e->dest->index].node = fibheap_insert (heap, bb_to_key (e->dest), | |
247 e->dest); | |
248 if (e->dest->frequency > max_entry_frequency) | |
249 max_entry_frequency = e->dest->frequency; | |
250 if (e->dest->count > max_entry_count) | |
251 max_entry_count = e->dest->count; | |
252 } | |
253 | |
254 /* Find the traces. */ | |
255 for (i = 0; i < number_of_rounds; i++) | |
256 { | |
257 gcov_type count_threshold; | |
258 | |
259 if (dump_file) | |
260 fprintf (dump_file, "STC - round %d\n", i + 1); | |
261 | |
262 if (max_entry_count < INT_MAX / 1000) | |
263 count_threshold = max_entry_count * exec_threshold[i] / 1000; | |
264 else | |
265 count_threshold = max_entry_count / 1000 * exec_threshold[i]; | |
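/* Both branches compute the same per-mille scaling; dividing first is
   chosen only when multiplying first could overflow, at the cost of the
   sub-1000 remainder of the count (a descriptive note on the guard
   above).  */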
266 | |
267 find_traces_1_round (REG_BR_PROB_BASE * branch_threshold[i] / 1000, | |
268 max_entry_frequency * exec_threshold[i] / 1000, | |
269 count_threshold, traces, n_traces, i, &heap, | |
270 number_of_rounds); | |
271 } | |
272 fibheap_delete (heap); | |
273 | |
274 if (dump_file) | |
275 { | |
276 for (i = 0; i < *n_traces; i++) | |
277 { | |
278 basic_block bb; | |
279 fprintf (dump_file, "Trace %d (round %d): ", i + 1, | |
280 traces[i].round + 1); | |
281 for (bb = traces[i].first; bb != traces[i].last; bb = (basic_block) bb->aux) | |
282 fprintf (dump_file, "%d [%d] ", bb->index, bb->frequency); | |
283 fprintf (dump_file, "%d [%d]\n", bb->index, bb->frequency); | |
284 } | |
285 fflush (dump_file); | |
286 } | |
287 } | |
288 | |
289 /* Rotate loop whose back edge is BACK_EDGE in the tail of trace TRACE | |
290 (with sequential number TRACE_N). */ | |
291 | |
292 static basic_block | |
293 rotate_loop (edge back_edge, struct trace *trace, int trace_n) | |
294 { | |
295 basic_block bb; | |
296 | |
297 /* Information about the best end (end after rotation) of the loop. */ | |
298 basic_block best_bb = NULL; | |
299 edge best_edge = NULL; | |
300 int best_freq = -1; | |
301 gcov_type best_count = -1; | |
302 /* The best edge is preferred when its destination is not visited yet | |
303 or is a start block of some trace. */ | |
304 bool is_preferred = false; | |
305 | |
306 /* Find the most frequent edge that goes out from current trace. */ | |
307 bb = back_edge->dest; | |
308 do | |
309 { | |
310 edge e; | |
311 edge_iterator ei; | |
312 | |
313 FOR_EACH_EDGE (e, ei, bb->succs) | |
314 if (e->dest != EXIT_BLOCK_PTR | |
315 && e->dest->il.rtl->visited != trace_n | |
316 && (e->flags & EDGE_CAN_FALLTHRU) | |
317 && !(e->flags & EDGE_COMPLEX)) | |
318 { | |
319 if (is_preferred) | |
320 { | |
321 /* The best edge is preferred. */ | |
322 if (!e->dest->il.rtl->visited | |
323 || bbd[e->dest->index].start_of_trace >= 0) | |
324 { | |
325 /* The current edge E is also preferred. */ | |
326 int freq = EDGE_FREQUENCY (e); | |
327 if (freq > best_freq || e->count > best_count) | |
328 { | |
329 best_freq = freq; | |
330 best_count = e->count; | |
331 best_edge = e; | |
332 best_bb = bb; | |
333 } | |
334 } | |
335 } | |
336 else | |
337 { | |
338 if (!e->dest->il.rtl->visited | |
339 || bbd[e->dest->index].start_of_trace >= 0) | |
340 { | |
341 /* The current edge E is preferred. */ | |
342 is_preferred = true; | |
343 best_freq = EDGE_FREQUENCY (e); | |
344 best_count = e->count; | |
345 best_edge = e; | |
346 best_bb = bb; | |
347 } | |
348 else | |
349 { | |
350 int freq = EDGE_FREQUENCY (e); | |
351 if (!best_edge || freq > best_freq || e->count > best_count) | |
352 { | |
353 best_freq = freq; | |
354 best_count = e->count; | |
355 best_edge = e; | |
356 best_bb = bb; | |
357 } | |
358 } | |
359 } | |
360 } | |
361 bb = (basic_block) bb->aux; | |
362 } | |
363 while (bb != back_edge->dest); | |
364 | |
365 if (best_bb) | |
366 { | |
367 /* Rotate the loop so that the BEST_EDGE goes out from the last block of | |
368 the trace. */ | |
369 if (back_edge->dest == trace->first) | |
370 { | |
371 trace->first = (basic_block) best_bb->aux; | |
372 } | |
373 else | |
374 { | |
375 basic_block prev_bb; | |
376 | |
377 for (prev_bb = trace->first; | |
378 prev_bb->aux != back_edge->dest; | |
379 prev_bb = (basic_block) prev_bb->aux) | |
380 ; | |
381 prev_bb->aux = best_bb->aux; | |
382 | |
383 /* Try to get rid of uncond jump to cond jump. */ | |
384 if (single_succ_p (prev_bb)) | |
385 { | |
386 basic_block header = single_succ (prev_bb); | |
387 | |
388 /* Duplicate HEADER if it is a small block containing cond jump | |
389 in the end. */ | |
390 if (any_condjump_p (BB_END (header)) && copy_bb_p (header, 0) | |
391 && !find_reg_note (BB_END (header), REG_CROSSING_JUMP, | |
392 NULL_RTX)) | |
393 copy_bb (header, single_succ_edge (prev_bb), prev_bb, trace_n); | |
394 } | |
395 } | |
396 } | |
397 else | |
398 { | |
399 /* We have not found suitable loop tail so do no rotation. */ | |
400 best_bb = back_edge->src; | |
401 } | |
402 best_bb->aux = NULL; | |
403 return best_bb; | |
404 } | |
405 | |
406 /* This function marks BB that it was visited in trace number TRACE. */ | |
407 | |
408 static void | |
409 mark_bb_visited (basic_block bb, int trace) | |
410 { | |
411 bb->il.rtl->visited = trace; | |
412 if (bbd[bb->index].heap) | |
413 { | |
414 fibheap_delete_node (bbd[bb->index].heap, bbd[bb->index].node); | |
415 bbd[bb->index].heap = NULL; | |
416 bbd[bb->index].node = NULL; | |
417 } | |
418 } | |
419 | |
420 /* One round of finding traces. Find traces for BRANCH_TH and EXEC_TH, i.e. do | |
421 not include in traces basic blocks whose probability is lower than BRANCH_TH, | |
422 whose frequency is lower than EXEC_TH, or whose count is lower than | |
423 COUNT_TH. It stores the new traces into TRACES and updates the number of | |
424 traces in *N_TRACES. Sets the round (which the traces belong to) to ROUND. It | |
425 expects the starting basic blocks to be in *HEAP, and at the end it deletes | |
426 *HEAP and stores the starting points for the next round into a new *HEAP. */ | |
427 | |
428 static void | |
429 find_traces_1_round (int branch_th, int exec_th, gcov_type count_th, | |
430 struct trace *traces, int *n_traces, int round, | |
431 fibheap_t *heap, int number_of_rounds) | |
432 { | |
433 /* Heap for discarded basic blocks which are possible starting points for | |
434 the next round. */ | |
435 fibheap_t new_heap = fibheap_new (); | |
436 | |
437 while (!fibheap_empty (*heap)) | |
438 { | |
439 basic_block bb; | |
440 struct trace *trace; | |
441 edge best_edge, e; | |
442 fibheapkey_t key; | |
443 edge_iterator ei; | |
444 | |
445 bb = (basic_block) fibheap_extract_min (*heap); | |
446 bbd[bb->index].heap = NULL; | |
447 bbd[bb->index].node = NULL; | |
448 | |
449 if (dump_file) | |
450 fprintf (dump_file, "Getting bb %d\n", bb->index); | |
451 | |
452 /* If the BB's frequency is too low, send BB to the next round. When | |
453 partitioning hot/cold blocks into separate sections, make sure all | |
454 the cold blocks (and ONLY the cold blocks) go into the (extra) final | |
455 round. */ | |
456 | |
457 if (push_to_next_round_p (bb, round, number_of_rounds, exec_th, | |
458 count_th)) | |
459 { | |
460 int key = bb_to_key (bb); | |
461 bbd[bb->index].heap = new_heap; | |
462 bbd[bb->index].node = fibheap_insert (new_heap, key, bb); | |
463 | |
464 if (dump_file) | |
465 fprintf (dump_file, | |
466 " Possible start point of next round: %d (key: %d)\n", | |
467 bb->index, key); | |
468 continue; | |
469 } | |
470 | |
471 trace = traces + *n_traces; | |
472 trace->first = bb; | |
473 trace->round = round; | |
474 trace->length = 0; | |
475 bbd[bb->index].in_trace = *n_traces; | |
476 (*n_traces)++; | |
477 | |
478 do | |
479 { | |
480 int prob, freq; | |
481 bool ends_in_call; | |
482 | |
483 /* The probability and frequency of the best edge. */ | |
484 int best_prob = INT_MIN / 2; | |
485 int best_freq = INT_MIN / 2; | |
486 | |
487 best_edge = NULL; | |
488 mark_bb_visited (bb, *n_traces); | |
489 trace->length++; | |
490 | |
491 if (dump_file) | |
492 fprintf (dump_file, "Basic block %d was visited in trace %d\n", | |
493 bb->index, *n_traces - 1); | |
494 | |
495 ends_in_call = block_ends_with_call_p (bb); | |
496 | |
497 /* Select the successor that will be placed after BB. */ | |
498 FOR_EACH_EDGE (e, ei, bb->succs) | |
499 { | |
500 gcc_assert (!(e->flags & EDGE_FAKE)); | |
501 | |
502 if (e->dest == EXIT_BLOCK_PTR) | |
503 continue; | |
504 | |
505 if (e->dest->il.rtl->visited | |
506 && e->dest->il.rtl->visited != *n_traces) | |
507 continue; | |
508 | |
509 if (BB_PARTITION (e->dest) != BB_PARTITION (bb)) | |
510 continue; | |
511 | |
512 prob = e->probability; | |
513 freq = e->dest->frequency; | |
514 | |
515 /* The only sensible preference for a call instruction is the | |
516 fallthru edge. Don't bother selecting anything else. */ | |
517 if (ends_in_call) | |
518 { | |
519 if (e->flags & EDGE_CAN_FALLTHRU) | |
520 { | |
521 best_edge = e; | |
522 best_prob = prob; | |
523 best_freq = freq; | |
524 } | |
525 continue; | |
526 } | |
527 | |
528 /* Edge that cannot be fallthru or improbable or infrequent | |
529 successor (i.e. it is unsuitable successor). */ | |
530 if (!(e->flags & EDGE_CAN_FALLTHRU) || (e->flags & EDGE_COMPLEX) | |
531 || prob < branch_th || EDGE_FREQUENCY (e) < exec_th | |
532 || e->count < count_th) | |
533 continue; | |
534 | |
535 /* If partitioning hot/cold basic blocks, don't consider edges | |
536 that cross section boundaries. */ | |
537 | |
538 if (better_edge_p (bb, e, prob, freq, best_prob, best_freq, | |
539 best_edge)) | |
540 { | |
541 best_edge = e; | |
542 best_prob = prob; | |
543 best_freq = freq; | |
544 } | |
545 } | |
546 | |
547 /* If the best destination has multiple predecessors, and can be | |
548 duplicated cheaper than a jump, don't allow it to be added | |
549 to a trace. We'll duplicate it when connecting traces. */ | |
550 if (best_edge && EDGE_COUNT (best_edge->dest->preds) >= 2 | |
551 && copy_bb_p (best_edge->dest, 0)) | |
552 best_edge = NULL; | |
553 | |
554 /* Add all non-selected successors to the heaps. */ | |
555 FOR_EACH_EDGE (e, ei, bb->succs) | |
556 { | |
557 if (e == best_edge | |
558 || e->dest == EXIT_BLOCK_PTR | |
559 || e->dest->il.rtl->visited) | |
560 continue; | |
561 | |
562 key = bb_to_key (e->dest); | |
563 | |
564 if (bbd[e->dest->index].heap) | |
565 { | |
566 /* E->DEST is already in some heap. */ | |
567 if (key != bbd[e->dest->index].node->key) | |
568 { | |
569 if (dump_file) | |
570 { | |
571 fprintf (dump_file, | |
572 "Changing key for bb %d from %ld to %ld.\n", | |
573 e->dest->index, | |
574 (long) bbd[e->dest->index].node->key, | |
575 key); | |
576 } | |
577 fibheap_replace_key (bbd[e->dest->index].heap, | |
578 bbd[e->dest->index].node, key); | |
579 } | |
580 } | |
581 else | |
582 { | |
583 fibheap_t which_heap = *heap; | |
584 | |
585 prob = e->probability; | |
586 freq = EDGE_FREQUENCY (e); | |
587 | |
588 if (!(e->flags & EDGE_CAN_FALLTHRU) | |
589 || (e->flags & EDGE_COMPLEX) | |
590 || prob < branch_th || freq < exec_th | |
591 || e->count < count_th) | |
592 { | |
593 /* When partitioning hot/cold basic blocks, make sure | |
594 the cold blocks (and only the cold blocks) all get | |
595 pushed to the last round of trace collection. */ | |
596 | |
597 if (push_to_next_round_p (e->dest, round, | |
598 number_of_rounds, | |
599 exec_th, count_th)) | |
600 which_heap = new_heap; | |
601 } | |
602 | |
603 bbd[e->dest->index].heap = which_heap; | |
604 bbd[e->dest->index].node = fibheap_insert (which_heap, | |
605 key, e->dest); | |
606 | |
607 if (dump_file) | |
608 { | |
609 fprintf (dump_file, | |
610 " Possible start of %s round: %d (key: %ld)\n", | |
611 (which_heap == new_heap) ? "next" : "this", | |
612 e->dest->index, (long) key); | |
613 } | |
614 | |
615 } | |
616 } | |
617 | |
618 if (best_edge) /* Suitable successor was found. */ | |
619 { | |
620 if (best_edge->dest->il.rtl->visited == *n_traces) | |
621 { | |
622 /* We do nothing with one-basic-block loops. */ | |
623 if (best_edge->dest != bb) | |
624 { | |
625 if (EDGE_FREQUENCY (best_edge) | |
626 > 4 * best_edge->dest->frequency / 5) | |
627 { | |
628 /* The loop has at least 4 iterations. If the loop | |
629 header is not the first block of the function | |
630 we can rotate the loop. */ | |
631 | |
632 if (best_edge->dest != ENTRY_BLOCK_PTR->next_bb) | |
633 { | |
634 if (dump_file) | |
635 { | |
636 fprintf (dump_file, | |
637 "Rotating loop %d - %d\n", | |
638 best_edge->dest->index, bb->index); | |
639 } | |
640 bb->aux = best_edge->dest; | |
641 bbd[best_edge->dest->index].in_trace = | |
642 (*n_traces) - 1; | |
643 bb = rotate_loop (best_edge, trace, *n_traces); | |
644 } | |
645 } | |
646 else | |
647 { | |
648 /* The loop has less than 4 iterations. */ | |
649 | |
650 if (single_succ_p (bb) | |
651 && copy_bb_p (best_edge->dest, | |
652 optimize_edge_for_speed_p (best_edge))) | |
653 { | |
654 bb = copy_bb (best_edge->dest, best_edge, bb, | |
655 *n_traces); | |
656 trace->length++; | |
657 } | |
658 } | |
659 } | |
660 | |
661 /* Terminate the trace. */ | |
662 break; | |
663 } | |
664 else | |
665 { | |
666 /* Check for a situation | |
667 | |
668 A | |
669 /| | |
670 B | | |
671 \| | |
672 C | |
673 | |
674 where | |
675 EDGE_FREQUENCY (AB) + EDGE_FREQUENCY (BC) | |
676 >= EDGE_FREQUENCY (AC). | |
677 (i.e. 2 * B->frequency >= EDGE_FREQUENCY (AC) ) | |
678 Best ordering is then A B C. | |
679 | |
680 This situation is created for example by: | |
681 | |
682 if (A) B; | |
683 C; | |
684 | |
685 */ | |
686 | |
687 FOR_EACH_EDGE (e, ei, bb->succs) | |
688 if (e != best_edge | |
689 && (e->flags & EDGE_CAN_FALLTHRU) | |
690 && !(e->flags & EDGE_COMPLEX) | |
691 && !e->dest->il.rtl->visited | |
692 && single_pred_p (e->dest) | |
693 && !(e->flags & EDGE_CROSSING) | |
694 && single_succ_p (e->dest) | |
695 && (single_succ_edge (e->dest)->flags | |
696 & EDGE_CAN_FALLTHRU) | |
697 && !(single_succ_edge (e->dest)->flags & EDGE_COMPLEX) | |
698 && single_succ (e->dest) == best_edge->dest | |
699 && 2 * e->dest->frequency >= EDGE_FREQUENCY (best_edge)) | |
700 { | |
701 best_edge = e; | |
702 if (dump_file) | |
703 fprintf (dump_file, "Selecting BB %d\n", | |
704 best_edge->dest->index); | |
705 break; | |
706 } | |
707 | |
708 bb->aux = best_edge->dest; | |
709 bbd[best_edge->dest->index].in_trace = (*n_traces) - 1; | |
710 bb = best_edge->dest; | |
711 } | |
712 } | |
713 } | |
714 while (best_edge); | |
715 trace->last = bb; | |
716 bbd[trace->first->index].start_of_trace = *n_traces - 1; | |
717 bbd[trace->last->index].end_of_trace = *n_traces - 1; | |
718 | |
719 /* The trace is terminated, so we have to recount the keys in the heap | |
720 (a block can have a lower key now because one of its predecessors | |
721 has become an end of a trace). */ | |
722 FOR_EACH_EDGE (e, ei, bb->succs) | |
723 { | |
724 if (e->dest == EXIT_BLOCK_PTR | |
725 || e->dest->il.rtl->visited) | |
726 continue; | |
727 | |
728 if (bbd[e->dest->index].heap) | |
729 { | |
730 key = bb_to_key (e->dest); | |
731 if (key != bbd[e->dest->index].node->key) | |
732 { | |
733 if (dump_file) | |
734 { | |
735 fprintf (dump_file, | |
736 "Changing key for bb %d from %ld to %ld.\n", | |
737 e->dest->index, | |
738 (long) bbd[e->dest->index].node->key, key); | |
739 } | |
740 fibheap_replace_key (bbd[e->dest->index].heap, | |
741 bbd[e->dest->index].node, | |
742 key); | |
743 } | |
744 } | |
745 } | |
746 } | |
747 | |
748 fibheap_delete (*heap); | |
749 | |
750 /* "Return" the new heap. */ | |
751 *heap = new_heap; | |
752 } | |
753 | |
754 /* Create a duplicate of the basic block OLD_BB and redirect edge E to it, add | |
755 it to the trace after BB, mark the duplicate visited and update the pass's | |
756 data structures (TRACE is the number of the trace OLD_BB is duplicated to). */ | |
757 | |
758 static basic_block | |
759 copy_bb (basic_block old_bb, edge e, basic_block bb, int trace) | |
760 { | |
761 basic_block new_bb; | |
762 | |
763 new_bb = duplicate_block (old_bb, e, bb); | |
764 BB_COPY_PARTITION (new_bb, old_bb); | |
765 | |
766 gcc_assert (e->dest == new_bb); | |
767 gcc_assert (!e->dest->il.rtl->visited); | |
768 | |
769 if (dump_file) | |
770 fprintf (dump_file, | |
771 "Duplicated bb %d (created bb %d)\n", | |
772 old_bb->index, new_bb->index); | |
773 new_bb->il.rtl->visited = trace; | |
774 new_bb->aux = bb->aux; | |
775 bb->aux = new_bb; | |
776 | |
777 if (new_bb->index >= array_size || last_basic_block > array_size) | |
778 { | |
779 int i; | |
780 int new_size; | |
781 | |
782 new_size = MAX (last_basic_block, new_bb->index + 1); | |
783 new_size = GET_ARRAY_SIZE (new_size); | |
784 bbd = XRESIZEVEC (bbro_basic_block_data, bbd, new_size); | |
785 for (i = array_size; i < new_size; i++) | |
786 { | |
787 bbd[i].start_of_trace = -1; | |
788 bbd[i].in_trace = -1; | |
789 bbd[i].end_of_trace = -1; | |
790 bbd[i].heap = NULL; | |
791 bbd[i].node = NULL; | |
792 } | |
793 array_size = new_size; | |
794 | |
795 if (dump_file) | |
796 { | |
797 fprintf (dump_file, | |
798 "Growing the dynamic array to %d elements.\n", | |
799 array_size); | |
800 } | |
801 } | |
802 | |
803 bbd[new_bb->index].in_trace = trace; | |
804 | |
805 return new_bb; | |
806 } | |
807 | |
808 /* Compute and return the key (for the heap) of the basic block BB. */ | |
809 | |
810 static fibheapkey_t | |
811 bb_to_key (basic_block bb) | |
812 { | |
813 edge e; | |
814 edge_iterator ei; | |
815 int priority = 0; | |
816 | |
817 /* Do not start in probably never executed blocks. */ | |
818 | |
819 if (BB_PARTITION (bb) == BB_COLD_PARTITION | |
820 || probably_never_executed_bb_p (bb)) | |
821 return BB_FREQ_MAX; | |
822 | |
823 /* Prefer blocks whose predecessor is an end of some trace | |
824 or whose predecessor edge is EDGE_DFS_BACK. */ | |
825 FOR_EACH_EDGE (e, ei, bb->preds) | |
826 { | |
827 if ((e->src != ENTRY_BLOCK_PTR && bbd[e->src->index].end_of_trace >= 0) | |
828 || (e->flags & EDGE_DFS_BACK)) | |
829 { | |
830 int edge_freq = EDGE_FREQUENCY (e); | |
831 | |
832 if (edge_freq > priority) | |
833 priority = edge_freq; | |
834 } | |
835 } | |
836 | |
837 if (priority) | |
838 /* A block with priority should have a significantly lower key. */ | |
839 return -(100 * BB_FREQ_MAX + 100 * priority + bb->frequency); | |
840 return -bb->frequency; | |
841 } | |
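
/* A worked example of the keys (illustrative numbers): a block with
   frequency 900 and no predecessor that ends a trace or arrives over a
   DFS back edge gets key -900; if its hottest such predecessor edge
   instead has EDGE_FREQUENCY 300, the key is
   -(100 * BB_FREQ_MAX + 100 * 300 + 900).  fibheap_extract_min pulls
   the smallest key first, so the second block becomes a seed much
   earlier, and cold blocks (key BB_FREQ_MAX) are taken last.  */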
842 | |
843 /* Return true when the edge E from basic block BB is better than the temporary | |
844 best edge (details are in the function). The probability of edge E is PROB. The | |
845 frequency of the successor is FREQ. The current best probability is | |
846 BEST_PROB, the best frequency is BEST_FREQ. | |
847 The edge is considered to be equivalent when PROB does not differ much from | |
848 BEST_PROB; similarly for frequency. */ | |
849 | |
850 static bool | |
851 better_edge_p (const_basic_block bb, const_edge e, int prob, int freq, int best_prob, | |
852 int best_freq, const_edge cur_best_edge) | |
853 { | |
854 bool is_better_edge; | |
855 | |
856 /* The BEST_* values do not have to be best, but can be a bit smaller than | |
857 maximum values. */ | |
858 int diff_prob = best_prob / 10; | |
859 int diff_freq = best_freq / 10; | |
860 | |
861 if (prob > best_prob + diff_prob) | |
862 /* The edge has higher probability than the temporary best edge. */ | |
863 is_better_edge = true; | |
864 else if (prob < best_prob - diff_prob) | |
865 /* The edge has lower probability than the temporary best edge. */ | |
866 is_better_edge = false; | |
867 else if (freq < best_freq - diff_freq) | |
868 /* The edge and the temporary best edge have almost equivalent | |
869 probabilities. The higher frequency of a successor now means | |
870 that there is another edge going into that successor. | |
871 This successor has lower frequency so it is better. */ | |
872 is_better_edge = true; | |
873 else if (freq > best_freq + diff_freq) | |
874 /* This successor has higher frequency so it is worse. */ | |
875 is_better_edge = false; | |
876 else if (e->dest->prev_bb == bb) | |
877 /* The edges have equivalent probabilities and the successors | |
878 have equivalent frequencies. Select the previous successor. */ | |
879 is_better_edge = true; | |
880 else | |
881 is_better_edge = false; | |
882 | |
883 /* If we are doing hot/cold partitioning, make sure that we always favor | |
884 non-crossing edges over crossing edges. */ | |
885 | |
886 if (!is_better_edge | |
887 && flag_reorder_blocks_and_partition | |
888 && cur_best_edge | |
889 && (cur_best_edge->flags & EDGE_CROSSING) | |
890 && !(e->flags & EDGE_CROSSING)) | |
891 is_better_edge = true; | |
892 | |
893 return is_better_edge; | |
894 } | |
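
/* A worked example of the 10% tolerance above (illustrative numbers):
   with best_prob == 5000, diff_prob is 500, so an edge with prob 5600
   is strictly better, one with prob 4400 is strictly worse, and one
   with prob 5400 counts as equivalent, letting the decision fall
   through to the frequency comparison.  */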
895 | |
896 /* Connect traces in array TRACES, N_TRACES is the count of traces. */ | |
897 | |
898 static void | |
899 connect_traces (int n_traces, struct trace *traces) | |
900 { | |
901 int i; | |
902 bool *connected; | |
903 bool two_passes; | |
904 int last_trace; | |
905 int current_pass; | |
906 int current_partition; | |
907 int freq_threshold; | |
908 gcov_type count_threshold; | |
909 | |
910 freq_threshold = max_entry_frequency * DUPLICATION_THRESHOLD / 1000; | |
911 if (max_entry_count < INT_MAX / 1000) | |
912 count_threshold = max_entry_count * DUPLICATION_THRESHOLD / 1000; | |
913 else | |
914 count_threshold = max_entry_count / 1000 * DUPLICATION_THRESHOLD; | |
915 | |
916 connected = XCNEWVEC (bool, n_traces); | |
917 last_trace = -1; | |
918 current_pass = 1; | |
919 current_partition = BB_PARTITION (traces[0].first); | |
920 two_passes = false; | |
921 | |
922 if (flag_reorder_blocks_and_partition) | |
923 for (i = 0; i < n_traces && !two_passes; i++) | |
924 if (BB_PARTITION (traces[0].first) | |
925 != BB_PARTITION (traces[i].first)) | |
926 two_passes = true; | |
927 | |
928 for (i = 0; i < n_traces || (two_passes && current_pass == 1) ; i++) | |
929 { | |
930 int t = i; | |
931 int t2; | |
932 edge e, best; | |
933 int best_len; | |
934 | |
935 if (i >= n_traces) | |
936 { | |
937 gcc_assert (two_passes && current_pass == 1); | |
938 i = 0; | |
939 t = i; | |
940 current_pass = 2; | |
941 if (current_partition == BB_HOT_PARTITION) | |
942 current_partition = BB_COLD_PARTITION; | |
943 else | |
944 current_partition = BB_HOT_PARTITION; | |
945 } | |
946 | |
947 if (connected[t]) | |
948 continue; | |
949 | |
950 if (two_passes | |
951 && BB_PARTITION (traces[t].first) != current_partition) | |
952 continue; | |
953 | |
954 connected[t] = true; | |
955 | |
956 /* Find the predecessor traces. */ | |
957 for (t2 = t; t2 > 0;) | |
958 { | |
959 edge_iterator ei; | |
960 best = NULL; | |
961 best_len = 0; | |
962 FOR_EACH_EDGE (e, ei, traces[t2].first->preds) | |
963 { | |
964 int si = e->src->index; | |
965 | |
966 if (e->src != ENTRY_BLOCK_PTR | |
967 && (e->flags & EDGE_CAN_FALLTHRU) | |
968 && !(e->flags & EDGE_COMPLEX) | |
969 && bbd[si].end_of_trace >= 0 | |
970 && !connected[bbd[si].end_of_trace] | |
971 && (BB_PARTITION (e->src) == current_partition) | |
972 && (!best | |
973 || e->probability > best->probability | |
974 || (e->probability == best->probability | |
975 && traces[bbd[si].end_of_trace].length > best_len))) | |
976 { | |
977 best = e; | |
978 best_len = traces[bbd[si].end_of_trace].length; | |
979 } | |
980 } | |
981 if (best) | |
982 { | |
983 best->src->aux = best->dest; | |
984 t2 = bbd[best->src->index].end_of_trace; | |
985 connected[t2] = true; | |
986 | |
987 if (dump_file) | |
988 { | |
989 fprintf (dump_file, "Connection: %d %d\n", | |
990 best->src->index, best->dest->index); | |
991 } | |
992 } | |
993 else | |
994 break; | |
995 } | |
996 | |
997 if (last_trace >= 0) | |
998 traces[last_trace].last->aux = traces[t2].first; | |
999 last_trace = t; | |
1000 | |
1001 /* Find the successor traces. */ | |
1002 while (1) | |
1003 { | |
1004 /* Find the continuation of the chain. */ | |
1005 edge_iterator ei; | |
1006 best = NULL; | |
1007 best_len = 0; | |
1008 FOR_EACH_EDGE (e, ei, traces[t].last->succs) | |
1009 { | |
1010 int di = e->dest->index; | |
1011 | |
1012 if (e->dest != EXIT_BLOCK_PTR | |
1013 && (e->flags & EDGE_CAN_FALLTHRU) | |
1014 && !(e->flags & EDGE_COMPLEX) | |
1015 && bbd[di].start_of_trace >= 0 | |
1016 && !connected[bbd[di].start_of_trace] | |
1017 && (BB_PARTITION (e->dest) == current_partition) | |
1018 && (!best | |
1019 || e->probability > best->probability | |
1020 || (e->probability == best->probability | |
1021 && traces[bbd[di].start_of_trace].length > best_len))) | |
1022 { | |
1023 best = e; | |
1024 best_len = traces[bbd[di].start_of_trace].length; | |
1025 } | |
1026 } | |
1027 | |
1028 if (best) | |
1029 { | |
1030 if (dump_file) | |
1031 { | |
1032 fprintf (dump_file, "Connection: %d %d\n", | |
1033 best->src->index, best->dest->index); | |
1034 } | |
1035 t = bbd[best->dest->index].start_of_trace; | |
1036 traces[last_trace].last->aux = traces[t].first; | |
1037 connected[t] = true; | |
1038 last_trace = t; | |
1039 } | |
1040 else | |
1041 { | |
1042 /* Try to connect the traces by duplicating one block. */ | |
1043 edge e2; | |
1044 basic_block next_bb = NULL; | |
1045 bool try_copy = false; | |
1046 | |
1047 FOR_EACH_EDGE (e, ei, traces[t].last->succs) | |
1048 if (e->dest != EXIT_BLOCK_PTR | |
1049 && (e->flags & EDGE_CAN_FALLTHRU) | |
1050 && !(e->flags & EDGE_COMPLEX) | |
1051 && (!best || e->probability > best->probability)) | |
1052 { | |
1053 edge_iterator ei; | |
1054 edge best2 = NULL; | |
1055 int best2_len = 0; | |
1056 | |
1057 /* If the destination is a start of a trace which is only | |
1058 one block long, then no need to search the successor | |
1059 blocks of the trace. Accept it. */ | |
1060 if (bbd[e->dest->index].start_of_trace >= 0 | |
1061 && traces[bbd[e->dest->index].start_of_trace].length | |
1062 == 1) | |
1063 { | |
1064 best = e; | |
1065 try_copy = true; | |
1066 continue; | |
1067 } | |
1068 | |
1069 FOR_EACH_EDGE (e2, ei, e->dest->succs) | |
1070 { | |
1071 int di = e2->dest->index; | |
1072 | |
1073 if (e2->dest == EXIT_BLOCK_PTR | |
1074 || ((e2->flags & EDGE_CAN_FALLTHRU) | |
1075 && !(e2->flags & EDGE_COMPLEX) | |
1076 && bbd[di].start_of_trace >= 0 | |
1077 && !connected[bbd[di].start_of_trace] | |
1078 && (BB_PARTITION (e2->dest) == current_partition) | |
1079 && (EDGE_FREQUENCY (e2) >= freq_threshold) | |
1080 && (e2->count >= count_threshold) | |
1081 && (!best2 | |
1082 || e2->probability > best2->probability | |
1083 || (e2->probability == best2->probability | |
1084 && traces[bbd[di].start_of_trace].length | |
1085 > best2_len)))) | |
1086 { | |
1087 best = e; | |
1088 best2 = e2; | |
1089 if (e2->dest != EXIT_BLOCK_PTR) | |
1090 best2_len = traces[bbd[di].start_of_trace].length; | |
1091 else | |
1092 best2_len = INT_MAX; | |
1093 next_bb = e2->dest; | |
1094 try_copy = true; | |
1095 } | |
1096 } | |
1097 } | |
1098 | |
1099 if (flag_reorder_blocks_and_partition) | |
1100 try_copy = false; | |
1101 | |
1102 /* Copy tiny blocks always; copy larger blocks only when the | |
1103 edge is traversed frequently enough. */ | |
1104 if (try_copy | |
1105 && copy_bb_p (best->dest, | |
1106 optimize_edge_for_speed_p (best) | |
1107 && EDGE_FREQUENCY (best) >= freq_threshold | |
1108 && best->count >= count_threshold)) | |
1109 { | |
1110 basic_block new_bb; | |
1111 | |
1112 if (dump_file) | |
1113 { | |
1114 fprintf (dump_file, "Connection: %d %d ", | |
1115 traces[t].last->index, best->dest->index); | |
1116 if (!next_bb) | |
1117 fputc ('\n', dump_file); | |
1118 else if (next_bb == EXIT_BLOCK_PTR) | |
1119 fprintf (dump_file, "exit\n"); | |
1120 else | |
1121 fprintf (dump_file, "%d\n", next_bb->index); | |
1122 } | |
1123 | |
1124 new_bb = copy_bb (best->dest, best, traces[t].last, t); | |
1125 traces[t].last = new_bb; | |
1126 if (next_bb && next_bb != EXIT_BLOCK_PTR) | |
1127 { | |
1128 t = bbd[next_bb->index].start_of_trace; | |
1129 traces[last_trace].last->aux = traces[t].first; | |
1130 connected[t] = true; | |
1131 last_trace = t; | |
1132 } | |
1133 else | |
1134 break; /* Stop finding the successor traces. */ | |
1135 } | |
1136 else | |
1137 break; /* Stop finding the successor traces. */ | |
1138 } | |
1139 } | |
1140 } | |
1141 | |
1142 if (dump_file) | |
1143 { | |
1144 basic_block bb; | |
1145 | |
1146 fprintf (dump_file, "Final order:\n"); | |
1147 for (bb = traces[0].first; bb; bb = (basic_block) bb->aux) | |
1148 fprintf (dump_file, "%d ", bb->index); | |
1149 fprintf (dump_file, "\n"); | |
1150 fflush (dump_file); | |
1151 } | |
1152 | |
1153 FREE (connected); | |
1154 } | |
1155 | |
1156 /* Return true when BB can and should be copied. CODE_MAY_GROW is true | |
1157 when code size is allowed to grow by duplication. */ | |
1158 | |
1159 static bool | |
1160 copy_bb_p (const_basic_block bb, int code_may_grow) | |
1161 { | |
1162 int size = 0; | |
1163 int max_size = uncond_jump_length; | |
1164 rtx insn; | |
1165 | |
1166 if (!bb->frequency) | |
1167 return false; | |
1168 if (EDGE_COUNT (bb->preds) < 2) | |
1169 return false; | |
1170 if (!can_duplicate_block_p (bb)) | |
1171 return false; | |
1172 | |
1173 /* Avoid duplicating blocks which have many successors (PR/13430). */ | |
1174 if (EDGE_COUNT (bb->succs) > 8) | |
1175 return false; | |
1176 | |
1177 if (code_may_grow && optimize_bb_for_speed_p (bb)) | |
1178 max_size *= PARAM_VALUE (PARAM_MAX_GROW_COPY_BB_INSNS); | |
1179 | |
1180 FOR_BB_INSNS (bb, insn) | |
1181 { | |
1182 if (INSN_P (insn)) | |
1183 size += get_attr_min_length (insn); | |
1184 } | |
1185 | |
1186 if (size <= max_size) | |
1187 return true; | |
1188 | |
1189 if (dump_file) | |
1190 { | |
1191 fprintf (dump_file, | |
1192 "Block %d can't be copied because its size = %d.\n", | |
1193 bb->index, size); | |
1194 } | |
1195 | |
1196 return false; | |
1197 } | |
1198 | |
1199 /* Return the length of an unconditional jump instruction. */ | |
1200 | |
1201 static int | |
1202 get_uncond_jump_length (void) | |
1203 { | |
1204 rtx label, jump; | |
1205 int length; | |
1206 | |
1207 label = emit_label_before (gen_label_rtx (), get_insns ()); | |
1208 jump = emit_jump_insn (gen_jump (label)); | |
1209 | |
1210 length = get_attr_min_length (jump); | |
1211 | |
1212 delete_insn (jump); | |
1213 delete_insn (label); | |
1214 return length; | |
1215 } | |
1216 | |
1217 /* Find the basic blocks that are rarely executed and need to be moved to | |
1218 a separate section of the .o file (to cut down on paging and improve | |
1219 cache locality). */ | |
1220 | |
1221 static void | |
1222 find_rarely_executed_basic_blocks_and_crossing_edges (edge **crossing_edges, | |
1223 int *n_crossing_edges, | |
1224 int *max_idx) | |
1225 { | |
1226 basic_block bb; | |
1227 edge e; | |
1228 int i; | |
1229 edge_iterator ei; | |
1230 | |
1231 /* Mark which partition (hot/cold) each basic block belongs in. */ | |
1232 | |
1233 FOR_EACH_BB (bb) | |
1234 { | |
1235 if (probably_never_executed_bb_p (bb)) | |
1236 BB_SET_PARTITION (bb, BB_COLD_PARTITION); | |
1237 else | |
1238 BB_SET_PARTITION (bb, BB_HOT_PARTITION); | |
1239 } | |
1240 | |
1241 /* Mark every edge that crosses between sections. */ | |
1242 | |
1243 i = 0; | |
1244 FOR_EACH_BB (bb) | |
1245 FOR_EACH_EDGE (e, ei, bb->succs) | |
1246 { | |
1247 if (e->src != ENTRY_BLOCK_PTR | |
1248 && e->dest != EXIT_BLOCK_PTR | |
1249 && BB_PARTITION (e->src) != BB_PARTITION (e->dest)) | |
1250 { | |
1251 e->flags |= EDGE_CROSSING; | |
1252 if (i == *max_idx) | |
1253 { | |
1254 *max_idx *= 2; | |
1255 *crossing_edges = XRESIZEVEC (edge, *crossing_edges, *max_idx); | |
1256 } | |
1257 (*crossing_edges)[i++] = e; | |
1258 } | |
1259 else | |
1260 e->flags &= ~EDGE_CROSSING; | |
1261 } | |
1262 *n_crossing_edges = i; | |
1263 } | |
1264 | |
1265 /* If any destination of a crossing edge does not have a label, add a label; | |
1266 convert any fall-through crossing edges (for blocks that do not contain | |
1267 a jump) to unconditional jumps. */ | |
1268 | |
1269 static void | |
1270 add_labels_and_missing_jumps (edge *crossing_edges, int n_crossing_edges) | |
1271 { | |
1272 int i; | |
1273 basic_block src; | |
1274 basic_block dest; | |
1275 rtx label; | |
1276 rtx barrier; | |
1277 rtx new_jump; | |
1278 | |
1279 for (i=0; i < n_crossing_edges; i++) | |
1280 { | |
1281 if (crossing_edges[i]) | |
1282 { | |
1283 src = crossing_edges[i]->src; | |
1284 dest = crossing_edges[i]->dest; | |
1285 | |
1286 /* Make sure dest has a label. */ | |
1287 | |
1288 if (dest && (dest != EXIT_BLOCK_PTR)) | |
1289 { | |
1290 label = block_label (dest); | |
1291 | |
1292 /* Make sure source block ends with a jump. If the | |
1293 source block does not end with a jump it might end | |
1294 with a call_insn; this case will be handled in | |
1295 the fix_up_fall_thru_edges function. */ | |
1296 | |
1297 if (src && (src != ENTRY_BLOCK_PTR)) | |
1298 { | |
1299 if (!JUMP_P (BB_END (src)) && !block_ends_with_call_p (src)) | |
1300 /* bb just falls through. */ | |
1301 { | |
1302 /* make sure there's only one successor */ | |
1303 gcc_assert (single_succ_p (src)); | |
1304 | |
1305 /* Find label in dest block. */ | |
1306 label = block_label (dest); | |
1307 | |
1308 new_jump = emit_jump_insn_after (gen_jump (label), | |
1309 BB_END (src)); | |
1310 barrier = emit_barrier_after (new_jump); | |
1311 JUMP_LABEL (new_jump) = label; | |
1312 LABEL_NUSES (label) += 1; | |
1313 src->il.rtl->footer = unlink_insn_chain (barrier, barrier); | |
1314 /* Mark edge as non-fallthru. */ | |
1315 crossing_edges[i]->flags &= ~EDGE_FALLTHRU; | |
1316 } /* end: 'if (GET_CODE ... ' */ | |
1317 } /* end: 'if (src && src->index...' */ | |
1318 } /* end: 'if (dest && dest->index...' */ | |
1319 } /* end: 'if (crossing_edges[i]...' */ | |
1320 } /* end for loop */ | |
1321 } | |
1322 | |
1323 /* Find any bb's where the fall-through edge is a crossing edge (note that | |
1324 these bb's must also contain a conditional jump or end with a call | |
1325 instruction; we've already dealt with fall-through edges for blocks | |
1326 that didn't have a conditional jump or didn't end with call instruction | |
1327 in the call to add_labels_and_missing_jumps). Convert the fall-through | |
1328 edge to non-crossing edge by inserting a new bb to fall-through into. | |
1329 The new bb will contain an unconditional jump (crossing edge) to the | |
1330 original fall through destination. */ | |
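
/* Sketch of the transformation (hypothetical blocks A and B): a crossing
   fall-through edge

     A --(fallthru, crossing)--> B

   becomes

     A --(fallthru)--> NEW_BB --(jump, crossing)--> B

   with NEW_BB placed in A's partition, so the fall-through itself no
   longer crosses sections.  */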
1331 | |
1332 static void | |
1333 fix_up_fall_thru_edges (void) | |
1334 { | |
1335 basic_block cur_bb; | |
1336 basic_block new_bb; | |
1337 edge succ1; | |
1338 edge succ2; | |
1339 edge fall_thru; | |
1340 edge cond_jump = NULL; | |
1341 edge e; | |
1342 bool cond_jump_crosses; | |
1343 int invert_worked; | |
1344 rtx old_jump; | |
1345 rtx fall_thru_label; | |
1346 rtx barrier; | |
1347 | |
1348 FOR_EACH_BB (cur_bb) | |
1349 { | |
1350 fall_thru = NULL; | |
1351 if (EDGE_COUNT (cur_bb->succs) > 0) | |
1352 succ1 = EDGE_SUCC (cur_bb, 0); | |
1353 else | |
1354 succ1 = NULL; | |
1355 | |
1356 if (EDGE_COUNT (cur_bb->succs) > 1) | |
1357 succ2 = EDGE_SUCC (cur_bb, 1); | |
1358 else | |
1359 succ2 = NULL; | |
1360 | |
1361 /* Find the fall-through edge. */ | |
1362 | |
1363 if (succ1 | |
1364 && (succ1->flags & EDGE_FALLTHRU)) | |
1365 { | |
1366 fall_thru = succ1; | |
1367 cond_jump = succ2; | |
1368 } | |
1369 else if (succ2 | |
1370 && (succ2->flags & EDGE_FALLTHRU)) | |
1371 { | |
1372 fall_thru = succ2; | |
1373 cond_jump = succ1; | |
1374 } | |
1375 else if (!fall_thru && succ1 && block_ends_with_call_p (cur_bb)) | |
1376 { | |
1377 edge e; | |
1378 edge_iterator ei; | |
1379 | |
1380 /* Find EDGE_CAN_FALLTHRU edge. */ | |
1381 FOR_EACH_EDGE (e, ei, cur_bb->succs) | |
1382 if (e->flags & EDGE_CAN_FALLTHRU) | |
1383 { | |
1384 fall_thru = e; | |
1385 break; | |
1386 } | |
1387 } | |
1388 | |
1389 if (fall_thru && (fall_thru->dest != EXIT_BLOCK_PTR)) | |
1390 { | |
1391 /* Check to see if the fall-thru edge is a crossing edge. */ | |
1392 | |
1393 if (fall_thru->flags & EDGE_CROSSING) | |
1394 { | |
1395 /* The fall_thru edge crosses; now check the cond jump edge, if | |
1396 it exists. */ | |
1397 | |
1398 cond_jump_crosses = true; | |
1399 invert_worked = 0; | |
1400 old_jump = BB_END (cur_bb); | |
1401 | |
1402 /* Find the jump instruction, if there is one. */ | |
1403 | |
1404 if (cond_jump) | |
1405 { | |
1406 if (!(cond_jump->flags & EDGE_CROSSING)) | |
1407 cond_jump_crosses = false; | |
1408 | |
1409 /* We know the fall-thru edge crosses; if the cond | |
1410 jump edge does NOT cross, and its destination is the | |
1411 next block in the bb order, invert the jump | |
1412 (i.e. fix it so the fall thru does not cross and | |
1413 the cond jump does). */ | |
1414 | |
1415 if (!cond_jump_crosses | |
1416 && cur_bb->aux == cond_jump->dest) | |
1417 { | |
1418 /* Find label in fall_thru block. We've already added | |
1419 any missing labels, so there must be one. */ | |
1420 | |
1421 fall_thru_label = block_label (fall_thru->dest); | |
1422 | |
1423 if (old_jump && fall_thru_label) | |
1424 invert_worked = invert_jump (old_jump, | |
1425 fall_thru_label,0); | |
1426 if (invert_worked) | |
1427 { | |
1428 fall_thru->flags &= ~EDGE_FALLTHRU; | |
1429 cond_jump->flags |= EDGE_FALLTHRU; | |
1430 update_br_prob_note (cur_bb); | |
1431 e = fall_thru; | |
1432 fall_thru = cond_jump; | |
1433 cond_jump = e; | |
1434 cond_jump->flags |= EDGE_CROSSING; | |
1435 fall_thru->flags &= ~EDGE_CROSSING; | |
1436 } | |
1437 } | |
1438 } | |
1439 | |
1440 if (cond_jump_crosses || !invert_worked) | |
1441 { | |
1442 /* This is the case where both edges out of the basic | |
1443 block are crossing edges. Here we will fix up the | |
1444 fall through edge. The jump edge will be taken care | |
1445 of later. The EDGE_CROSSING flag of fall_thru edge | |
1446 is unset before the call to force_nonfallthru | |
1447 function because if a new basic-block is created | |
1448 this edge remains in the current section boundary | |
1449 while the edge between new_bb and the fall_thru->dest | |
1450 becomes EDGE_CROSSING. */ | |
1451 | |
1452 fall_thru->flags &= ~EDGE_CROSSING; | |
1453 new_bb = force_nonfallthru (fall_thru); | |
1454 | |
1455 if (new_bb) | |
1456 { | |
1457 new_bb->aux = cur_bb->aux; | |
1458 cur_bb->aux = new_bb; | |
1459 | |
1460 /* Make sure new fall-through bb is in same | |
1461 partition as bb it's falling through from. */ | |
1462 | |
1463 BB_COPY_PARTITION (new_bb, cur_bb); | |
1464 single_succ_edge (new_bb)->flags |= EDGE_CROSSING; | |
1465 } | |
1466 else | |
1467 { | |
1468 /* If a new basic-block was not created; restore | |
1469 the EDGE_CROSSING flag. */ | |
1470 fall_thru->flags |= EDGE_CROSSING; | |
1471 } | |
1472 | |
1473 /* Add barrier after new jump */ | |
1474 | |
1475 if (new_bb) | |
1476 { | |
1477 barrier = emit_barrier_after (BB_END (new_bb)); | |
1478 new_bb->il.rtl->footer = unlink_insn_chain (barrier, | |
1479 barrier); | |
1480 } | |
1481 else | |
1482 { | |
1483 barrier = emit_barrier_after (BB_END (cur_bb)); | |
1484 cur_bb->il.rtl->footer = unlink_insn_chain (barrier, | |
1485 barrier); | |
1486 } | |
1487 } | |
1488 } | |
1489 } | |
1490 } | |
1491 } | |
1492 | |
1493 /* This function checks the destination block of a "crossing jump" to | |
1494 see if it has any crossing predecessors that begin with a code label | |
1495 and end with an unconditional jump. If so, it returns that predecessor | |
1496 block. (This is to avoid creating lots of new basic blocks that all | |
1497 contain unconditional jumps to the same destination). */ | |
1498 | |
1499 static basic_block | |
1500 find_jump_block (basic_block jump_dest) | |
1501 { | |
1502 basic_block source_bb = NULL; | |
1503 edge e; | |
1504 rtx insn; | |
1505 edge_iterator ei; | |
1506 | |
1507 FOR_EACH_EDGE (e, ei, jump_dest->preds) | |
1508 if (e->flags & EDGE_CROSSING) | |
1509 { | |
1510 basic_block src = e->src; | |
1511 | |
1512 /* Check each predecessor to see if it has a label, and contains | |
1513 only one executable instruction, which is an unconditional jump. | |
1514 If so, we can use it. */ | |
1515 | |
1516 if (LABEL_P (BB_HEAD (src))) | |
1517 for (insn = BB_HEAD (src); | |
1518 !INSN_P (insn) && insn != NEXT_INSN (BB_END (src)); | |
1519 insn = NEXT_INSN (insn)) | |
1520 { | |
1521 if (INSN_P (insn) | |
1522 && insn == BB_END (src) | |
1523 && JUMP_P (insn) | |
1524 && !any_condjump_p (insn)) | |
1525 { | |
1526 source_bb = src; | |
1527 break; | |
1528 } | |
1529 } | |
1530 | |
1531 if (source_bb) | |
1532 break; | |
1533 } | |
1534 | |
1535 return source_bb; | |
1536 } | |
1537 | |
1538 /* Find all BB's with conditional jumps that are crossing edges; | |
1539 insert a new bb and make the conditional jump branch to the new | |
1540 bb instead (make the new bb the same color so the conditional branch | |
1541 won't be a 'crossing' edge). Insert an unconditional jump from the | |
1542 new bb to the original destination of the conditional jump. */ | |
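
/* Schematically (hypothetical blocks, with the section shown first):

     before:  hot A --cond, crossing--> cold D
     after:   hot A --cond--> hot NEW_BB --uncond, crossing--> cold D

   so only an unconditional jump crosses the section boundary.  */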
1543 | |
1544 static void | |
1545 fix_crossing_conditional_branches (void) | |
1546 { | |
1547 basic_block cur_bb; | |
1548 basic_block new_bb; | |
1549 basic_block last_bb; | |
1550 basic_block dest; | |
1551 edge succ1; | |
1552 edge succ2; | |
1553 edge crossing_edge; | |
1554 edge new_edge; | |
1555 rtx old_jump; | |
1556 rtx set_src; | |
1557 rtx old_label = NULL_RTX; | |
1558 rtx new_label; | |
1559 rtx new_jump; | |
1560 rtx barrier; | |
1561 | |
1562 last_bb = EXIT_BLOCK_PTR->prev_bb; | |
1563 | |
1564 FOR_EACH_BB (cur_bb) | |
1565 { | |
1566 crossing_edge = NULL; | |
1567 if (EDGE_COUNT (cur_bb->succs) > 0) | |
1568 succ1 = EDGE_SUCC (cur_bb, 0); | |
1569 else | |
1570 succ1 = NULL; | |
1571 | |
1572 if (EDGE_COUNT (cur_bb->succs) > 1) | |
1573 succ2 = EDGE_SUCC (cur_bb, 1); | |
1574 else | |
1575 succ2 = NULL; | |
1576 | |
1577 /* We already took care of fall-through edges, so only one successor | |
1578 can be a crossing edge. */ | |
1579 | |
1580 if (succ1 && (succ1->flags & EDGE_CROSSING)) | |
1581 crossing_edge = succ1; | |
1582 else if (succ2 && (succ2->flags & EDGE_CROSSING)) | |
1583 crossing_edge = succ2; | |
1584 | |
1585 if (crossing_edge) | |
1586 { | |
1587 old_jump = BB_END (cur_bb); | |
1588 | |
1589 /* Check to make sure the jump instruction is a | |
1590 conditional jump. */ | |
1591 | |
1592 set_src = NULL_RTX; | |
1593 | |
1594 if (any_condjump_p (old_jump)) | |
1595 { | |
1596 if (GET_CODE (PATTERN (old_jump)) == SET) | |
1597 set_src = SET_SRC (PATTERN (old_jump)); | |
1598 else if (GET_CODE (PATTERN (old_jump)) == PARALLEL) | |
1599 { | |
1600 set_src = XVECEXP (PATTERN (old_jump), 0,0); | |
1601 if (GET_CODE (set_src) == SET) | |
1602 set_src = SET_SRC (set_src); | |
1603 else | |
1604 set_src = NULL_RTX; | |
1605 } | |
1606 } | |
1607 | |
1608 if (set_src && (GET_CODE (set_src) == IF_THEN_ELSE)) | |
1609 { | |
1610 if (GET_CODE (XEXP (set_src, 1)) == PC) | |
1611 old_label = XEXP (set_src, 2); | |
1612 else if (GET_CODE (XEXP (set_src, 2)) == PC) | |
1613 old_label = XEXP (set_src, 1); | |
1614 | |
1615 /* Check to see if new bb for jumping to that dest has | |
1616 already been created; if so, use it; if not, create | |
1617 a new one. */ | |
1618 | |
1619 new_bb = find_jump_block (crossing_edge->dest); | |
1620 | |
1621 if (new_bb) | |
1622 new_label = block_label (new_bb); | |
1623 else | |
1624 { | |
1625 /* Create new basic block to be dest for | |
1626 conditional jump. */ | |
1627 | |
1628 new_bb = create_basic_block (NULL, NULL, last_bb); | |
1629 new_bb->aux = last_bb->aux; | |
1630 last_bb->aux = new_bb; | |
1631 last_bb = new_bb; | |
1632 /* Put appropriate instructions in new bb. */ | |
1633 | |
1634 new_label = gen_label_rtx (); | |
1635 emit_label_before (new_label, BB_HEAD (new_bb)); | |
1636 BB_HEAD (new_bb) = new_label; | |
1637 | |
1638 if (GET_CODE (old_label) == LABEL_REF) | |
1639 { | |
1640 old_label = JUMP_LABEL (old_jump); | |
1641 new_jump = emit_jump_insn_after (gen_jump | |
1642 (old_label), | |
1643 BB_END (new_bb)); | |
1644 } | |
1645 else | |
1646 { | |
1647 gcc_assert (HAVE_return | |
1648 && GET_CODE (old_label) == RETURN); | |
1649 new_jump = emit_jump_insn_after (gen_return (), | |
1650 BB_END (new_bb)); | |
1651 } | |
1652 | |
1653 barrier = emit_barrier_after (new_jump); | |
1654 JUMP_LABEL (new_jump) = old_label; | |
1655 new_bb->il.rtl->footer = unlink_insn_chain (barrier, | |
1656 barrier); | |
1657 | |
1658 /* Make sure new bb is in same partition as source | |
1659 of conditional branch. */ | |
1660 BB_COPY_PARTITION (new_bb, cur_bb); | |
1661 } | |
1662 | |
1663 /* Make old jump branch to new bb. */ | |
1664 | |
1665 redirect_jump (old_jump, new_label, 0); | |
1666 | |
1667 /* Remove crossing_edge as predecessor of 'dest'. */ | |
1668 | |
1669 dest = crossing_edge->dest; | |
1670 | |
1671 redirect_edge_succ (crossing_edge, new_bb); | |
1672 | |
1673 /* Make a new edge from new_bb to old dest; new edge | |
1674 will be a successor for new_bb and a predecessor | |
1675 for 'dest'. */ | |
1676 | |
1677 if (EDGE_COUNT (new_bb->succs) == 0) | |
1678 new_edge = make_edge (new_bb, dest, 0); | |
1679 else | |
1680 new_edge = EDGE_SUCC (new_bb, 0); | |
1681 | |
1682 crossing_edge->flags &= ~EDGE_CROSSING; | |
1683 new_edge->flags |= EDGE_CROSSING; | |
1684 } | |
1685 } | |
1686 } | |
1687 } | |
1688 | |
1689 /* Find any unconditional branches that cross between hot and cold | |
1690 sections. Convert them into indirect jumps instead. */ | |
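
/* The conversion loads the target label's address into a fresh pseudo
   and jumps through it; informally, the sequence built below is

     (set (reg tmp) (label_ref L))
     (set (pc) (reg tmp))

   (a sketch of the generated RTL, not its exact printed form).  */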

static void
fix_crossing_unconditional_branches (void)
{
  basic_block cur_bb;
  rtx last_insn;
  rtx label;
  rtx label_addr;
  rtx indirect_jump_sequence;
  rtx jump_insn = NULL_RTX;
  rtx new_reg;
  rtx cur_insn;
  edge succ;

  FOR_EACH_BB (cur_bb)
    {
      last_insn = BB_END (cur_bb);

      if (EDGE_COUNT (cur_bb->succs) < 1)
        continue;

      succ = EDGE_SUCC (cur_bb, 0);

      /* Check to see if bb ends in a crossing (unconditional) jump.  At
         this point, no crossing jumps should be conditional.  */

      if (JUMP_P (last_insn)
          && (succ->flags & EDGE_CROSSING))
        {
          rtx label2, table;

          gcc_assert (!any_condjump_p (last_insn));

          /* Make sure the jump is not already an indirect or table jump.  */

          if (!computed_jump_p (last_insn)
              && !tablejump_p (last_insn, &label2, &table))
            {
              /* We have found a "crossing" unconditional branch.  Now
                 we must convert it to an indirect jump.  First create a
                 reference to the label, to serve as the jump target.  */

              label = JUMP_LABEL (last_insn);
              label_addr = gen_rtx_LABEL_REF (Pmode, label);
              LABEL_NUSES (label) += 1;

              /* Get a register to use for the indirect jump.  */

              new_reg = gen_reg_rtx (Pmode);

              /* Generate the indirect jump sequence.  */

              start_sequence ();
              emit_move_insn (new_reg, label_addr);
              emit_indirect_jump (new_reg);
              indirect_jump_sequence = get_insns ();
              end_sequence ();

              /* Make sure every instruction in the new jump sequence has
                 its basic block set to be cur_bb.  */

              for (cur_insn = indirect_jump_sequence; cur_insn;
                   cur_insn = NEXT_INSN (cur_insn))
                {
                  if (!BARRIER_P (cur_insn))
                    BLOCK_FOR_INSN (cur_insn) = cur_bb;
                  if (JUMP_P (cur_insn))
                    jump_insn = cur_insn;
                }

              /* Insert the new (indirect) jump sequence immediately before
                 the unconditional jump, then delete the unconditional jump.  */

              emit_insn_before (indirect_jump_sequence, last_insn);
              delete_insn (last_insn);

              /* Make BB_END for cur_bb be the jump instruction (NOT the
                 barrier instruction at the end of the sequence...).  */

              BB_END (cur_bb) = jump_insn;
            }
        }
    }
}

/* Add REG_CROSSING_JUMP note to all crossing jump insns.  */

static void
add_reg_crossing_jump_notes (void)
{
  basic_block bb;
  edge e;
  edge_iterator ei;

  FOR_EACH_BB (bb)
    FOR_EACH_EDGE (e, ei, bb->succs)
      if ((e->flags & EDGE_CROSSING)
          && JUMP_P (BB_END (e->src)))
        add_reg_note (BB_END (e->src), REG_CROSSING_JUMP, NULL_RTX);
}

/* Hot and cold basic blocks are partitioned and put in separate
   sections of the .o file, to reduce paging and improve cache
   performance (hopefully).  This can result in bits of code from the
   same function being widely separated in the .o file.  However this
   separation is not visible to the current bb structure, so we must
   take care to ensure that: 1). There are no fall_thru edges that
   cross between sections; 2). For those architectures which have
   "short" conditional branches, all conditional branches that attempt
   to cross between sections are converted to unconditional branches;
   and, 3). For those architectures which have "short" unconditional
   branches, all unconditional branches that attempt to cross between
   sections are converted to indirect jumps.

   The code for fixing up fall_thru edges that cross between hot and
   cold basic blocks does so by creating new basic blocks containing
   unconditional branches to the appropriate label in the "other"
   section.  The new basic block is then put in the same (hot or cold)
   section as the source of the fall_thru edge, and that edge is
   modified to fall into the new basic block instead.  By adding
   this level of indirection we end up with only unconditional branches
   crossing between hot and cold sections.

   Conditional branches are dealt with by adding a level of indirection.
   A new basic block is added in the same (hot/cold) section as the
   conditional branch, and the conditional branch is retargeted to the
   new basic block.  The new basic block contains an unconditional branch
   to the original target of the conditional branch (in the other section).

   Unconditional branches are dealt with by converting them into
   indirect jumps.  */
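
/* A sketch of the conditional-branch fix (hypothetical labels; hot
   section on the left, cold section on the right):

       before:   if (cc) jump .L_cold       |   .L_cold: ...
       after:    if (cc) jump .L_new        |   .L_cold: ...
                 ...                        |
                 .L_new: jump .L_cold       |

   Only the new unconditional jump now crosses the section boundary,
   and fix_crossing_unconditional_branches can further turn it into an
   indirect jump if even an unconditional branch cannot span the
   distance.  */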

static void
fix_edges_for_rarely_executed_code (edge *crossing_edges,
                                    int n_crossing_edges)
{
  /* Make sure the source of any crossing edge ends in a jump and the
     destination of any crossing edge has a label.  */

  add_labels_and_missing_jumps (crossing_edges, n_crossing_edges);

  /* Convert all crossing fall_thru edges into non-crossing edges by
     redirecting them to new basic blocks that end in unconditional
     jumps to the original fall-thru destinations.  */

  fix_up_fall_thru_edges ();

  /* If the architecture does not have conditional branches that can
     span all of memory, convert crossing conditional branches into
     crossing unconditional branches.  */

  if (!HAS_LONG_COND_BRANCH)
    fix_crossing_conditional_branches ();

  /* If the architecture does not have unconditional branches that
     can span all of memory, convert crossing unconditional branches
     into indirect jumps.  Since adding an indirect jump also adds
     a new register usage, update the register usage information as
     well.  */

  if (!HAS_LONG_UNCOND_BRANCH)
    fix_crossing_unconditional_branches ();

  add_reg_crossing_jump_notes ();
}

/* Verify, in the basic block chain, that there is at most one switch
   between hot/cold partitions.  This is modelled on
   rtl_verify_flow_info_1, but it cannot go inside that function
   because this condition will not be true until after
   reorder_basic_blocks is called.  */
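
/* For example (an illustration, not produced by this code), the block
   order  hot hot cold cold  contains a single hot/cold transition and
   verifies cleanly, while  hot cold hot  switches sections twice and
   triggers the "multiple hot/cold transitions" error below.  */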

static void
verify_hot_cold_block_grouping (void)
{
  basic_block bb;
  int err = 0;
  bool switched_sections = false;
  int current_partition = 0;

  FOR_EACH_BB (bb)
    {
      if (!current_partition)
        current_partition = BB_PARTITION (bb);
      if (BB_PARTITION (bb) != current_partition)
        {
          if (switched_sections)
            {
              error ("multiple hot/cold transitions found (bb %i)",
                     bb->index);
              err = 1;
            }
          else
            {
              switched_sections = true;
              current_partition = BB_PARTITION (bb);
            }
        }
    }

  gcc_assert (!err);
}

/* Reorder basic blocks.  The main entry point to this file.  The caller
   is expected to have switched the function into cfg layout mode
   already (see the assertion on current_ir_type below).  */

void
reorder_basic_blocks (void)
{
  int n_traces;
  int i;
  struct trace *traces;

  gcc_assert (current_ir_type () == IR_RTL_CFGLAYOUT);

  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
    return;

  set_edge_can_fallthru_flag ();
  mark_dfs_back_edges ();

  /* We are estimating the length of uncond jump insn only once since the code
     for getting the insn length always returns the minimal length now.  */
  if (uncond_jump_length == 0)
    uncond_jump_length = get_uncond_jump_length ();

  /* We need to know some information for each basic block.  */
  array_size = GET_ARRAY_SIZE (last_basic_block);
  bbd = XNEWVEC (bbro_basic_block_data, array_size);
  for (i = 0; i < array_size; i++)
    {
      bbd[i].start_of_trace = -1;
      bbd[i].in_trace = -1;
      bbd[i].end_of_trace = -1;
      bbd[i].heap = NULL;
      bbd[i].node = NULL;
    }

  traces = XNEWVEC (struct trace, n_basic_blocks);
  n_traces = 0;
  find_traces (&n_traces, traces);
  connect_traces (n_traces, traces);
  FREE (traces);
  FREE (bbd);

  relink_block_chain (/*stay_in_cfglayout_mode=*/true);

  if (dump_file)
    dump_flow_info (dump_file, dump_flags);

  if (flag_reorder_blocks_and_partition)
    verify_hot_cold_block_grouping ();
}

/* Determine which partition the first basic block in the function
   belongs to, then find the first basic block in the current function
   that belongs to a different section, and insert a
   NOTE_INSN_SWITCH_TEXT_SECTIONS note immediately before it in the
   instruction stream.  When writing out the assembly code,
   encountering this note will make the compiler switch between the
   hot and cold text sections.  */

static void
insert_section_boundary_note (void)
{
  basic_block bb;
  rtx new_note;
  int first_partition = 0;

  if (flag_reorder_blocks_and_partition)
    FOR_EACH_BB (bb)
      {
        if (!first_partition)
          first_partition = BB_PARTITION (bb);
        if (BB_PARTITION (bb) != first_partition)
          {
            new_note = emit_note_before (NOTE_INSN_SWITCH_TEXT_SECTIONS,
                                         BB_HEAD (bb));
            /* ??? This kind of note always lives between basic blocks,
               but add_insn_before will set BLOCK_FOR_INSN anyway.  */
            BLOCK_FOR_INSN (new_note) = NULL;
            break;
          }
      }
}

/* Duplicate the blocks containing computed gotos.  This basically unfactors
   computed gotos that were factored early on in the compilation process to
   speed up edge based data flow.  We used to not unfactor them again, which
   can seriously pessimize code with many computed jumps in the source code,
   such as interpreters.  See e.g. PR15242.  */
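
/* To illustrate with a hypothetical interpreter (names made up for the
   example), the factored form this pass undoes looks like:

       factored:                        unfactored:
         op_add: ...; goto dispatch;      op_add: ...; goto *tab[*pc++];
         op_sub: ...; goto dispatch;      op_sub: ...; goto *tab[*pc++];
         dispatch: goto *tab[*pc++];

   With a single shared dispatch block, every handler funnels through one
   indirect branch; duplicating that block into each predecessor gives
   each handler its own indirect branch, which typically predicts much
   better.  */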

static bool
gate_duplicate_computed_gotos (void)
{
  if (targetm.cannot_modify_jumps_p ())
    return false;
  return (optimize > 0 && flag_expensive_optimizations);
}

static unsigned int
duplicate_computed_gotos (void)
{
  basic_block bb, new_bb;
  bitmap candidates;
  int max_size;

  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
    return 0;

  cfg_layout_initialize (0);

  /* We are estimating the length of uncond jump insn only once
     since the code for getting the insn length always returns
     the minimal length now.  */
  if (uncond_jump_length == 0)
    uncond_jump_length = get_uncond_jump_length ();

  max_size
    = uncond_jump_length * PARAM_VALUE (PARAM_MAX_GOTO_DUPLICATION_INSNS);
  candidates = BITMAP_ALLOC (NULL);

  /* Look for blocks that end in a computed jump, and see if such blocks
     are suitable for unfactoring.  If a block is a candidate for unfactoring,
     mark it in the candidates.  */
  FOR_EACH_BB (bb)
    {
      rtx insn;
      edge e;
      edge_iterator ei;
      int size, all_flags;

      /* Build the reorder chain for the original order of blocks.  */
      if (bb->next_bb != EXIT_BLOCK_PTR)
        bb->aux = bb->next_bb;

      /* Obviously the block has to end in a computed jump.  */
      if (!computed_jump_p (BB_END (bb)))
        continue;

      /* Only consider blocks that can be duplicated.  */
      if (find_reg_note (BB_END (bb), REG_CROSSING_JUMP, NULL_RTX)
          || !can_duplicate_block_p (bb))
        continue;

      /* Make sure that the block is small enough.  */
      size = 0;
      FOR_BB_INSNS (bb, insn)
        if (INSN_P (insn))
          {
            size += get_attr_min_length (insn);
            if (size > max_size)
              break;
          }
      if (size > max_size)
        continue;

      /* Final check: there must not be any incoming abnormal edges.  */
      all_flags = 0;
      FOR_EACH_EDGE (e, ei, bb->preds)
        all_flags |= e->flags;
      if (all_flags & EDGE_COMPLEX)
        continue;

      bitmap_set_bit (candidates, bb->index);
    }

  /* Nothing to do if there is no computed jump here.  */
  if (bitmap_empty_p (candidates))
    goto done;

  /* Duplicate computed gotos.  */
  FOR_EACH_BB (bb)
    {
      if (bb->il.rtl->visited)
        continue;

      bb->il.rtl->visited = 1;

      /* BB must have one outgoing edge.  That edge must not lead to
         the exit block or the next block.
         The destination must have more than one predecessor.  */
      if (!single_succ_p (bb)
          || single_succ (bb) == EXIT_BLOCK_PTR
          || single_succ (bb) == bb->next_bb
          || single_pred_p (single_succ (bb)))
        continue;

      /* Duplicating the successor grows code, so only unfactor where
         BB is being optimized for speed.  */
      if (optimize_bb_for_size_p (bb))
        continue;

      /* The successor block has to be a duplication candidate.  */
      if (!bitmap_bit_p (candidates, single_succ (bb)->index))
        continue;

      new_bb = duplicate_block (single_succ (bb), single_succ_edge (bb), bb);
      new_bb->aux = bb->aux;
      bb->aux = new_bb;
      new_bb->il.rtl->visited = 1;
    }

 done:
  cfg_layout_finalize ();

  BITMAP_FREE (candidates);
  return 0;
}

struct rtl_opt_pass pass_duplicate_computed_gotos =
{
 {
  RTL_PASS,
  "compgotos",                          /* name */
  gate_duplicate_computed_gotos,        /* gate */
  duplicate_computed_gotos,             /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_REORDER_BLOCKS,                    /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func | TODO_verify_rtl_sharing,/* todo_flags_finish */
 }
};
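
/* Tuning note (restating the gate and body above): this pass only runs
   at -O1 and above together with -fexpensive-optimizations, and the
   amount of code it may copy per computed goto is bounded by
   --param max-goto-duplication-insns scaled by the estimated length of
   an unconditional jump.  */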


/* This function is the main 'entrance' for the optimization that
   partitions hot and cold basic blocks into separate sections of the
   .o file (to improve performance and cache locality).  Ideally it
   would be called after all optimizations that rearrange the CFG have
   been called.  However part of this optimization may introduce new
   register usage, so it must be called before register allocation has
   occurred.  This means that this optimization is actually called
   well before the optimization that reorders basic blocks (see
   function above).

   This optimization checks the feedback information to determine
   which basic blocks are hot/cold, and updates flags on the basic
   blocks to indicate which section they belong in.  This information
   is later used for writing out sections in the .o file.  Because hot
   and cold sections can be arbitrarily large (within the bounds of
   memory), far beyond the size of a single function, it is necessary
   to fix up all edges that cross section boundaries, to make sure the
   instructions used can actually span the required distance.  The
   fixes are described below.

   Fall-through edges must be changed into jumps; it is not safe or
   legal to fall through across a section boundary.  Whenever a
   fall-through edge crossing a section boundary is encountered, a new
   basic block is inserted (in the same section as the fall-through
   source), and the fall-through edge is redirected to the new basic
   block.  The new basic block contains an unconditional jump to the
   original fall-through target.  (If the unconditional jump is
   insufficient to cross section boundaries, that is dealt with a
   little later, see below.)

   In order to deal with architectures that have short conditional
   branches (which cannot span all of memory) we take any conditional
   jump that attempts to cross a section boundary and add a level of
   indirection: it becomes a conditional jump to a new basic block, in
   the same section.  The new basic block contains an unconditional
   jump to the original target, in the other section.

   For those architectures whose unconditional branch is also
   incapable of reaching all of memory, those unconditional jumps are
   converted into indirect jumps, through a register.

   IMPORTANT NOTE: This optimization causes some messy interactions
   with the cfg cleanup optimizations; those optimizations want to
   merge blocks wherever possible, and to collapse indirect jump
   sequences (change "A jumps to B jumps to C" directly into "A jumps
   to C").  Those optimizations can undo the jump fixes that
   partitioning is required to make (see above); the fixes exist to
   ensure that jumps attempting to cross section boundaries are really
   able to cover whatever distance the jump requires (on many
   architectures conditional or unconditional jumps are not able to
   reach all of memory).  Therefore tests have to be inserted into
   each such optimization to make sure that it does not undo the fixes
   necessary for crossing partition boundaries.  This would be much
   less of a problem if we could perform this optimization later in
   the compilation, but unfortunately the fact that we may need to
   create indirect jumps (through registers) requires that this
   optimization be performed before register allocation.  */

static void
partition_hot_cold_basic_blocks (void)
{
  basic_block cur_bb;
  edge *crossing_edges;
  int n_crossing_edges;
  int max_edges = 2 * last_basic_block;

  if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1)
    return;

  crossing_edges = XCNEWVEC (edge, max_edges);

  cfg_layout_initialize (0);

  FOR_EACH_BB (cur_bb)
    if (cur_bb->index >= NUM_FIXED_BLOCKS
        && cur_bb->next_bb->index >= NUM_FIXED_BLOCKS)
      cur_bb->aux = cur_bb->next_bb;

  find_rarely_executed_basic_blocks_and_crossing_edges (&crossing_edges,
                                                        &n_crossing_edges,
                                                        &max_edges);

  if (n_crossing_edges > 0)
    fix_edges_for_rarely_executed_code (crossing_edges, n_crossing_edges);

  free (crossing_edges);

  cfg_layout_finalize ();
}

static bool
gate_handle_reorder_blocks (void)
{
  if (targetm.cannot_modify_jumps_p ())
    return false;
  return (optimize > 0);
}


/* Reorder basic blocks.  */
static unsigned int
rest_of_handle_reorder_blocks (void)
{
  basic_block bb;

  /* Last attempt to optimize CFG, as scheduling, peepholing and insn
     splitting possibly introduced more crossjumping opportunities.  */
  cfg_layout_initialize (CLEANUP_EXPENSIVE);

  if ((flag_reorder_blocks || flag_reorder_blocks_and_partition)
      /* Don't reorder blocks when optimizing for size because extra jump
         insns may be created; also a barrier may create extra padding.

         More correctly, we should have a block reordering mode that tried to
         minimize the combined size of all the jumps.  This would more or less
         automatically remove extra jumps, but would also try to use more short
         jumps instead of long jumps.  */
      && optimize_function_for_speed_p (cfun))
    {
      reorder_basic_blocks ();
      cleanup_cfg (CLEANUP_EXPENSIVE);
    }

  FOR_EACH_BB (bb)
    if (bb->next_bb != EXIT_BLOCK_PTR)
      bb->aux = bb->next_bb;
  cfg_layout_finalize ();

  /* Add NOTE_INSN_SWITCH_TEXT_SECTIONS notes.  */
  insert_section_boundary_note ();
  return 0;
}

struct rtl_opt_pass pass_reorder_blocks =
{
 {
  RTL_PASS,
  "bbro",                               /* name */
  gate_handle_reorder_blocks,           /* gate */
  rest_of_handle_reorder_blocks,        /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_REORDER_BLOCKS,                    /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func | TODO_verify_rtl_sharing,/* todo_flags_finish */
 }
};

static bool
gate_handle_partition_blocks (void)
{
  /* The optimization to partition hot/cold basic blocks into separate
     sections of the .o file does not work well with linkonce or with
     user defined section attributes.  Don't call it if either case
     arises.  */

  return (flag_reorder_blocks_and_partition
          && !DECL_ONE_ONLY (current_function_decl)
          && !user_defined_section_attribute);
}

/* Partition hot and cold basic blocks.  */
static unsigned int
rest_of_handle_partition_blocks (void)
{
  partition_hot_cold_basic_blocks ();
  return 0;
}

struct rtl_opt_pass pass_partition_blocks =
{
 {
  RTL_PASS,
  "bbpart",                             /* name */
  gate_handle_partition_blocks,         /* gate */
  rest_of_handle_partition_blocks,      /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_REORDER_BLOCKS,                    /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func | TODO_verify_rtl_sharing/* todo_flags_finish */
 }
};