Mercurial > hg > CbC > CbC_gcc
comparison gcc/sched-ebb.c @ 55:77e2b8dfacca gcc-4.4.5
update it from 4.4.3 to 4.5.0
author | ryoma <e075725@ie.u-ryukyu.ac.jp> |
---|---|
date | Fri, 12 Feb 2010 23:39:51 +0900 |
parents | a06113de4d67 |
children | f6334be47118 |
comparison
equal
deleted
inserted
replaced
52:c156f1bd5cd9 | 55:77e2b8dfacca |
---|---|
142 | 142 |
143 /* An obscure special case, where we do have partially dead | 143 /* An obscure special case, where we do have partially dead |
144 instruction scheduled after last control flow instruction. | 144 instruction scheduled after last control flow instruction. |
145 In this case we can create new basic block. It is | 145 In this case we can create new basic block. It is |
146 always exactly one basic block last in the sequence. */ | 146 always exactly one basic block last in the sequence. */ |
147 | 147 |
148 FOR_EACH_EDGE (e, ei, last_bb->succs) | 148 FOR_EACH_EDGE (e, ei, last_bb->succs) |
149 if (e->flags & EDGE_FALLTHRU) | 149 if (e->flags & EDGE_FALLTHRU) |
150 break; | 150 break; |
151 | 151 |
152 #ifdef ENABLE_CHECKING | 152 #ifdef ENABLE_CHECKING |
153 gcc_assert (!e || !(e->flags & EDGE_COMPLEX)); | 153 gcc_assert (!e || !(e->flags & EDGE_COMPLEX)); |
154 | 154 |
155 gcc_assert (BLOCK_FOR_INSN (insn) == last_bb | 155 gcc_assert (BLOCK_FOR_INSN (insn) == last_bb |
156 && !IS_SPECULATION_CHECK_P (insn) | 156 && !IS_SPECULATION_CHECK_P (insn) |
157 && BB_HEAD (last_bb) != insn | 157 && BB_HEAD (last_bb) != insn |
158 && BB_END (last_bb) == insn); | 158 && BB_END (last_bb) == insn); |
174 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb))); | 174 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb))); |
175 } | 175 } |
176 else | 176 else |
177 /* Create an empty unreachable block after the INSN. */ | 177 /* Create an empty unreachable block after the INSN. */ |
178 bb = create_basic_block (NEXT_INSN (insn), NULL_RTX, last_bb); | 178 bb = create_basic_block (NEXT_INSN (insn), NULL_RTX, last_bb); |
179 | 179 |
180 /* split_edge () creates BB before E->DEST. Keep in mind, that | 180 /* split_edge () creates BB before E->DEST. Keep in mind, that |
181 this operation extends scheduling region till the end of BB. | 181 this operation extends scheduling region till the end of BB. |
182 Hence, we need to shift NEXT_TAIL, so haifa-sched.c won't go out | 182 Hence, we need to shift NEXT_TAIL, so haifa-sched.c won't go out |
183 of the scheduling region. */ | 183 of the scheduling region. */ |
184 current_sched_info->next_tail = NEXT_INSN (BB_END (bb)); | 184 current_sched_info->next_tail = NEXT_INSN (BB_END (bb)); |
284 schedule_more_p, | 284 schedule_more_p, |
285 NULL, | 285 NULL, |
286 rank, | 286 rank, |
287 ebb_print_insn, | 287 ebb_print_insn, |
288 ebb_contributes_to_priority, | 288 ebb_contributes_to_priority, |
289 NULL, /* insn_finishes_block_p */ | |
289 | 290 |
290 NULL, NULL, | 291 NULL, NULL, |
291 NULL, NULL, | 292 NULL, NULL, |
292 1, 0, | 293 1, 0, |
293 | 294 |
324 | 325 |
325 FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep) | 326 FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep) |
326 { | 327 { |
327 rtx insn1 = DEP_PRO (back_dep); | 328 rtx insn1 = DEP_PRO (back_dep); |
328 | 329 |
329 if (DEP_TYPE (back_dep) == REG_DEP_TRUE) | 330 if (DEP_TYPE (back_dep) == REG_DEP_TRUE) |
330 /* Found a DEF-USE dependence (insn1, load_insn). */ | 331 /* Found a DEF-USE dependence (insn1, load_insn). */ |
331 { | 332 { |
332 sd_iterator_def fore_sd_it; | 333 sd_iterator_def fore_sd_it; |
333 dep_t fore_dep; | 334 dep_t fore_dep; |
334 | 335 |
461 static basic_block | 462 static basic_block |
462 schedule_ebb (rtx head, rtx tail) | 463 schedule_ebb (rtx head, rtx tail) |
463 { | 464 { |
464 basic_block first_bb, target_bb; | 465 basic_block first_bb, target_bb; |
465 struct deps tmp_deps; | 466 struct deps tmp_deps; |
466 | 467 |
467 first_bb = BLOCK_FOR_INSN (head); | 468 first_bb = BLOCK_FOR_INSN (head); |
468 last_bb = BLOCK_FOR_INSN (tail); | 469 last_bb = BLOCK_FOR_INSN (tail); |
469 | 470 |
470 if (no_real_insns_p (head, tail)) | 471 if (no_real_insns_p (head, tail)) |
471 return BLOCK_FOR_INSN (tail); | 472 return BLOCK_FOR_INSN (tail); |
475 if (!bitmap_bit_p (&dont_calc_deps, first_bb->index)) | 476 if (!bitmap_bit_p (&dont_calc_deps, first_bb->index)) |
476 { | 477 { |
477 init_deps_global (); | 478 init_deps_global (); |
478 | 479 |
479 /* Compute dependencies. */ | 480 /* Compute dependencies. */ |
480 init_deps (&tmp_deps); | 481 init_deps (&tmp_deps, false); |
481 sched_analyze (&tmp_deps, head, tail); | 482 sched_analyze (&tmp_deps, head, tail); |
482 free_deps (&tmp_deps); | 483 free_deps (&tmp_deps); |
483 | 484 |
484 add_deps_for_risky_insns (head, tail); | 485 add_deps_for_risky_insns (head, tail); |
485 | 486 |
488 | 489 |
489 finish_deps_global (); | 490 finish_deps_global (); |
490 } | 491 } |
491 else | 492 else |
492 /* Only recovery blocks can have their dependencies already calculated, | 493 /* Only recovery blocks can have their dependencies already calculated, |
493 and they always are single block ebbs. */ | 494 and they always are single block ebbs. */ |
494 gcc_assert (first_bb == last_bb); | 495 gcc_assert (first_bb == last_bb); |
495 | 496 |
496 /* Set priorities. */ | 497 /* Set priorities. */ |
497 current_sched_info->sched_max_insns_priority = 0; | 498 current_sched_info->sched_max_insns_priority = 0; |
498 rgn_n_insns = set_priorities (head, tail); | 499 rgn_n_insns = set_priorities (head, tail); |
513 /* Free ready list. */ | 514 /* Free ready list. */ |
514 sched_finish_ready_list (); | 515 sched_finish_ready_list (); |
515 | 516 |
516 /* We might pack all instructions into fewer blocks, | 517 /* We might pack all instructions into fewer blocks, |
517 so we may make some of them empty. Can't assert (b == last_bb). | 518 so we may make some of them empty. Can't assert (b == last_bb). |
518 | 519 |
519 /* Sanity check: verify that all region insns were scheduled. */ | 520 /* Sanity check: verify that all region insns were scheduled. */ |
520 gcc_assert (sched_rgn_n_insns == rgn_n_insns); | 521 gcc_assert (sched_rgn_n_insns == rgn_n_insns); |
521 | 522 |
522 /* Free dependencies. */ | 523 /* Free dependencies. */ |
523 sched_free_deps (current_sched_info->head, current_sched_info->tail, true); | 524 sched_free_deps (current_sched_info->head, current_sched_info->tail, true); |
604 | 605 |
605 /* Blah. We should fix the rest of the code not to get confused by | 606 /* Blah. We should fix the rest of the code not to get confused by |
606 a note or two. */ | 607 a note or two. */ |
607 while (head != tail) | 608 while (head != tail) |
608 { | 609 { |
609 if (NOTE_P (head)) | 610 if (NOTE_P (head) || BOUNDARY_DEBUG_INSN_P (head)) |
610 head = NEXT_INSN (head); | 611 head = NEXT_INSN (head); |
611 else if (NOTE_P (tail)) | 612 else if (NOTE_P (tail) || BOUNDARY_DEBUG_INSN_P (tail)) |
612 tail = PREV_INSN (tail); | 613 tail = PREV_INSN (tail); |
613 else if (LABEL_P (head)) | 614 else if (LABEL_P (head)) |
614 head = NEXT_INSN (head); | 615 head = NEXT_INSN (head); |
615 else | 616 else |
616 break; | 617 break; |
640 | 641 |
641 /* BB was added to ebb after AFTER. */ | 642 /* BB was added to ebb after AFTER. */ |
642 static void | 643 static void |
643 ebb_add_block (basic_block bb, basic_block after) | 644 ebb_add_block (basic_block bb, basic_block after) |
644 { | 645 { |
645 /* Recovery blocks are always bounded by BARRIERS, | 646 /* Recovery blocks are always bounded by BARRIERS, |
646 therefore, they always form single block EBB, | 647 therefore, they always form single block EBB, |
647 therefore, we can use rec->index to identify such EBBs. */ | 648 therefore, we can use rec->index to identify such EBBs. */ |
648 if (after == EXIT_BLOCK_PTR) | 649 if (after == EXIT_BLOCK_PTR) |
649 bitmap_set_bit (&dont_calc_deps, bb->index); | 650 bitmap_set_bit (&dont_calc_deps, bb->index); |
650 else if (after == last_bb) | 651 else if (after == last_bb) |