comparison gcc/sched-deps.c @ 55:77e2b8dfacca gcc-4.4.5

update it from 4.4.3 to 4.5.0
author ryoma <e075725@ie.u-ryukyu.ac.jp>
date Fri, 12 Feb 2010 23:39:51 +0900
parents a06113de4d67
children b7f97abdc517
52:c156f1bd5cd9 55:77e2b8dfacca
1 /* Instruction scheduling pass. This file computes dependencies between 1 /* Instruction scheduling pass. This file computes dependencies between
2 instructions. 2 instructions.
3 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 3 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
5 Free Software Foundation, Inc. 5 Free Software Foundation, Inc.
6 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by, 6 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
7 and currently maintained by, Jim Wilson (wilson@cygnus.com) 7 and currently maintained by, Jim Wilson (wilson@cygnus.com)
8 8
9 This file is part of GCC. 9 This file is part of GCC.
39 #include "toplev.h" 39 #include "toplev.h"
40 #include "recog.h" 40 #include "recog.h"
41 #include "sched-int.h" 41 #include "sched-int.h"
42 #include "params.h" 42 #include "params.h"
43 #include "cselib.h" 43 #include "cselib.h"
44 #include "ira.h"
45 #include "target.h"
44 46
45 #ifdef INSN_SCHEDULING 47 #ifdef INSN_SCHEDULING
46 48
47 #ifdef ENABLE_CHECKING 49 #ifdef ENABLE_CHECKING
48 #define CHECK (true) 50 #define CHECK (true)
208 { 210 {
209 dump_dep (stderr, dep, 1); 211 dump_dep (stderr, dep, 1);
210 fprintf (stderr, "\n"); 212 fprintf (stderr, "\n");
211 } 213 }
212 214
215 /* Determine whether DEP is a dependency link of a non-debug insn on a
216 debug insn. */
217
218 static inline bool
219 depl_on_debug_p (dep_link_t dep)
220 {
221 return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
222 && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
223 }
224
213 /* Functions to operate with a single link from the dependencies lists - 225 /* Functions to operate with a single link from the dependencies lists -
214 dep_link_t. */ 226 dep_link_t. */
215 227
216 /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by 228 /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
217 PREV_NEXT_P. */ 229 PREV_NEXT_P. */
243 static void 255 static void
244 add_to_deps_list (dep_link_t link, deps_list_t l) 256 add_to_deps_list (dep_link_t link, deps_list_t l)
245 { 257 {
246 attach_dep_link (link, &DEPS_LIST_FIRST (l)); 258 attach_dep_link (link, &DEPS_LIST_FIRST (l));
247 259
248 ++DEPS_LIST_N_LINKS (l); 260 /* Don't count debug deps. */
261 if (!depl_on_debug_p (link))
262 ++DEPS_LIST_N_LINKS (l);
249 } 263 }
250 264
251 /* Detach dep_link L from the list. */ 265 /* Detach dep_link L from the list. */
252 static void 266 static void
253 detach_dep_link (dep_link_t l) 267 detach_dep_link (dep_link_t l)
268 static void 282 static void
269 remove_from_deps_list (dep_link_t link, deps_list_t list) 283 remove_from_deps_list (dep_link_t link, deps_list_t list)
270 { 284 {
271 detach_dep_link (link); 285 detach_dep_link (link);
272 286
273 --DEPS_LIST_N_LINKS (list); 287 /* Don't count debug deps. */
288 if (!depl_on_debug_p (link))
289 --DEPS_LIST_N_LINKS (list);
274 } 290 }
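
With depl_on_debug_p in place, the two list mutators above keep DEPS_LIST_N_LINKS as a count of non-debug deps only. A minimal sketch of the resulting invariant; debug_link and plain_link are hypothetical dep_link_t values, the first one with a debug-insn producer and a non-debug consumer:

    /* Sketch only: the counter ignores links whose producer is a debug
       insn (and whose consumer is not).  */
    add_to_deps_list (debug_link, l);   /* depl_on_debug_p -> count unchanged */
    add_to_deps_list (plain_link, l);   /* ordinary dep -> count becomes 1 */
    gcc_assert (DEPS_LIST_N_LINKS (l) == 1);   /* yet the list holds two nodes */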
275 291
276 /* Move link LINK from list FROM to list TO. */ 292 /* Move link LINK from list FROM to list TO. */
277 static void 293 static void
278 move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to) 294 move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
393 409
394 static regset reg_pending_sets; 410 static regset reg_pending_sets;
395 static regset reg_pending_clobbers; 411 static regset reg_pending_clobbers;
396 static regset reg_pending_uses; 412 static regset reg_pending_uses;
397 static enum reg_pending_barrier_mode reg_pending_barrier; 413 static enum reg_pending_barrier_mode reg_pending_barrier;
414
415 /* Hard registers implicitly clobbered or used (or may be implicitly
416 clobbered or used) by the currently analyzed insn. For example,
417 an insn constraint may allow only one register class. Even if 
418 no hard register currently appears in the insn, a register of 
419 that class will appear in the insn after the reload pass because 
420 the constraint requires it. */
421 static HARD_REG_SET implicit_reg_pending_clobbers;
422 static HARD_REG_SET implicit_reg_pending_uses;
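
A hedged sketch of how these sets are driven across one insn's analysis, assembled from the hunks later in this change: before reload the IRA hook reports the hard registers forced by constraints, they are accumulated here, turned into dependencies against reg_last->implicit_sets, and finally cleared.

    HARD_REG_SET temp;

    extract_insn (insn);                        /* classify the operands */
    preprocess_constraints ();
    ira_implicitly_set_insn_hard_regs (&temp);  /* regs the constraints force */
    IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
    /* ... sched_analyze_insn records the dependencies ... */
    CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
    CLEAR_HARD_REG_SET (implicit_reg_pending_uses);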
398 423
399 /* To speed up the test for duplicate dependency links we keep a 424 /* To speed up the test for duplicate dependency links we keep a
400 record of dependencies created by add_dependence when the average 425 record of dependencies created by add_dependence when the average
401 number of instructions in a basic block is very large. 426 number of instructions in a basic block is very large.
402 427
415 static bitmap_head *spec_dependency_cache = NULL; 440 static bitmap_head *spec_dependency_cache = NULL;
416 static int cache_size; 441 static int cache_size;
417 442
418 static int deps_may_trap_p (const_rtx); 443 static int deps_may_trap_p (const_rtx);
419 static void add_dependence_list (rtx, rtx, int, enum reg_note); 444 static void add_dependence_list (rtx, rtx, int, enum reg_note);
420 static void add_dependence_list_and_free (struct deps *, rtx, 445 static void add_dependence_list_and_free (struct deps *, rtx,
421 rtx *, int, enum reg_note); 446 rtx *, int, enum reg_note);
422 static void delete_all_dependences (rtx); 447 static void delete_all_dependences (rtx);
423 static void fixup_sched_groups (rtx); 448 static void fixup_sched_groups (rtx);
424 449
425 static void flush_pending_lists (struct deps *, rtx, int, int); 450 static void flush_pending_lists (struct deps *, rtx, int, int);
426 static void sched_analyze_1 (struct deps *, rtx, rtx); 451 static void sched_analyze_1 (struct deps *, rtx, rtx);
648 { 673 {
649 deps_list_t list; 674 deps_list_t list;
650 bool resolved_p; 675 bool resolved_p;
651 676
652 sd_next_list (insn, &list_types, &list, &resolved_p); 677 sd_next_list (insn, &list_types, &list, &resolved_p);
653 size += DEPS_LIST_N_LINKS (list); 678 if (list)
679 size += DEPS_LIST_N_LINKS (list);
654 } 680 }
655 681
656 return size; 682 return size;
657 } 683 }
658 684
659 /* Return true if INSN's lists defined by LIST_TYPES are all empty. */ 685 /* Return true if INSN's lists defined by LIST_TYPES are all empty. */
686
660 bool 687 bool
661 sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types) 688 sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
662 { 689 {
663 return sd_lists_size (insn, list_types) == 0; 690 while (list_types != SD_LIST_NONE)
691 {
692 deps_list_t list;
693 bool resolved_p;
694
695 sd_next_list (insn, &list_types, &list, &resolved_p);
696 if (!deps_list_empty_p (list))
697 return false;
698 }
699
700 return true;
664 } 701 }
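
The rewrite matters because sd_lists_size now skips debug deps: a list can report size zero while still holding links, so emptiness has to be tested structurally rather than via the counter. A minimal sketch, assuming deps_list_empty_p inspects the first link:

    static bool
    deps_list_empty_p (deps_list_t l)
    {
      return DEPS_LIST_FIRST (l) == NULL;   /* not DEPS_LIST_N_LINKS (l) == 0 */
    }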
665 702
666 /* Initialize data for INSN. */ 703 /* Initialize data for INSN. */
667 void 704 void
668 sd_init_insn (rtx insn) 705 sd_init_insn (rtx insn)
671 INSN_SPEC_BACK_DEPS (insn) = create_deps_list (); 708 INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
672 INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list (); 709 INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
673 INSN_FORW_DEPS (insn) = create_deps_list (); 710 INSN_FORW_DEPS (insn) = create_deps_list ();
674 INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list (); 711 INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
675 712
713 if (DEBUG_INSN_P (insn))
714 DEBUG_INSN_SCHED_P (insn) = TRUE;
715
676 /* ??? It would be nice to allocate dependency caches here. */ 716 /* ??? It would be nice to allocate dependency caches here. */
677 } 717 }
678 718
679 /* Free data for INSN. */ 719 /* Free data for INSN. */
680 void 720 void
681 sd_finish_insn (rtx insn) 721 sd_finish_insn (rtx insn)
682 { 722 {
683 /* ??? It would be nice to deallocate dependency caches here. */ 723 /* ??? It would be nice to deallocate dependency caches here. */
724
725 if (DEBUG_INSN_P (insn))
726 {
727 gcc_assert (DEBUG_INSN_SCHED_P (insn));
728 DEBUG_INSN_SCHED_P (insn) = FALSE;
729 }
684 730
685 free_deps_list (INSN_HARD_BACK_DEPS (insn)); 731 free_deps_list (INSN_HARD_BACK_DEPS (insn));
686 INSN_HARD_BACK_DEPS (insn) = NULL; 732 INSN_HARD_BACK_DEPS (insn) = NULL;
687 733
688 free_deps_list (INSN_SPEC_BACK_DEPS (insn)); 734 free_deps_list (INSN_SPEC_BACK_DEPS (insn));
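
The DEBUG_INSN_SCHED_P flag set in sd_init_insn and cleared in sd_finish_insn brackets the period during which a debug insn carries live dependency data. A hypothetical guard built on it:

    /* Sketch: only touch the dep lists of a debug insn while the
       scheduler still owns its data.  */
    if (DEBUG_INSN_P (insn) && DEBUG_INSN_SCHED_P (insn))
      {
        /* ... INSN_HARD_BACK_DEPS (insn) and friends are valid here ... */
      }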
831 gcc_assert (true_dependency_cache != NULL 877 gcc_assert (true_dependency_cache != NULL
832 && output_dependency_cache != NULL 878 && output_dependency_cache != NULL
833 && anti_dependency_cache != NULL); 879 && anti_dependency_cache != NULL);
834 880
835 if (!(current_sched_info->flags & USE_DEPS_LIST)) 881 if (!(current_sched_info->flags & USE_DEPS_LIST))
836 { 882 {
837 enum reg_note present_dep_type; 883 enum reg_note present_dep_type;
838 884
839 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)) 885 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
840 present_dep_type = REG_DEP_TRUE; 886 present_dep_type = REG_DEP_TRUE;
841 else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)) 887 else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
849 if ((int) DEP_TYPE (dep) >= (int) present_dep_type) 895 if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
850 /* DEP does not add anything to the existing dependence. */ 896 /* DEP does not add anything to the existing dependence. */
851 return DEP_PRESENT; 897 return DEP_PRESENT;
852 } 898 }
853 else 899 else
854 { 900 {
855 ds_t present_dep_types = 0; 901 ds_t present_dep_types = 0;
856 902
857 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)) 903 if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
858 present_dep_types |= DEP_TRUE; 904 present_dep_types |= DEP_TRUE;
859 if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)) 905 if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
860 present_dep_types |= DEP_OUTPUT; 906 present_dep_types |= DEP_OUTPUT;
861 if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)) 907 if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
957 case REG_DEP_ANTI: 1003 case REG_DEP_ANTI:
958 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid); 1004 bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
959 break; 1005 break;
960 1006
961 default: 1007 default:
962 gcc_unreachable (); 1008 gcc_unreachable ();
963 } 1009 }
964 } 1010 }
965 1011
966 set_dependency_caches (dep); 1012 set_dependency_caches (dep);
967 } 1013 }
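
The caches consulted above are per-LUID bitmap rows, one array per dependence kind. A minimal query sketch using only names from this file, with elem the producer and insn the consumer:

    int elem_luid = INSN_LUID (elem);
    int insn_luid = INSN_LUID (insn);

    if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
      ;   /* a DEP_TRUE link elem -> insn is already recorded */
    else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
      ;   /* a DEP_OUTPUT link is recorded */
    else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
      ;   /* a DEP_ANTI link is recorded */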
1038 dw_t dw; 1084 dw_t dw;
1039 1085
1040 dw = estimate_dep_weak (mem1, mem2); 1086 dw = estimate_dep_weak (mem1, mem2);
1041 ds = set_dep_weak (ds, BEGIN_DATA, dw); 1087 ds = set_dep_weak (ds, BEGIN_DATA, dw);
1042 } 1088 }
1043 1089
1044 new_status = ds_merge (dep_status, ds); 1090 new_status = ds_merge (dep_status, ds);
1045 } 1091 }
1046 } 1092 }
1047 1093
1048 ds = new_status; 1094 ds = new_status;
1075 bool maybe_present_p = true; 1121 bool maybe_present_p = true;
1076 bool present_p = false; 1122 bool present_p = false;
1077 1123
1078 gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep)) 1124 gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1079 && DEP_PRO (new_dep) != DEP_CON (new_dep)); 1125 && DEP_PRO (new_dep) != DEP_CON (new_dep));
1080 1126
1081 #ifdef ENABLE_CHECKING 1127 #ifdef ENABLE_CHECKING
1082 check_dep (new_dep, mem1 != NULL); 1128 check_dep (new_dep, mem1 != NULL);
1083 #endif 1129 #endif
1084 1130
1085 if (true_dependency_cache != NULL) 1131 if (true_dependency_cache != NULL)
1136 DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA, 1182 DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1137 estimate_dep_weak (mem1, mem2)); 1183 estimate_dep_weak (mem1, mem2));
1138 } 1184 }
1139 1185
1140 sd_add_dep (new_dep, resolved_p); 1186 sd_add_dep (new_dep, resolved_p);
1141 1187
1142 return DEP_CREATED; 1188 return DEP_CREATED;
1143 } 1189 }
1144 1190
1145 /* Initialize BACK_LIST_PTR with consumer's backward list and 1191 /* Initialize BACK_LIST_PTR with consumer's backward list and
1146 FORW_LIST_PTR with producer's forward list. If RESOLVED_P is true 1192 FORW_LIST_PTR with producer's forward list. If RESOLVED_P is true
1350 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0))) 1396 if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1351 add_dependence (insn, XEXP (list, 0), dep_type); 1397 add_dependence (insn, XEXP (list, 0), dep_type);
1352 } 1398 }
1353 } 1399 }
1354 1400
1355 /* Similar, but free *LISTP at the same time, when the context 1401 /* Similar, but free *LISTP at the same time, when the context
1356 is not readonly. */ 1402 is not readonly. */
1357 1403
1358 static void 1404 static void
1359 add_dependence_list_and_free (struct deps *deps, rtx insn, rtx *listp, 1405 add_dependence_list_and_free (struct deps *deps, rtx insn, rtx *listp,
1360 int uncond, enum reg_note dep_type) 1406 int uncond, enum reg_note dep_type)
1361 { 1407 {
1362 rtx list, next; 1408 rtx list, next;
1363 1409
1364 if (deps->readonly) 1410 if (deps->readonly)
1374 add_dependence (insn, XEXP (list, 0), dep_type); 1420 add_dependence (insn, XEXP (list, 0), dep_type);
1375 free_INSN_LIST_node (list); 1421 free_INSN_LIST_node (list);
1376 } 1422 }
1377 } 1423 }
1378 1424
1379 /* Remove all occurrences of INSN from LIST. Return the number of 1425 /* Remove all occurrences of INSN from LIST. Return the number of
1380 occurrences removed. */ 1426 occurrences removed. */
1381 1427
1382 static int 1428 static int
1383 remove_from_dependence_list (rtx insn, rtx* listp) 1429 remove_from_dependence_list (rtx insn, rtx* listp)
1384 { 1430 {
1385 int removed = 0; 1431 int removed = 0;
1386 1432
1387 while (*listp) 1433 while (*listp)
1388 { 1434 {
1389 if (XEXP (*listp, 0) == insn) 1435 if (XEXP (*listp, 0) == insn)
1390 { 1436 {
1391 remove_free_INSN_LIST_node (listp); 1437 remove_free_INSN_LIST_node (listp);
1392 removed++; 1438 removed++;
1393 continue; 1439 continue;
1394 } 1440 }
1395 1441
1396 listp = &XEXP (*listp, 1); 1442 listp = &XEXP (*listp, 1);
1397 } 1443 }
1398 1444
1399 return removed; 1445 return removed;
1400 } 1446 }
1401 1447
1402 /* Same as above, but process two lists at once. */ 1448 /* Same as above, but process two lists at once. */
1403 static int 1449 static int
1404 remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp) 1450 remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
1405 { 1451 {
1406 int removed = 0; 1452 int removed = 0;
1407 1453
1408 while (*listp) 1454 while (*listp)
1409 { 1455 {
1410 if (XEXP (*listp, 0) == insn) 1456 if (XEXP (*listp, 0) == insn)
1411 { 1457 {
1412 remove_free_INSN_LIST_node (listp); 1458 remove_free_INSN_LIST_node (listp);
1413 remove_free_EXPR_LIST_node (exprp); 1459 remove_free_EXPR_LIST_node (exprp);
1414 removed++; 1460 removed++;
1415 continue; 1461 continue;
1416 } 1462 }
1417 1463
1418 listp = &XEXP (*listp, 1); 1464 listp = &XEXP (*listp, 1);
1419 exprp = &XEXP (*exprp, 1); 1465 exprp = &XEXP (*exprp, 1);
1420 } 1466 }
1421 1467
1422 return removed; 1468 return removed;
1423 } 1469 }
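
A hypothetical call to the two-list variant, keeping the paired pending-read lists of a deps context in step (the INSN_LIST and the EXPR_LIST must stay parallel):

    int removed = remove_from_both_dependence_lists (insn,
                                                     &deps->pending_read_insns,
                                                     &deps->pending_read_mems);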
1424 1470
1425 /* Clear all dependencies for an insn. */ 1471 /* Clear all dependencies for an insn. */
1426 static void 1472 static void
1460 { 1506 {
1461 i = prev_nonnote_insn (i); 1507 i = prev_nonnote_insn (i);
1462 1508
1463 if (pro == i) 1509 if (pro == i)
1464 goto next_link; 1510 goto next_link;
1465 } while (SCHED_GROUP_P (i)); 1511 } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1466 1512
1467 if (! sched_insns_conditions_mutex_p (i, pro)) 1513 if (! sched_insns_conditions_mutex_p (i, pro))
1468 add_dependence (i, pro, DEP_TYPE (dep)); 1514 add_dependence (i, pro, DEP_TYPE (dep));
1469 next_link:; 1515 next_link:;
1470 } 1516 }
1471 1517
1472 delete_all_dependences (insn); 1518 delete_all_dependences (insn);
1473 1519
1474 prev_nonnote = prev_nonnote_insn (insn); 1520 prev_nonnote = prev_nonnote_insn (insn);
1521 while (DEBUG_INSN_P (prev_nonnote))
1522 prev_nonnote = prev_nonnote_insn (prev_nonnote);
1475 if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote) 1523 if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1476 && ! sched_insns_conditions_mutex_p (insn, prev_nonnote)) 1524 && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1477 add_dependence (insn, prev_nonnote, REG_DEP_ANTI); 1525 add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1478 } 1526 }
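
Both walks above now step past debug insns as well as notes, so sched-group fixups never anchor on a debug insn. The pattern as a standalone sketch, with a NULL guard this excerpt itself does not need:

    rtx prev = prev_nonnote_insn (insn);
    while (prev && DEBUG_INSN_P (prev))
      prev = prev_nonnote_insn (prev);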
1479 1527
1503 gcc_assert (!deps->readonly); 1551 gcc_assert (!deps->readonly);
1504 if (read_p) 1552 if (read_p)
1505 { 1553 {
1506 insn_list = &deps->pending_read_insns; 1554 insn_list = &deps->pending_read_insns;
1507 mem_list = &deps->pending_read_mems; 1555 mem_list = &deps->pending_read_mems;
1508 deps->pending_read_list_length++; 1556 if (!DEBUG_INSN_P (insn))
1557 deps->pending_read_list_length++;
1509 } 1558 }
1510 else 1559 else
1511 { 1560 {
1512 insn_list = &deps->pending_write_insns; 1561 insn_list = &deps->pending_write_insns;
1513 mem_list = &deps->pending_write_mems; 1562 mem_list = &deps->pending_write_mems;
1534 flush_pending_lists (struct deps *deps, rtx insn, int for_read, 1583 flush_pending_lists (struct deps *deps, rtx insn, int for_read,
1535 int for_write) 1584 int for_write)
1536 { 1585 {
1537 if (for_write) 1586 if (for_write)
1538 { 1587 {
1539 add_dependence_list_and_free (deps, insn, &deps->pending_read_insns, 1588 add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1540 1, REG_DEP_ANTI); 1589 1, REG_DEP_ANTI);
1541 if (!deps->readonly) 1590 if (!deps->readonly)
1542 { 1591 {
1543 free_EXPR_LIST_list (&deps->pending_read_mems); 1592 free_EXPR_LIST_list (&deps->pending_read_mems);
1544 deps->pending_read_list_length = 0; 1593 deps->pending_read_list_length = 0;
1546 } 1595 }
1547 1596
1548 add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1, 1597 add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1549 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT); 1598 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1550 1599
1551 add_dependence_list_and_free (deps, insn, 1600 add_dependence_list_and_free (deps, insn,
1552 &deps->last_pending_memory_flush, 1, 1601 &deps->last_pending_memory_flush, 1,
1553 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT); 1602 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1554 if (!deps->readonly) 1603 if (!deps->readonly)
1555 { 1604 {
1556 free_EXPR_LIST_list (&deps->pending_write_mems); 1605 free_EXPR_LIST_list (&deps->pending_write_mems);
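
For orientation, the FOR_READ/FOR_WRITE combinations in use at the two call sites visible later in this change:

    case TRAP_IF:          /* force pending stores for a trap handler */
      flush_pending_lists (deps, insn, true, false);
      break;

    case UNSPEC_VOLATILE:  /* flush both pending reads and writes */
      flush_pending_lists (deps, insn, true, true);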
1609 else 1658 else
1610 gcc_assert (ds & BEGIN_DATA); 1659 gcc_assert (ds & BEGIN_DATA);
1611 1660
1612 { 1661 {
1613 dep_def _dep, *dep = &_dep; 1662 dep_def _dep, *dep = &_dep;
1614 1663
1615 init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds), 1664 init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1616 current_sched_info->flags & USE_DEPS_LIST ? ds : -1); 1665 current_sched_info->flags & USE_DEPS_LIST ? ds : -1);
1617 maybe_add_or_update_dep_1 (dep, false, pending_mem, mem); 1666 maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1618 } 1667 }
1619 1668
1620 } 1669 }
1676 { 1725 {
1677 gcc_assert (ds & DEP_ANTI); 1726 gcc_assert (ds & DEP_ANTI);
1678 return REG_DEP_ANTI; 1727 return REG_DEP_ANTI;
1679 } 1728 }
1680 } 1729 }
1730
1731
1732
1733 /* Functions for computation of info needed for register pressure
1734 sensitive insn scheduling. */
1735
1736
1737 /* Allocate and return reg_use_data structure for REGNO and INSN. */
1738 static struct reg_use_data *
1739 create_insn_reg_use (int regno, rtx insn)
1740 {
1741 struct reg_use_data *use;
1742
1743 use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1744 use->regno = regno;
1745 use->insn = insn;
1746 use->next_insn_use = INSN_REG_USE_LIST (insn);
1747 INSN_REG_USE_LIST (insn) = use;
1748 return use;
1749 }
1750
1751 /* Allocate and return reg_set_data structure for REGNO and INSN. */
1752 static struct reg_set_data *
1753 create_insn_reg_set (int regno, rtx insn)
1754 {
1755 struct reg_set_data *set;
1756
1757 set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1758 set->regno = regno;
1759 set->insn = insn;
1760 set->next_insn_set = INSN_REG_SET_LIST (insn);
1761 INSN_REG_SET_LIST (insn) = set;
1762 return set;
1763 }
1764
1765 /* Set up insn register uses for INSN and dependency context DEPS. */
1766 static void
1767 setup_insn_reg_uses (struct deps *deps, rtx insn)
1768 {
1769 unsigned i;
1770 reg_set_iterator rsi;
1771 rtx list;
1772 struct reg_use_data *use, *use2, *next;
1773 struct deps_reg *reg_last;
1774
1775 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1776 {
1777 if (i < FIRST_PSEUDO_REGISTER
1778 && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1779 continue;
1780
1781 if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1782 && ! REGNO_REG_SET_P (reg_pending_sets, i)
1783 && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1784 /* Ignore a use that is not dying. */
1785 continue;
1786
1787 use = create_insn_reg_use (i, insn);
1788 use->next_regno_use = use;
1789 reg_last = &deps->reg_last[i];
1790
1791 /* Create the cycle list of uses. */
1792 for (list = reg_last->uses; list; list = XEXP (list, 1))
1793 {
1794 use2 = create_insn_reg_use (i, XEXP (list, 0));
1795 next = use->next_regno_use;
1796 use->next_regno_use = use2;
1797 use2->next_regno_use = next;
1798 }
1799 }
1800 }
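
next_regno_use threads every use of a register into a cycle (a lone use points at itself), so a traversal must stop when it returns to its starting node:

    struct reg_use_data *u = use;
    do
      {
        /* ... visit u->insn, one user of u->regno ... */
        u = u->next_regno_use;
      }
    while (u != use);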
1801
1802 /* Register pressure info for the currently processed insn. */
1803 static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1804
1805 /* Return TRUE if INSN has a use structure for REGNO. */
1806 static bool
1807 insn_use_p (rtx insn, int regno)
1808 {
1809 struct reg_use_data *use;
1810
1811 for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1812 if (use->regno == regno)
1813 return true;
1814 return false;
1815 }
1816
1817 /* Update the register pressure info after birth of pseudo register REGNO
1818 in INSN. Arguments CLOBBER_P and UNUSED_P say correspondingly that
1819 the register is in clobber or unused after the insn. */
1820 static void
1821 mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
1822 {
1823 int incr, new_incr;
1824 enum reg_class cl;
1825
1826 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1827 cl = sched_regno_cover_class[regno];
1828 if (cl != NO_REGS)
1829 {
1830 incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
1831 if (clobber_p)
1832 {
1833 new_incr = reg_pressure_info[cl].clobber_increase + incr;
1834 reg_pressure_info[cl].clobber_increase = new_incr;
1835 }
1836 else if (unused_p)
1837 {
1838 new_incr = reg_pressure_info[cl].unused_set_increase + incr;
1839 reg_pressure_info[cl].unused_set_increase = new_incr;
1840 }
1841 else
1842 {
1843 new_incr = reg_pressure_info[cl].set_increase + incr;
1844 reg_pressure_info[cl].set_increase = new_incr;
1845 if (! insn_use_p (insn, regno))
1846 reg_pressure_info[cl].change += incr;
1847 create_insn_reg_set (regno, insn);
1848 }
1849 gcc_assert (new_incr < (1 << INCREASE_BITS));
1850 }
1851 }
1852
1853 /* Like mark_insn_pseudo_birth except that NREGS says how many
1854 hard registers are involved in the birth. */
1855 static void
1856 mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
1857 bool clobber_p, bool unused_p)
1858 {
1859 enum reg_class cl;
1860 int new_incr, last = regno + nregs;
1861
1862 while (regno < last)
1863 {
1864 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
1865 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
1866 {
1867 cl = sched_regno_cover_class[regno];
1868 if (cl != NO_REGS)
1869 {
1870 if (clobber_p)
1871 {
1872 new_incr = reg_pressure_info[cl].clobber_increase + 1;
1873 reg_pressure_info[cl].clobber_increase = new_incr;
1874 }
1875 else if (unused_p)
1876 {
1877 new_incr = reg_pressure_info[cl].unused_set_increase + 1;
1878 reg_pressure_info[cl].unused_set_increase = new_incr;
1879 }
1880 else
1881 {
1882 new_incr = reg_pressure_info[cl].set_increase + 1;
1883 reg_pressure_info[cl].set_increase = new_incr;
1884 if (! insn_use_p (insn, regno))
1885 reg_pressure_info[cl].change += 1;
1886 create_insn_reg_set (regno, insn);
1887 }
1888 gcc_assert (new_incr < (1 << INCREASE_BITS));
1889 }
1890 }
1891 regno++;
1892 }
1893 }
1894
1895 /* Update the register pressure info after birth of pseudo or hard
1896 register REG in INSN. Arguments CLOBBER_P and UNUSED_P say
1897 correspondingly that the register is in clobber or unused after the
1898 insn. */
1899 static void
1900 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
1901 {
1902 int regno;
1903
1904 if (GET_CODE (reg) == SUBREG)
1905 reg = SUBREG_REG (reg);
1906
1907 if (! REG_P (reg))
1908 return;
1909
1910 regno = REGNO (reg);
1911 if (regno < FIRST_PSEUDO_REGISTER)
1912 mark_insn_hard_regno_birth (insn, regno,
1913 hard_regno_nregs[regno][GET_MODE (reg)],
1914 clobber_p, unused_p);
1915 else
1916 mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
1917 }
1918
1919 /* Update the register pressure info after death of pseudo register
1920 REGNO. */
1921 static void
1922 mark_pseudo_death (int regno)
1923 {
1924 int incr;
1925 enum reg_class cl;
1926
1927 gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1928 cl = sched_regno_cover_class[regno];
1929 if (cl != NO_REGS)
1930 {
1931 incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
1932 reg_pressure_info[cl].change -= incr;
1933 }
1934 }
1935
1936 /* Like mark_pseudo_death except that NREGS says how many hard
1937 registers are involved in the death. */
1938 static void
1939 mark_hard_regno_death (int regno, int nregs)
1940 {
1941 enum reg_class cl;
1942 int last = regno + nregs;
1943
1944 while (regno < last)
1945 {
1946 gcc_assert (regno < FIRST_PSEUDO_REGISTER);
1947 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
1948 {
1949 cl = sched_regno_cover_class[regno];
1950 if (cl != NO_REGS)
1951 reg_pressure_info[cl].change -= 1;
1952 }
1953 regno++;
1954 }
1955 }
1956
1957 /* Update the register pressure info after death of pseudo or hard
1958 register REG. */
1959 static void
1960 mark_reg_death (rtx reg)
1961 {
1962 int regno;
1963
1964 if (GET_CODE (reg) == SUBREG)
1965 reg = SUBREG_REG (reg);
1966
1967 if (! REG_P (reg))
1968 return;
1969
1970 regno = REGNO (reg);
1971 if (regno < FIRST_PSEUDO_REGISTER)
1972 mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
1973 else
1974 mark_pseudo_death (regno);
1975 }
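
Births and deaths adjust the same per-class counters in opposite directions, so the tracking nets out for a same-class copy. A hypothetical one-word move r100 = r200 with r200 dying here, both pseudos in cover class CL:

    /* mark_insn_reg_birth (via the note_stores callback):
         reg_pressure_info[CL].set_increase += 1;
         reg_pressure_info[CL].change       += 1;   (r100 is not also used)
       mark_reg_death (from the REG_DEAD note for r200):
         reg_pressure_info[CL].change       -= 1;
       Net: set_increase == 1, change == 0, pressure in CL is unchanged.  */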
1976
1977 /* Process SETTER of REG. DATA is an insn containing the setter. */
1978 static void
1979 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
1980 {
1981 if (setter != NULL_RTX && GET_CODE (setter) != SET)
1982 return;
1983 mark_insn_reg_birth
1984 ((rtx) data, reg, false,
1985 find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
1986 }
1987
1988 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
1989 static void
1990 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
1991 {
1992 if (GET_CODE (setter) == CLOBBER)
1993 mark_insn_reg_birth ((rtx) data, reg, true, false);
1994 }
1995
1996 /* Set up reg pressure info related to INSN. */
1997 static void
1998 setup_insn_reg_pressure_info (rtx insn)
1999 {
2000 int i, len;
2001 enum reg_class cl;
2002 static struct reg_pressure_data *pressure_info;
2003 rtx link;
2004
2005 gcc_assert (sched_pressure_p);
2006
2007 if (! INSN_P (insn))
2008 return;
2009
2010 for (i = 0; i < ira_reg_class_cover_size; i++)
2011 {
2012 cl = ira_reg_class_cover[i];
2013 reg_pressure_info[cl].clobber_increase = 0;
2014 reg_pressure_info[cl].set_increase = 0;
2015 reg_pressure_info[cl].unused_set_increase = 0;
2016 reg_pressure_info[cl].change = 0;
2017 }
2018
2019 note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2020
2021 note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2022
2023 #ifdef AUTO_INC_DEC
2024 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2025 if (REG_NOTE_KIND (link) == REG_INC)
2026 mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2027 #endif
2028
2029 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2030 if (REG_NOTE_KIND (link) == REG_DEAD)
2031 mark_reg_death (XEXP (link, 0));
2032
2033 len = sizeof (struct reg_pressure_data) * ira_reg_class_cover_size;
2034 pressure_info
2035 = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2036 INSN_MAX_REG_PRESSURE (insn) = (int *) xmalloc (ira_reg_class_cover_size
2037 * sizeof (int));
2038 for (i = 0; i < ira_reg_class_cover_size; i++)
2039 {
2040 cl = ira_reg_class_cover[i];
2041 pressure_info[i].clobber_increase
2042 = reg_pressure_info[cl].clobber_increase;
2043 pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2044 pressure_info[i].unused_set_increase
2045 = reg_pressure_info[cl].unused_set_increase;
2046 pressure_info[i].change = reg_pressure_info[cl].change;
2047 }
2048 }
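
The result is stored per insn, indexed in cover-class order. A sketch of a consumer reading it back; INSN_REG_PRESSURE (insn) is the array allocated above, parallel to ira_reg_class_cover:

    for (i = 0; i < ira_reg_class_cover_size; i++)
      {
        enum reg_class cl = ira_reg_class_cover[i];
        struct reg_pressure_data *p = &INSN_REG_PRESSURE (insn)[i];

        /* P describes class CL: p->change > 0 means pressure in CL
           rises past this insn, p->change < 0 means registers of CL
           die here.  */
      }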
2049
2050
1681 2051
1682 2052
1683 /* Internal variable for sched_analyze_[12] () functions. 2053 /* Internal variable for sched_analyze_[12] () functions.
1684 If it is nonzero, this means that sched_analyze_[12] looks 2054 If it is nonzero, this means that sched_analyze_[12] looks
1685 at the most toplevel SET. */ 2055 at the most toplevel SET. */
1686 static bool can_start_lhs_rhs_p; 2056 static bool can_start_lhs_rhs_p;
1687 2057
1688 /* Extend reg info for the deps context DEPS given that 2058 /* Extend reg info for the deps context DEPS given that
1689 we have just generated a register numbered REGNO. */ 2059 we have just generated a register numbered REGNO. */
1690 static void 2060 static void
1691 extend_deps_reg_info (struct deps *deps, int regno) 2061 extend_deps_reg_info (struct deps *deps, int regno)
1692 { 2062 {
1693 int max_regno = regno + 1; 2063 int max_regno = regno + 1;
1702 return; 2072 return;
1703 } 2073 }
1704 2074
1705 if (max_regno > deps->max_reg) 2075 if (max_regno > deps->max_reg)
1706 { 2076 {
1707 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last, 2077 deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
1708 max_regno); 2078 max_regno);
1709 memset (&deps->reg_last[deps->max_reg], 2079 memset (&deps->reg_last[deps->max_reg],
1710 0, (max_regno - deps->max_reg) 2080 0, (max_regno - deps->max_reg)
1711 * sizeof (struct deps_reg)); 2081 * sizeof (struct deps_reg));
1712 deps->max_reg = max_regno; 2082 deps->max_reg = max_regno;
1713 } 2083 }
1714 } 2084 }
1715 2085
1799 2169
1800 /* Don't let it cross a call after scheduling if it doesn't 2170 /* Don't let it cross a call after scheduling if it doesn't
1801 already cross one. */ 2171 already cross one. */
1802 if (REG_N_CALLS_CROSSED (regno) == 0) 2172 if (REG_N_CALLS_CROSSED (regno) == 0)
1803 { 2173 {
1804 if (!deps->readonly 2174 if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
1805 && ref == USE)
1806 deps->sched_before_next_call 2175 deps->sched_before_next_call
1807 = alloc_INSN_LIST (insn, deps->sched_before_next_call); 2176 = alloc_INSN_LIST (insn, deps->sched_before_next_call);
1808 else 2177 else
1809 add_dependence_list (insn, deps->last_function_call, 1, 2178 add_dependence_list (insn, deps->last_function_call, 1,
1810 REG_DEP_ANTI); 2179 REG_DEP_ANTI);
1891 2260
1892 #ifdef STACK_REGS 2261 #ifdef STACK_REGS
1893 /* Treat all writes to a stack register as modifying the TOS. */ 2262 /* Treat all writes to a stack register as modifying the TOS. */
1894 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG) 2263 if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
1895 { 2264 {
2265 int nregs;
2266
1896 /* Avoid analyzing the same register twice. */ 2267 /* Avoid analyzing the same register twice. */
1897 if (regno != FIRST_STACK_REG) 2268 if (regno != FIRST_STACK_REG)
1898 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn); 2269 sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
1899 sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn); 2270
2271 nregs = hard_regno_nregs[FIRST_STACK_REG][mode];
2272 while (--nregs >= 0)
2273 SET_HARD_REG_BIT (implicit_reg_pending_uses,
2274 FIRST_STACK_REG + nregs);
1900 } 2275 }
1901 #endif 2276 #endif
1902 } 2277 }
1903 else if (MEM_P (dest)) 2278 else if (MEM_P (dest))
1904 { 2279 {
1905 /* Writing memory. */ 2280 /* Writing memory. */
1906 rtx t = dest; 2281 rtx t = dest;
1907 2282
1908 if (sched_deps_info->use_cselib) 2283 if (sched_deps_info->use_cselib)
1909 { 2284 {
2285 enum machine_mode address_mode
2286 = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));
2287
1910 t = shallow_copy_rtx (dest); 2288 t = shallow_copy_rtx (dest);
1911 cselib_lookup (XEXP (t, 0), Pmode, 1); 2289 cselib_lookup (XEXP (t, 0), address_mode, 1);
1912 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0)); 2290 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
1913 } 2291 }
1914 t = canon_rtx (t); 2292 t = canon_rtx (t);
1915 2293
1916 /* Pending lists can't get larger with a readonly context. */ 2294 /* Pending lists can't get larger with a readonly context. */
2059 rtx pending, pending_mem; 2437 rtx pending, pending_mem;
2060 rtx t = x; 2438 rtx t = x;
2061 2439
2062 if (sched_deps_info->use_cselib) 2440 if (sched_deps_info->use_cselib)
2063 { 2441 {
2442 enum machine_mode address_mode
2443 = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t));
2444
2064 t = shallow_copy_rtx (t); 2445 t = shallow_copy_rtx (t);
2065 cselib_lookup (XEXP (t, 0), Pmode, 1); 2446 cselib_lookup (XEXP (t, 0), address_mode, 1);
2066 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0)); 2447 XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
2067 } 2448 }
2068 t = canon_rtx (t); 2449
2069 pending = deps->pending_read_insns; 2450 if (!DEBUG_INSN_P (insn))
2070 pending_mem = deps->pending_read_mems;
2071 while (pending)
2072 { 2451 {
2073 if (read_dependence (XEXP (pending_mem, 0), t) 2452 t = canon_rtx (t);
2074 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0))) 2453 pending = deps->pending_read_insns;
2075 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0), 2454 pending_mem = deps->pending_read_mems;
2076 DEP_ANTI); 2455 while (pending)
2077
2078 pending = XEXP (pending, 1);
2079 pending_mem = XEXP (pending_mem, 1);
2080 }
2081
2082 pending = deps->pending_write_insns;
2083 pending_mem = deps->pending_write_mems;
2084 while (pending)
2085 {
2086 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
2087 t, rtx_varies_p)
2088 && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2089 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2090 sched_deps_info->generate_spec_deps
2091 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2092
2093 pending = XEXP (pending, 1);
2094 pending_mem = XEXP (pending_mem, 1);
2095 }
2096
2097 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2098 {
2099 if (! JUMP_P (XEXP (u, 0)))
2100 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2101 else if (deps_may_trap_p (x))
2102 { 2456 {
2103 if ((sched_deps_info->generate_spec_deps) 2457 if (read_dependence (XEXP (pending_mem, 0), t)
2104 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL)) 2458 && ! sched_insns_conditions_mutex_p (insn,
2459 XEXP (pending, 0)))
2460 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2461 DEP_ANTI);
2462
2463 pending = XEXP (pending, 1);
2464 pending_mem = XEXP (pending_mem, 1);
2465 }
2466
2467 pending = deps->pending_write_insns;
2468 pending_mem = deps->pending_write_mems;
2469 while (pending)
2470 {
2471 if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
2472 t, rtx_varies_p)
2473 && ! sched_insns_conditions_mutex_p (insn,
2474 XEXP (pending, 0)))
2475 note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2476 sched_deps_info->generate_spec_deps
2477 ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2478
2479 pending = XEXP (pending, 1);
2480 pending_mem = XEXP (pending_mem, 1);
2481 }
2482
2483 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2484 {
2485 if (! JUMP_P (XEXP (u, 0)))
2486 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2487 else if (deps_may_trap_p (x))
2105 { 2488 {
2106 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL, 2489 if ((sched_deps_info->generate_spec_deps)
2107 MAX_DEP_WEAK); 2490 && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2108 2491 {
2109 note_dep (XEXP (u, 0), ds); 2492 ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2493 MAX_DEP_WEAK);
2494
2495 note_dep (XEXP (u, 0), ds);
2496 }
2497 else
2498 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2110 } 2499 }
2111 else
2112 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2113 } 2500 }
2114 } 2501 }
2115 2502
2116 /* Always add these dependencies to pending_reads, since 2503 /* Always add these dependencies to pending_reads, since
2117 this insn may be followed by a write. */ 2504 this insn may be followed by a write. */
2118 if (!deps->readonly) 2505 if (!deps->readonly)
2119 add_insn_mem_dependence (deps, true, insn, x); 2506 add_insn_mem_dependence (deps, true, insn, x);
2120 2507
2121 /* Take advantage of tail recursion here. */
2122 sched_analyze_2 (deps, XEXP (x, 0), insn); 2508 sched_analyze_2 (deps, XEXP (x, 0), insn);
2123 2509
2124 if (cslr_p && sched_deps_info->finish_rhs) 2510 if (cslr_p && sched_deps_info->finish_rhs)
2125 sched_deps_info->finish_rhs (); 2511 sched_deps_info->finish_rhs ();
2126 2512
2128 } 2514 }
2129 2515
2130 /* Force pending stores to memory in case a trap handler needs them. */ 2516 /* Force pending stores to memory in case a trap handler needs them. */
2131 case TRAP_IF: 2517 case TRAP_IF:
2132 flush_pending_lists (deps, insn, true, false); 2518 flush_pending_lists (deps, insn, true, false);
2519 break;
2520
2521 case PREFETCH:
2522 if (PREFETCH_SCHEDULE_BARRIER_P (x))
2523 reg_pending_barrier = TRUE_BARRIER;
2133 break; 2524 break;
2134 2525
2135 case UNSPEC_VOLATILE: 2526 case UNSPEC_VOLATILE:
2136 flush_pending_lists (deps, insn, true, true); 2527 flush_pending_lists (deps, insn, true, true);
2137 /* FALLTHRU */ 2528 /* FALLTHRU */
2223 RTX_CODE code = GET_CODE (x); 2614 RTX_CODE code = GET_CODE (x);
2224 rtx link; 2615 rtx link;
2225 unsigned i; 2616 unsigned i;
2226 reg_set_iterator rsi; 2617 reg_set_iterator rsi;
2227 2618
2619 if (! reload_completed)
2620 {
2621 HARD_REG_SET temp;
2622
2623 extract_insn (insn);
2624 preprocess_constraints ();
2625 ira_implicitly_set_insn_hard_regs (&temp);
2626 IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2627 }
2628
2228 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn) 2629 can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2229 && code == SET); 2630 && code == SET);
2631
2632 if (may_trap_p (x))
2633 /* Avoid moving trapping instructions across function calls that might
2634 not always return. */
2635 add_dependence_list (insn, deps->last_function_call_may_noreturn,
2636 1, REG_DEP_ANTI);
2230 2637
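
The 1 passed above makes these dependencies unconditional; add_dependence_list consults sched_insns_conditions_mutex_p only when UNCOND is zero. The last_function_call_may_noreturn list itself is maintained when a CALL_INSN is analyzed, using call_may_noreturn_p from the end of this change; the call-handling code is outside this excerpt, so the following bookkeeping is a hypothetical sketch only:

    if (CALL_P (insn) && !deps->readonly && call_may_noreturn_p (insn))
      deps->last_function_call_may_noreturn
        = alloc_INSN_LIST (insn, deps->last_function_call_may_noreturn);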
2231 if (code == COND_EXEC) 2638 if (code == COND_EXEC)
2232 { 2639 {
2233 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn); 2640 sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2234 2641
2243 2650
2244 /* Bare clobber insns are used for letting life analysis, reg-stack 2651 /* Bare clobber insns are used for letting life analysis, reg-stack
2245 and others know that a value is dead. Depend on the last call 2652 and others know that a value is dead. Depend on the last call
2246 instruction so that reg-stack won't get confused. */ 2653 instruction so that reg-stack won't get confused. */
2247 if (code == CLOBBER) 2654 if (code == CLOBBER)
2248 add_dependence_list (insn, deps->last_function_call, 1, REG_DEP_OUTPUT); 2655 add_dependence_list (insn, deps->last_function_call, 1,
2656 REG_DEP_OUTPUT);
2249 } 2657 }
2250 else if (code == PARALLEL) 2658 else if (code == PARALLEL)
2251 { 2659 {
2252 for (i = XVECLEN (x, 0); i--;) 2660 for (i = XVECLEN (x, 0); i--;)
2253 { 2661 {
2285 2693
2286 if (JUMP_P (insn)) 2694 if (JUMP_P (insn))
2287 { 2695 {
2288 rtx next; 2696 rtx next;
2289 next = next_nonnote_insn (insn); 2697 next = next_nonnote_insn (insn);
2698 while (next && DEBUG_INSN_P (next))
2699 next = next_nonnote_insn (next);
2290 if (next && BARRIER_P (next)) 2700 if (next && BARRIER_P (next))
2291 reg_pending_barrier = MOVE_BARRIER; 2701 reg_pending_barrier = MOVE_BARRIER;
2292 else 2702 else
2293 { 2703 {
2294 rtx pending, pending_mem; 2704 rtx pending, pending_mem;
2304 /* Make latency of jump equal to 0 by using anti-dependence. */ 2714 /* Make latency of jump equal to 0 by using anti-dependence. */
2305 EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi) 2715 EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
2306 { 2716 {
2307 struct deps_reg *reg_last = &deps->reg_last[i]; 2717 struct deps_reg *reg_last = &deps->reg_last[i];
2308 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI); 2718 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2719 add_dependence_list (insn, reg_last->implicit_sets,
2720 0, REG_DEP_ANTI);
2309 add_dependence_list (insn, reg_last->clobbers, 0, 2721 add_dependence_list (insn, reg_last->clobbers, 0,
2310 REG_DEP_ANTI); 2722 REG_DEP_ANTI);
2311 2723
2312 if (!deps->readonly) 2724 if (!deps->readonly)
2313 { 2725 {
2359 add_branch_dependences should be adjusted for RGN mode instead. */ 2771 add_branch_dependences should be adjusted for RGN mode instead. */
2360 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn)) 2772 if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2361 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn))) 2773 || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
2362 reg_pending_barrier = MOVE_BARRIER; 2774 reg_pending_barrier = MOVE_BARRIER;
2363 2775
2364 /* If the current insn is conditional, we can't free any 2776 if (sched_pressure_p)
2365 of the lists. */ 2777 {
2366 if (sched_has_condition_p (insn)) 2778 setup_insn_reg_uses (deps, insn);
2367 { 2779 setup_insn_reg_pressure_info (insn);
2780 }
2781
2782 /* Add register dependencies for insn. */
2783 if (DEBUG_INSN_P (insn))
2784 {
2785 rtx prev = deps->last_debug_insn;
2786 rtx u;
2787
2788 if (!deps->readonly)
2789 deps->last_debug_insn = insn;
2790
2791 if (prev)
2792 add_dependence (insn, prev, REG_DEP_ANTI);
2793
2794 add_dependence_list (insn, deps->last_function_call, 1,
2795 REG_DEP_ANTI);
2796
2797 for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2798 if (! JUMP_P (XEXP (u, 0))
2799 || !sel_sched_p ())
2800 add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2801
2368 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi) 2802 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2369 { 2803 {
2370 struct deps_reg *reg_last = &deps->reg_last[i]; 2804 struct deps_reg *reg_last = &deps->reg_last[i];
2371 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE); 2805 add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
2372 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE); 2806 add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
2373 2807
2374 if (!deps->readonly) 2808 if (!deps->readonly)
2375 { 2809 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2376 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses); 2810 }
2377 reg_last->uses_length++; 2811 CLEAR_REG_SET (reg_pending_uses);
2378 } 2812
2379 } 2813 /* Quite often, a debug insn will refer to stuff in the
2380 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi) 2814 previous instruction, but the reason we want this
2381 { 2815 dependency here is to make sure the scheduler doesn't
2382 struct deps_reg *reg_last = &deps->reg_last[i]; 2816 gratuitously move a debug insn ahead. This could dirty
2383 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT); 2817 DF flags and cause additional analysis that wouldn't have
2384 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI); 2818 occurred in compilation without debug insns, and such
2385 2819 additional analysis can modify the generated code. */
2386 if (!deps->readonly) 2820 prev = PREV_INSN (insn);
2387 { 2821
2388 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers); 2822 if (prev && NONDEBUG_INSN_P (prev))
2389 reg_last->clobbers_length++; 2823 add_dependence (insn, prev, REG_DEP_ANTI);
2390 }
2391 }
2392 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2393 {
2394 struct deps_reg *reg_last = &deps->reg_last[i];
2395 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2396 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
2397 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2398
2399 if (!deps->readonly)
2400 {
2401 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2402 SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2403 }
2404 }
2405 } 2824 }
2406 else 2825 else
2407 { 2826 {
2408 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi) 2827 EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2409 { 2828 {
2410 struct deps_reg *reg_last = &deps->reg_last[i]; 2829 struct deps_reg *reg_last = &deps->reg_last[i];
2411 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE); 2830 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2412 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE); 2831 add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
2413 2832 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2414 if (!deps->readonly) 2833
2415 { 2834 if (!deps->readonly)
2416 reg_last->uses_length++; 2835 {
2417 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses); 2836 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2418 } 2837 reg_last->uses_length++;
2419 } 2838 }
2420 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi) 2839 }
2421 { 2840
2422 struct deps_reg *reg_last = &deps->reg_last[i]; 2841 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2423 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH 2842 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
2424 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH) 2843 {
2425 { 2844 struct deps_reg *reg_last = &deps->reg_last[i];
2426 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0, 2845 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2427 REG_DEP_OUTPUT); 2846 add_dependence_list (insn, reg_last->implicit_sets, 0,
2428 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0, 2847 REG_DEP_ANTI);
2429 REG_DEP_ANTI); 2848 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2430 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0, 2849
2431 REG_DEP_OUTPUT); 2850 if (!deps->readonly)
2432 2851 {
2433 if (!deps->readonly) 2852 reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2434 { 2853 reg_last->uses_length++;
2435 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); 2854 }
2436 reg_last->clobbers_length = 0; 2855 }
2437 reg_last->uses_length = 0; 2856
2438 } 2857 /* If the current insn is conditional, we can't free any
2439 } 2858 of the lists. */
2440 else 2859 if (sched_has_condition_p (insn))
2441 { 2860 {
2442 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT); 2861 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2443 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI); 2862 {
2444 } 2863 struct deps_reg *reg_last = &deps->reg_last[i];
2445 2864 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2446 if (!deps->readonly) 2865 add_dependence_list (insn, reg_last->implicit_sets, 0,
2447 { 2866 REG_DEP_ANTI);
2448 reg_last->clobbers_length++; 2867 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2449 reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers); 2868
2450 } 2869 if (!deps->readonly)
2451 } 2870 {
2452 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi) 2871 reg_last->clobbers
2453 { 2872 = alloc_INSN_LIST (insn, reg_last->clobbers);
2454 struct deps_reg *reg_last = &deps->reg_last[i]; 2873 reg_last->clobbers_length++;
2455 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0, 2874 }
2456 REG_DEP_OUTPUT); 2875 }
2457 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0, 2876 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2458 REG_DEP_OUTPUT); 2877 {
2459 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0, 2878 struct deps_reg *reg_last = &deps->reg_last[i];
2460 REG_DEP_ANTI); 2879 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2461 2880 add_dependence_list (insn, reg_last->implicit_sets, 0,
2462 if (!deps->readonly) 2881 REG_DEP_ANTI);
2463 { 2882 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
2464 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); 2883 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2465 reg_last->uses_length = 0; 2884
2466 reg_last->clobbers_length = 0; 2885 if (!deps->readonly)
2467 CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i); 2886 {
2468 } 2887 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2469 } 2888 SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2470 } 2889 }
2890 }
2891 }
2892 else
2893 {
2894 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2895 {
2896 struct deps_reg *reg_last = &deps->reg_last[i];
2897 if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
2898 || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
2899 {
2900 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2901 REG_DEP_OUTPUT);
2902 add_dependence_list_and_free (deps, insn,
2903 &reg_last->implicit_sets, 0,
2904 REG_DEP_ANTI);
2905 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2906 REG_DEP_ANTI);
2907 add_dependence_list_and_free
2908 (deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);
2909
2910 if (!deps->readonly)
2911 {
2912 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2913 reg_last->clobbers_length = 0;
2914 reg_last->uses_length = 0;
2915 }
2916 }
2917 else
2918 {
2919 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2920 add_dependence_list (insn, reg_last->implicit_sets, 0,
2921 REG_DEP_ANTI);
2922 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2923 }
2924
2925 if (!deps->readonly)
2926 {
2927 reg_last->clobbers_length++;
2928 reg_last->clobbers
2929 = alloc_INSN_LIST (insn, reg_last->clobbers);
2930 }
2931 }
2932 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2933 {
2934 struct deps_reg *reg_last = &deps->reg_last[i];
2935
2936 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2937 REG_DEP_OUTPUT);
2938 add_dependence_list_and_free (deps, insn,
2939 &reg_last->implicit_sets,
2940 0, REG_DEP_ANTI);
2941 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
2942 REG_DEP_OUTPUT);
2943 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2944 REG_DEP_ANTI);
2945
2946 if (!deps->readonly)
2947 {
2948 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2949 reg_last->uses_length = 0;
2950 reg_last->clobbers_length = 0;
2951 CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2952 }
2953 }
2954 }
2955 }
2956
2957 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2958 if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
2959 {
2960 struct deps_reg *reg_last = &deps->reg_last[i];
2961 add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2962 add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
2963 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2964
2965 if (!deps->readonly)
2966 reg_last->implicit_sets
2967 = alloc_INSN_LIST (insn, reg_last->implicit_sets);
2968 }
2471 2969
2472 if (!deps->readonly) 2970 if (!deps->readonly)
2473 { 2971 {
2474 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses); 2972 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
2475 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers); 2973 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
2476 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets); 2974 IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
2975 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2976 if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
2977 || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
2978 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
2477 2979
2478 /* Set up the pending barrier found. */ 2980 /* Set up the pending barrier found. */
2479 deps->last_reg_pending_barrier = reg_pending_barrier; 2981 deps->last_reg_pending_barrier = reg_pending_barrier;
2480 } 2982 }
2481 2983
2482 CLEAR_REG_SET (reg_pending_uses); 2984 CLEAR_REG_SET (reg_pending_uses);
2483 CLEAR_REG_SET (reg_pending_clobbers); 2985 CLEAR_REG_SET (reg_pending_clobbers);
2484 CLEAR_REG_SET (reg_pending_sets); 2986 CLEAR_REG_SET (reg_pending_sets);
2987 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
2988 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
2485 2989
2486 /* Add dependencies if a scheduling barrier was found. */ 2990 /* Add dependencies if a scheduling barrier was found. */
2487 if (reg_pending_barrier) 2991 if (reg_pending_barrier)
2488 { 2992 {
2489 /* In the case of barrier the most added dependencies are not 2993 /* In the case of barrier the most added dependencies are not
2492 { 2996 {
2493 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi) 2997 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
2494 { 2998 {
2495 struct deps_reg *reg_last = &deps->reg_last[i]; 2999 struct deps_reg *reg_last = &deps->reg_last[i];
2496 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI); 3000 add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2497 add_dependence_list 3001 add_dependence_list (insn, reg_last->sets, 0,
2498 (insn, reg_last->sets, 0, 3002 reg_pending_barrier == TRUE_BARRIER
2499 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI); 3003 ? REG_DEP_TRUE : REG_DEP_ANTI);
2500 add_dependence_list 3004 add_dependence_list (insn, reg_last->implicit_sets, 0,
2501 (insn, reg_last->clobbers, 0, 3005 REG_DEP_ANTI);
2502 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI); 3006 add_dependence_list (insn, reg_last->clobbers, 0,
3007 reg_pending_barrier == TRUE_BARRIER
3008 ? REG_DEP_TRUE : REG_DEP_ANTI);
2503 } 3009 }
2504 } 3010 }
2505 else 3011 else
2506 { 3012 {
2507 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi) 3013 EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
2508 { 3014 {
2509 struct deps_reg *reg_last = &deps->reg_last[i]; 3015 struct deps_reg *reg_last = &deps->reg_last[i];
2510 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0, 3016 add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2511 REG_DEP_ANTI); 3017 REG_DEP_ANTI);
2512 add_dependence_list_and_free 3018 add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2513 (deps, insn, &reg_last->sets, 0, 3019 reg_pending_barrier == TRUE_BARRIER
2514 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI); 3020 ? REG_DEP_TRUE : REG_DEP_ANTI);
2515 add_dependence_list_and_free 3021 add_dependence_list_and_free (deps, insn,
2516 (deps, insn, &reg_last->clobbers, 0, 3022 &reg_last->implicit_sets, 0,
2517 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI); 3023 REG_DEP_ANTI);
3024 add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3025 reg_pending_barrier == TRUE_BARRIER
3026 ? REG_DEP_TRUE : REG_DEP_ANTI);
2518 3027
2519 if (!deps->readonly) 3028 if (!deps->readonly)
2520 { 3029 {
2521 reg_last->uses_length = 0; 3030 reg_last->uses_length = 0;
2522 reg_last->clobbers_length = 0; 3031 reg_last->clobbers_length = 0;
2531 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); 3040 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2532 SET_REGNO_REG_SET (&deps->reg_last_in_use, i); 3041 SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
2533 } 3042 }
2534 3043
2535 /* Flush pending lists on jumps, but not on speculative checks. */ 3044 /* Flush pending lists on jumps, but not on speculative checks. */
2536 if (JUMP_P (insn) && !(sel_sched_p () 3045 if (JUMP_P (insn) && !(sel_sched_p ()
2537 && sel_insn_is_speculation_check (insn))) 3046 && sel_insn_is_speculation_check (insn)))
2538 flush_pending_lists (deps, insn, true, true); 3047 flush_pending_lists (deps, insn, true, true);
2539 3048
2540 if (!deps->readonly) 3049 if (!deps->readonly)
2541 CLEAR_REG_SET (&deps->reg_conditional_sets); 3050 CLEAR_REG_SET (&deps->reg_conditional_sets);
2542 reg_pending_barrier = NOT_A_BARRIER; 3051 reg_pending_barrier = NOT_A_BARRIER;
2543 } 3052 }
2544 3053
2555 { 3064 {
2556 rtx tmp, set = single_set (insn); 3065 rtx tmp, set = single_set (insn);
2557 int src_regno, dest_regno; 3066 int src_regno, dest_regno;
2558 3067
2559 if (set == NULL) 3068 if (set == NULL)
2560 goto end_call_group; 3069 {
3070 if (DEBUG_INSN_P (insn))
3071 /* We don't want to mark debug insns as part of the same
3072 sched group. We know they really aren't, but if we use
3073 debug insns to tell that a call group is over, we'll
3074 get different code if debug insns are not there and
3075 instructions that follow seem like they should be part
3076 of the call group.
3077
3078 Also, if we did, fixup_sched_groups() would move the
3079 deps of the debug insn to the call insn, modifying
3080 non-debug post-dependency counts of the debug insn
3081 dependencies and otherwise messing with the scheduling
3082 order.
3083
3084 Instead, let such debug insns be scheduled freely, but
3085 keep the call group open in case there are insns that
3086 should be part of it afterwards. Since we grant debug
3087 insns higher priority than even sched group insns, it
3088 will all turn out all right. */
3089 goto debug_dont_end_call_group;
3090 else
3091 goto end_call_group;
3092 }
2561 3093
2562 tmp = SET_DEST (set); 3094 tmp = SET_DEST (set);
2563 if (GET_CODE (tmp) == SUBREG) 3095 if (GET_CODE (tmp) == SUBREG)
2564 tmp = SUBREG_REG (tmp); 3096 tmp = SUBREG_REG (tmp);
2565 if (REG_P (tmp)) 3097 if (REG_P (tmp))
2586 { 3118 {
2587 if (!deps->readonly 3119 if (!deps->readonly
2588 && deps->in_post_call_group_p == post_call_initial) 3120 && deps->in_post_call_group_p == post_call_initial)
2589 deps->in_post_call_group_p = post_call; 3121 deps->in_post_call_group_p = post_call;
2590 3122
2591 if (!sel_sched_p () || sched_emulate_haifa_p) 3123 if (!sel_sched_p () || sched_emulate_haifa_p)
2592 { 3124 {
2593 SCHED_GROUP_P (insn) = 1; 3125 SCHED_GROUP_P (insn) = 1;
2594 CANT_MOVE (insn) = 1; 3126 CANT_MOVE (insn) = 1;
2595 } 3127 }
2596 } 3128 }
2600 if (!deps->readonly) 3132 if (!deps->readonly)
2601 deps->in_post_call_group_p = not_post_call; 3133 deps->in_post_call_group_p = not_post_call;
2602 } 3134 }
2603 } 3135 }
2604 3136
3137 debug_dont_end_call_group:
2605 if ((current_sched_info->flags & DO_SPECULATION) 3138 if ((current_sched_info->flags & DO_SPECULATION)
2606 && !sched_insn_is_legitimate_for_speculation_p (insn, 0)) 3139 && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
2607 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot 3140 /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
2608 be speculated. */ 3141 be speculated. */
2609 { 3142 {
2611 sel_mark_hard_insn (insn); 3144 sel_mark_hard_insn (insn);
2612 else 3145 else
2613 { 3146 {
2614 sd_iterator_def sd_it; 3147 sd_iterator_def sd_it;
2615 dep_t dep; 3148 dep_t dep;
2616 3149
2617 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK); 3150 for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
2618 sd_iterator_cond (&sd_it, &dep);) 3151 sd_iterator_cond (&sd_it, &dep);)
2619 change_spec_dep_to_hard (sd_it); 3152 change_spec_dep_to_hard (sd_it);
2620 } 3153 }
2621 } 3154 }
2622 } 3155 }
2623 3156
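When speculation is not legitimate for INSN, the loop above keeps hardening the first remaining speculative back-dependence until none is left. The same shape over a plain array, as a hedged sketch with invented names (the real code iterates with sd_iterator over SD_LIST_SPEC_BACK and calls change_spec_dep_to_hard):

#include <stdbool.h>

struct dep_stub
{
  int pro;           /* producer insn id */
  bool speculative;  /* cleared once the dep is hardened */
};

/* Harden every speculative dependence of a consumer insn.  */
static void
make_deps_hard (struct dep_stub *deps, int n)
{
  int i;

  for (i = 0; i < n; i++)
    if (deps[i].speculative)
      deps[i].speculative = false;
}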
3157 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3158 longjmp, loop forever, ...). */
3159 static bool
3160 call_may_noreturn_p (rtx insn)
3161 {
3162 rtx call;
3163
3164 /* const or pure calls that aren't looping will always return. */
3165 if (RTL_CONST_OR_PURE_CALL_P (insn)
3166 && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3167 return false;
3168
3169 call = PATTERN (insn);
3170 if (GET_CODE (call) == PARALLEL)
3171 call = XVECEXP (call, 0, 0);
3172 if (GET_CODE (call) == SET)
3173 call = SET_SRC (call);
3174 if (GET_CODE (call) == CALL
3175 && MEM_P (XEXP (call, 0))
3176 && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3177 {
3178 rtx symbol = XEXP (XEXP (call, 0), 0);
3179 if (SYMBOL_REF_DECL (symbol)
3180 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3181 {
3182 if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3183 == BUILT_IN_NORMAL)
3184 switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3185 {
3186 case BUILT_IN_BCMP:
3187 case BUILT_IN_BCOPY:
3188 case BUILT_IN_BZERO:
3189 case BUILT_IN_INDEX:
3190 case BUILT_IN_MEMCHR:
3191 case BUILT_IN_MEMCMP:
3192 case BUILT_IN_MEMCPY:
3193 case BUILT_IN_MEMMOVE:
3194 case BUILT_IN_MEMPCPY:
3195 case BUILT_IN_MEMSET:
3196 case BUILT_IN_RINDEX:
3197 case BUILT_IN_STPCPY:
3198 case BUILT_IN_STPNCPY:
3199 case BUILT_IN_STRCAT:
3200 case BUILT_IN_STRCHR:
3201 case BUILT_IN_STRCMP:
3202 case BUILT_IN_STRCPY:
3203 case BUILT_IN_STRCSPN:
3204 case BUILT_IN_STRLEN:
3205 case BUILT_IN_STRNCAT:
3206 case BUILT_IN_STRNCMP:
3207 case BUILT_IN_STRNCPY:
3208 case BUILT_IN_STRPBRK:
3209 case BUILT_IN_STRRCHR:
3210 case BUILT_IN_STRSPN:
3211 case BUILT_IN_STRSTR:
3212 /* Assume certain string/memory builtins always return. */
3213 return false;
3214 default:
3215 break;
3216 }
3217 }
3218 }
3219
3220 /* For all other calls assume that they might not always return. */
3221 return true;
3222 }
3223
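A hedged restatement of the policy call_may_noreturn_p encodes, lifted out of the RTL walk: non-looping const/pure calls and a short whitelist of string/memory builtins are assumed to always return, and every other call may not. Note the inverted polarity relative to call_may_noreturn_p; all names below are invented for illustration, and only the decision logic mirrors the function above.

#include <stdbool.h>
#include <string.h>

static bool
assumed_to_return (const char *callee, bool const_or_pure, bool looping)
{
  static const char *const safe[] = {
    "memcmp", "memcpy", "memmove", "memset", "strlen",
    "strcmp", "strcpy", "strchr", "strncmp", "strncpy"
  };
  size_t i;

  /* const or pure calls that aren't looping will always return.  */
  if (const_or_pure && !looping)
    return true;

  /* Assume certain string/memory builtins always return.  */
  for (i = 0; i < sizeof safe / sizeof safe[0]; i++)
    if (callee != NULL && strcmp (callee, safe[i]) == 0)
      return true;

  /* Anything else might call exit, longjmp or loop forever.  */
  return false;
}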
2624 /* Analyze INSN with DEPS as a context. */ 3224 /* Analyze INSN with DEPS as a context. */
2625 void 3225 void
2626 deps_analyze_insn (struct deps *deps, rtx insn) 3226 deps_analyze_insn (struct deps *deps, rtx insn)
2627 { 3227 {
2628 if (sched_deps_info->start_insn) 3228 if (sched_deps_info->start_insn)
2629 sched_deps_info->start_insn (insn); 3229 sched_deps_info->start_insn (insn);
2630 3230
2631 if (NONJUMP_INSN_P (insn) || JUMP_P (insn)) 3231 if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn))
2632 { 3232 {
2633 /* Make each JUMP_INSN (but not a speculative check) 3233 /* Make each JUMP_INSN (but not a speculative check)
2634 a scheduling barrier for memory references. */ 3234 a scheduling barrier for memory references. */
2635 if (!deps->readonly 3235 if (!deps->readonly
2636 && JUMP_P (insn) 3236 && JUMP_P (insn)
2637 && !(sel_sched_p () 3237 && !(sel_sched_p ()
2638 && sel_insn_is_speculation_check (insn))) 3238 && sel_insn_is_speculation_check (insn)))
2639 { 3239 {
2640 /* Keep the list a reasonable size. */ 3240 /* Keep the list a reasonable size. */
2641 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH) 3241 if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
2642 flush_pending_lists (deps, insn, true, true); 3242 flush_pending_lists (deps, insn, true, true);
2664 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) 3264 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2665 /* A call may read and modify global register variables. */ 3265 /* A call may read and modify global register variables. */
2666 if (global_regs[i]) 3266 if (global_regs[i])
2667 { 3267 {
2668 SET_REGNO_REG_SET (reg_pending_sets, i); 3268 SET_REGNO_REG_SET (reg_pending_sets, i);
2669 SET_REGNO_REG_SET (reg_pending_uses, i); 3269 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
2670 } 3270 }
2671 /* Other call-clobbered hard regs may be clobbered. 3271 /* Other call-clobbered hard regs may be clobbered.
2672 Since we only have a choice between 'might be clobbered' 3272 Since we only have a choice between 'might be clobbered'
2673 and 'definitely not clobbered', we must include all 3273 and 'definitely not clobbered', we must include all
2674 partly call-clobbered registers here. */ 3274 partly call-clobbered registers here. */
2677 SET_REGNO_REG_SET (reg_pending_clobbers, i); 3277 SET_REGNO_REG_SET (reg_pending_clobbers, i);
2678 /* We don't know what set of fixed registers might be used 3278 /* We don't know what set of fixed registers might be used
2679 by the function, but it is certain that the stack pointer 3279 by the function, but it is certain that the stack pointer
2680 is among them, but be conservative. */ 3280 is among them, but be conservative. */
2681 else if (fixed_regs[i]) 3281 else if (fixed_regs[i])
2682 SET_REGNO_REG_SET (reg_pending_uses, i); 3282 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
2683 /* The frame pointer is normally not used by the function 3283 /* The frame pointer is normally not used by the function
2684 itself, but by the debugger. */ 3284 itself, but by the debugger. */
2685 /* ??? MIPS o32 is an exception. It uses the frame pointer 3285 /* ??? MIPS o32 is an exception. It uses the frame pointer
2686 in the macro expansion of jal but does not represent this 3286 in the macro expansion of jal but does not represent this
2687 fact in the call_insn rtl. */ 3287 fact in the call_insn rtl. */
2688 else if (i == FRAME_POINTER_REGNUM 3288 else if (i == FRAME_POINTER_REGNUM
2689 || (i == HARD_FRAME_POINTER_REGNUM 3289 || (i == HARD_FRAME_POINTER_REGNUM
2690 && (! reload_completed || frame_pointer_needed))) 3290 && (! reload_completed || frame_pointer_needed)))
2691 SET_REGNO_REG_SET (reg_pending_uses, i); 3291 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
2692 } 3292 }
2693 3293
2694 /* For each insn which shouldn't cross a call, add a dependence 3294 /* For each insn which shouldn't cross a call, add a dependence
2695 between that insn and this call insn. */ 3295 between that insn and this call insn. */
2696 add_dependence_list_and_free (deps, insn, 3296 add_dependence_list_and_free (deps, insn,
2697 &deps->sched_before_next_call, 1, 3297 &deps->sched_before_next_call, 1,
2698 REG_DEP_ANTI); 3298 REG_DEP_ANTI);
2699 3299
2700 sched_analyze_insn (deps, PATTERN (insn), insn); 3300 sched_analyze_insn (deps, PATTERN (insn), insn);
2701 3301
2717 if (!deps->readonly) 3317 if (!deps->readonly)
2718 { 3318 {
2719 /* Remember the last function call for limiting lifetimes. */ 3319 /* Remember the last function call for limiting lifetimes. */
2720 free_INSN_LIST_list (&deps->last_function_call); 3320 free_INSN_LIST_list (&deps->last_function_call);
2721 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX); 3321 deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
2722 3322
3323 if (call_may_noreturn_p (insn))
3324 {
3325 /* Remember the last function call that might not always return
3326 normally for limiting moves of trapping insns. */
3327 free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3328 deps->last_function_call_may_noreturn
3329 = alloc_INSN_LIST (insn, NULL_RTX);
3330 }
3331
2723 /* Before reload, begin a post-call group, so as to keep the 3332 /* Before reload, begin a post-call group, so as to keep the
2724 lifetimes of hard registers correct. */ 3333 lifetimes of hard registers correct. */
2725 if (! reload_completed) 3334 if (! reload_completed)
2726 deps->in_post_call_group_p = post_call; 3335 deps->in_post_call_group_p = post_call;
2727 } 3336 }
2738 3347
2739 if (sched_deps_info->finish_insn) 3348 if (sched_deps_info->finish_insn)
2740 sched_deps_info->finish_insn (); 3349 sched_deps_info->finish_insn ();
2741 3350
2742 /* Fixup the dependencies in the sched group. */ 3351 /* Fixup the dependencies in the sched group. */
2743 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn)) 3352 if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
2744 && SCHED_GROUP_P (insn) && !sel_sched_p ()) 3353 && SCHED_GROUP_P (insn) && !sel_sched_p ())
2745 fixup_sched_groups (insn); 3354 fixup_sched_groups (insn);
2746 } 3355 }
2747 3356
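The call branch above maintains two pieces of state: every call becomes the new last_function_call, and calls for which call_may_noreturn_p holds additionally become the new last_function_call_may_noreturn. A sketch of that bookkeeping with the INSN_LIST plumbing stripped away; insn_t, deps_calls and note_call are invented stand-ins.

#include <stdbool.h>

typedef struct insn_stub *insn_t;

struct deps_calls
{
  insn_t last_function_call;              /* limits register lifetimes */
  insn_t last_function_call_may_noreturn; /* limits motion of trapping insns */
};

static void
note_call (struct deps_calls *d, insn_t call, bool may_noreturn)
{
  d->last_function_call = call;
  if (may_noreturn)
    d->last_function_call_may_noreturn = call;
}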
2748 /* Initialize DEPS for the new block beginning with HEAD. */ 3357 /* Initialize DEPS for the new block beginning with HEAD. */
2756 hard registers correct. */ 3365 hard registers correct. */
2757 if (! reload_completed && !LABEL_P (head)) 3366 if (! reload_completed && !LABEL_P (head))
2758 { 3367 {
2759 rtx insn = prev_nonnote_insn (head); 3368 rtx insn = prev_nonnote_insn (head);
2760 3369
3370 while (insn && DEBUG_INSN_P (insn))
3371 insn = prev_nonnote_insn (insn);
2761 if (insn && CALL_P (insn)) 3372 if (insn && CALL_P (insn))
2762 deps->in_post_call_group_p = post_call_initial; 3373 deps->in_post_call_group_p = post_call_initial;
2763 } 3374 }
2764 } 3375 }
2765 3376
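deps_start_bb now looks through any debug insns before testing whether the block is immediately preceded by a call, so the presence or absence of debug insns cannot change whether a post-call group is opened. A stand-alone version of that backward skip, with invented stand-ins for prev_nonnote_insn, DEBUG_INSN_P and CALL_P:

#include <stdbool.h>
#include <stddef.h>

struct insn_stub
{
  struct insn_stub *prev;  /* previous non-note insn */
  bool is_debug;
  bool is_call;
};

/* Walk backwards past debug insns; report whether the first
   non-debug predecessor is a call.  */
static bool
preceded_by_call (struct insn_stub *insn)
{
  while (insn != NULL && insn->is_debug)
    insn = insn->prev;
  return insn != NULL && insn->is_call;
}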
2848 sd_finish_insn (insn); 3459 sd_finish_insn (insn);
2849 } 3460 }
2850 } 3461 }
2851 3462
2852 /* Initialize variables for region data dependence analysis. 3463 /* Initialize variables for region data dependence analysis.
2853 n_bbs is the number of region blocks. */ 3464 When LAZY_REG_LAST is true, do not allocate reg_last array
3465 of struct deps immediately. */
2854 3466
2855 void 3467 void
2856 init_deps (struct deps *deps) 3468 init_deps (struct deps *deps, bool lazy_reg_last)
2857 { 3469 {
2858 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ()); 3470 int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
2859 3471
2860 deps->max_reg = max_reg; 3472 deps->max_reg = max_reg;
2861 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg); 3473 if (lazy_reg_last)
3474 deps->reg_last = NULL;
3475 else
3476 deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
2862 INIT_REG_SET (&deps->reg_last_in_use); 3477 INIT_REG_SET (&deps->reg_last_in_use);
2863 INIT_REG_SET (&deps->reg_conditional_sets); 3478 INIT_REG_SET (&deps->reg_conditional_sets);
2864 3479
2865 deps->pending_read_insns = 0; 3480 deps->pending_read_insns = 0;
2866 deps->pending_read_mems = 0; 3481 deps->pending_read_mems = 0;
2869 deps->pending_read_list_length = 0; 3484 deps->pending_read_list_length = 0;
2870 deps->pending_write_list_length = 0; 3485 deps->pending_write_list_length = 0;
2871 deps->pending_flush_length = 0; 3486 deps->pending_flush_length = 0;
2872 deps->last_pending_memory_flush = 0; 3487 deps->last_pending_memory_flush = 0;
2873 deps->last_function_call = 0; 3488 deps->last_function_call = 0;
3489 deps->last_function_call_may_noreturn = 0;
2874 deps->sched_before_next_call = 0; 3490 deps->sched_before_next_call = 0;
2875 deps->in_post_call_group_p = not_post_call; 3491 deps->in_post_call_group_p = not_post_call;
3492 deps->last_debug_insn = 0;
2876 deps->last_reg_pending_barrier = NOT_A_BARRIER; 3493 deps->last_reg_pending_barrier = NOT_A_BARRIER;
2877 deps->readonly = 0; 3494 deps->readonly = 0;
2878 } 3495 }
2879 3496
3497 /* Initialize only the reg_last field of DEPS, which was not allocated
3498 before because DEPS was initialized lazily. */
3499 void
3500 init_deps_reg_last (struct deps *deps)
3501 {
3502 gcc_assert (deps && deps->max_reg > 0);
3503 gcc_assert (deps->reg_last == NULL);
3504
3505 deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3506 }
3507
3508
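Together, init_deps and init_deps_reg_last define a lazy protocol: a context created with lazy_reg_last = true carries only max_reg until the per-register lists are actually needed. A hedged usage sketch, assuming struct deps from sched-int.h is in scope; the wrapper function itself is invented.

static void
analyze_with_lazy_context (void)
{
  struct deps dc;

  init_deps (&dc, true);        /* dc.reg_last stays NULL for now */

  /* On first real use of the context...  */
  if (dc.reg_last == NULL)
    init_deps_reg_last (&dc);   /* allocates max_reg cleared entries */

  /* ... dependence analysis over the block would go here ...  */

  free_deps (&dc);              /* handles eager and lazy contexts alike */
}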
2880 /* Free insn lists found in DEPS. */ 3509 /* Free insn lists found in DEPS. */
2881 3510
2882 void 3511 void
2883 free_deps (struct deps *deps) 3512 free_deps (struct deps *deps)
2884 { 3513 {
2885 unsigned i; 3514 unsigned i;
2886 reg_set_iterator rsi; 3515 reg_set_iterator rsi;
3516
3517 /* A max_reg of 0 means this context has already been freed. */
3518 if (deps->max_reg == 0)
3519 {
3520 gcc_assert (deps->reg_last == NULL);
3521 return;
3522 }
3523 deps->max_reg = 0;
2887 3524
2888 free_INSN_LIST_list (&deps->pending_read_insns); 3525 free_INSN_LIST_list (&deps->pending_read_insns);
2889 free_EXPR_LIST_list (&deps->pending_read_mems); 3526 free_EXPR_LIST_list (&deps->pending_read_mems);
2890 free_INSN_LIST_list (&deps->pending_write_insns); 3527 free_INSN_LIST_list (&deps->pending_write_insns);
2891 free_EXPR_LIST_list (&deps->pending_write_mems); 3528 free_EXPR_LIST_list (&deps->pending_write_mems);
2899 struct deps_reg *reg_last = &deps->reg_last[i]; 3536 struct deps_reg *reg_last = &deps->reg_last[i];
2900 if (reg_last->uses) 3537 if (reg_last->uses)
2901 free_INSN_LIST_list (&reg_last->uses); 3538 free_INSN_LIST_list (&reg_last->uses);
2902 if (reg_last->sets) 3539 if (reg_last->sets)
2903 free_INSN_LIST_list (&reg_last->sets); 3540 free_INSN_LIST_list (&reg_last->sets);
3541 if (reg_last->implicit_sets)
3542 free_INSN_LIST_list (&reg_last->implicit_sets);
2904 if (reg_last->clobbers) 3543 if (reg_last->clobbers)
2905 free_INSN_LIST_list (&reg_last->clobbers); 3544 free_INSN_LIST_list (&reg_last->clobbers);
2906 } 3545 }
2907 CLEAR_REG_SET (&deps->reg_last_in_use); 3546 CLEAR_REG_SET (&deps->reg_last_in_use);
2908 CLEAR_REG_SET (&deps->reg_conditional_sets); 3547 CLEAR_REG_SET (&deps->reg_conditional_sets);
2909 3548
2910 free (deps->reg_last); 3549 /* As we initialize reg_last lazily, it is possible that we didn't allocate
3550 it at all. */
3551 if (deps->reg_last)
3552 free (deps->reg_last);
2911 deps->reg_last = NULL; 3553 deps->reg_last = NULL;
2912 3554
2913 deps = NULL; 3555 deps = NULL;
2914 } 3556 }
2915 3557
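free_deps above treats max_reg == 0 as a tombstone, which makes a second call on the same context a no-op instead of a double free. The same guard pattern in isolation, with invented names:

#include <stdlib.h>

struct ctx
{
  int live;   /* cleared once the context has been freed */
  void *buf;
};

static void
ctx_free (struct ctx *c)
{
  if (!c->live)
    return;        /* already freed: nothing left to release */
  c->live = 0;     /* mark dead before releasing resources */
  free (c->buf);   /* free (NULL) would also be harmless */
  c->buf = NULL;
}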
2919 remove_from_deps (struct deps *deps, rtx insn) 3561 remove_from_deps (struct deps *deps, rtx insn)
2920 { 3562 {
2921 int removed; 3563 int removed;
2922 unsigned i; 3564 unsigned i;
2923 reg_set_iterator rsi; 3565 reg_set_iterator rsi;
2924 3566
2925 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns, 3567 removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
2926 &deps->pending_read_mems); 3568 &deps->pending_read_mems);
2927 deps->pending_read_list_length -= removed; 3569 if (!DEBUG_INSN_P (insn))
3570 deps->pending_read_list_length -= removed;
2928 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns, 3571 removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
2929 &deps->pending_write_mems); 3572 &deps->pending_write_mems);
2930 deps->pending_write_list_length -= removed; 3573 deps->pending_write_list_length -= removed;
2931 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush); 3574 removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
2932 deps->pending_flush_length -= removed; 3575 deps->pending_flush_length -= removed;
2936 struct deps_reg *reg_last = &deps->reg_last[i]; 3579 struct deps_reg *reg_last = &deps->reg_last[i];
2937 if (reg_last->uses) 3580 if (reg_last->uses)
2938 remove_from_dependence_list (insn, &reg_last->uses); 3581 remove_from_dependence_list (insn, &reg_last->uses);
2939 if (reg_last->sets) 3582 if (reg_last->sets)
2940 remove_from_dependence_list (insn, &reg_last->sets); 3583 remove_from_dependence_list (insn, &reg_last->sets);
3584 if (reg_last->implicit_sets)
3585 remove_from_dependence_list (insn, &reg_last->implicit_sets);
2941 if (reg_last->clobbers) 3586 if (reg_last->clobbers)
2942 remove_from_dependence_list (insn, &reg_last->clobbers); 3587 remove_from_dependence_list (insn, &reg_last->clobbers);
2943 if (!reg_last->uses && !reg_last->sets && !reg_last->clobbers) 3588 if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
3589 && !reg_last->clobbers)
2944 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i); 3590 CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
2945 } 3591 }
2946 3592
2947 if (CALL_P (insn)) 3593 if (CALL_P (insn))
2948 remove_from_dependence_list (insn, &deps->last_function_call); 3594 {
3595 remove_from_dependence_list (insn, &deps->last_function_call);
3596 remove_from_dependence_list (insn,
3597 &deps->last_function_call_may_noreturn);
3598 }
2949 remove_from_dependence_list (insn, &deps->sched_before_next_call); 3599 remove_from_dependence_list (insn, &deps->sched_before_next_call);
2950 } 3600 }
2951 3601
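Note the asymmetry in remove_from_deps: pending_read_list_length is only decremented for non-debug insns, matching the add side, where debug reads are recorded but never counted toward MAX_PENDING_LIST_LENGTH. A reduced model of that counted-list discipline (invented names, fixed-size storage, no overflow checking; purely illustrative):

#include <stdbool.h>

struct pending_stub
{
  int ids[64];
  bool debug[64];
  int n;        /* physical entries */
  int length;   /* counted entries: non-debug only */
};

/* Record an entry; debug entries are stored but never counted, so
   they cannot trigger a length-based flush.  */
static void
pending_add (struct pending_stub *p, int id, bool is_debug)
{
  p->ids[p->n] = id;
  p->debug[p->n] = is_debug;
  p->n++;
  if (!is_debug)
    p->length++;
}

/* Remove the entry at IDX; only non-debug removals touch the
   counted length, mirroring the DEBUG_INSN_P check above.  */
static void
pending_remove (struct pending_stub *p, int idx)
{
  if (!p->debug[idx])
    p->length--;
  p->n--;
  p->ids[idx] = p->ids[p->n];
  p->debug[idx] = p->debug[p->n];
}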
2952 /* Init deps data vector. */ 3602 /* Init deps data vector. */
2953 static void 3603 static void
2954 init_deps_data_vector (void) 3604 init_deps_data_vector (void)
2955 { 3605 {
2956 int reserve = (sched_max_luid + 1 3606 int reserve = (sched_max_luid + 1
2957 - VEC_length (haifa_deps_insn_data_def, h_d_i_d)); 3607 - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
2958 if (reserve > 0 3608 if (reserve > 0
2959 && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve)) 3609 && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
2960 VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d, 3610 VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
2961 3 * sched_max_luid / 2); 3611 3 * sched_max_luid / 2);
2962 } 3612 }
2963 3613
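init_deps_data_vector reserves room for luids up to sched_max_luid and, when the spare space has run out, grows the vector to 3 * sched_max_luid / 2 with the new tail cleared: classic 1.5x amortized growth. The same policy stand-alone, with invented names; the extra clamp to max_luid + 1 guards the tiny-luid corner that the ratio alone would miss.

#include <stdlib.h>
#include <string.h>

/* Ensure VEC can hold entries 0 .. max_luid; grow by roughly 1.5x
   and zero-fill the new tail, as VEC_safe_grow_cleared does above.
   Returns the (possibly moved) vector, or NULL on failure.  */
static void *
grow_cleared (void *vec, size_t *cap, size_t elt_size, size_t max_luid)
{
  size_t new_cap;
  void *p;

  if (max_luid + 1 <= *cap)
    return vec;                        /* enough room already */

  new_cap = 3 * max_luid / 2;
  if (new_cap < max_luid + 1)
    new_cap = max_luid + 1;            /* tiny-luid corner */

  p = realloc (vec, new_cap * elt_size);
  if (p == NULL)
    return NULL;

  memset ((char *) p + *cap * elt_size, 0, (new_cap - *cap) * elt_size);
  *cap = new_cap;
  return p;
}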
2969 /* Average number of insns in the basic block. 3619 /* Average number of insns in the basic block.
2970 '+ 1' is used to make it nonzero. */ 3620 '+ 1' is used to make it nonzero. */
2971 int insns_in_block = sched_max_luid / n_basic_blocks + 1; 3621 int insns_in_block = sched_max_luid / n_basic_blocks + 1;
2972 3622
2973 init_deps_data_vector (); 3623 init_deps_data_vector ();
2974 3624
2975 /* We use another caching mechanism for selective scheduling, so 3625 /* We use another caching mechanism for selective scheduling, so
2976 we don't use this one. */ 3626 we don't use this one. */
2977 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5) 3627 if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
2978 { 3628 {
2979 /* ?!? We could save some memory by computing a per-region luid mapping 3629 /* ?!? We could save some memory by computing a per-region luid mapping
2980 which could reduce both the number of vectors in the cache and the 3630 which could reduce both the number of vectors in the cache and the
2984 what we consider "very high". */ 3634 what we consider "very high". */
2985 cache_size = 0; 3635 cache_size = 0;
2986 extend_dependency_caches (sched_max_luid, true); 3636 extend_dependency_caches (sched_max_luid, true);
2987 } 3637 }
2988 3638
2989 if (global_p) 3639 if (global_p)
2990 { 3640 {
2991 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list), 3641 dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
2992 /* Allocate lists for one block at a time. */ 3642 /* Allocate lists for one block at a time. */
2993 insns_in_block); 3643 insns_in_block);
2994 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node), 3644 dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
3042 free_alloc_pool_if_empty (&dl_pool); 3692 free_alloc_pool_if_empty (&dl_pool);
3043 gcc_assert (dn_pool == NULL && dl_pool == NULL); 3693 gcc_assert (dn_pool == NULL && dl_pool == NULL);
3044 3694
3045 VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d); 3695 VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
3046 cache_size = 0; 3696 cache_size = 0;
3047 3697
3048 if (true_dependency_cache) 3698 if (true_dependency_cache)
3049 { 3699 {
3050 int i; 3700 int i;
3051 3701
3052 for (i = 0; i < cache_size; i++) 3702 for (i = 0; i < cache_size; i++)
3078 code. */ 3728 code. */
3079 3729
3080 void 3730 void
3081 init_deps_global (void) 3731 init_deps_global (void)
3082 { 3732 {
3733 CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3734 CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3083 reg_pending_sets = ALLOC_REG_SET (&reg_obstack); 3735 reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
3084 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack); 3736 reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
3085 reg_pending_uses = ALLOC_REG_SET (&reg_obstack); 3737 reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
3086 reg_pending_barrier = NOT_A_BARRIER; 3738 reg_pending_barrier = NOT_A_BARRIER;
3087 3739
3161 internal = cur_insn != NULL; 3813 internal = cur_insn != NULL;
3162 if (internal) 3814 if (internal)
3163 gcc_assert (insn == cur_insn); 3815 gcc_assert (insn == cur_insn);
3164 else 3816 else
3165 cur_insn = insn; 3817 cur_insn = insn;
3166 3818
3167 note_dep (elem, ds); 3819 note_dep (elem, ds);
3168 if (!internal) 3820 if (!internal)
3169 cur_insn = NULL; 3821 cur_insn = NULL;
3170 } 3822 }
3171 3823
3451 /* Check that dependence type contains the same bits as the status. */ 4103 /* Check that dependence type contains the same bits as the status. */
3452 if (dt == REG_DEP_TRUE) 4104 if (dt == REG_DEP_TRUE)
3453 gcc_assert (ds & DEP_TRUE); 4105 gcc_assert (ds & DEP_TRUE);
3454 else if (dt == REG_DEP_OUTPUT) 4106 else if (dt == REG_DEP_OUTPUT)
3455 gcc_assert ((ds & DEP_OUTPUT) 4107 gcc_assert ((ds & DEP_OUTPUT)
3456 && !(ds & DEP_TRUE)); 4108 && !(ds & DEP_TRUE));
3457 else 4109 else
3458 gcc_assert ((dt == REG_DEP_ANTI) 4110 gcc_assert ((dt == REG_DEP_ANTI)
3459 && (ds & DEP_ANTI) 4111 && (ds & DEP_ANTI)
3460 && !(ds & (DEP_OUTPUT | DEP_TRUE))); 4112 && !(ds & (DEP_OUTPUT | DEP_TRUE)));
3461 4113
3462 /* HARD_DEP can not appear in dep_status of a link. */ 4114 /* HARD_DEP can not appear in dep_status of a link. */
3463 gcc_assert (!(ds & HARD_DEP)); 4115 gcc_assert (!(ds & HARD_DEP));
3464 4116
3465 /* Check that dependence status is set correctly when speculation is not 4117 /* Check that dependence status is set correctly when speculation is not
3466 supported. */ 4118 supported. */
3467 if (!sched_deps_info->generate_spec_deps) 4119 if (!sched_deps_info->generate_spec_deps)
3468 gcc_assert (!(ds & SPECULATIVE)); 4120 gcc_assert (!(ds & SPECULATIVE));
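The assertions above pin down how a dependence type must agree with its status bits: true deps carry DEP_TRUE, output deps carry DEP_OUTPUT but not DEP_TRUE, and anti deps carry DEP_ANTI alone. Restated as a tiny checker over simplified bit names (invented; they only mirror the real DEP_* status bits):

#include <assert.h>

enum { B_TRUE = 1, B_OUTPUT = 2, B_ANTI = 4 };
enum dep_type_stub { T_TRUE, T_OUTPUT, T_ANTI };

static void
check_type_vs_status (enum dep_type_stub type, unsigned ds)
{
  if (type == T_TRUE)
    assert (ds & B_TRUE);
  else if (type == T_OUTPUT)
    assert ((ds & B_OUTPUT) && !(ds & B_TRUE));
  else
    assert ((ds & B_ANTI) && !(ds & (B_OUTPUT | B_TRUE)));
}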
3500 else 4152 else
3501 { 4153 {
3502 /* Subsequent speculations should resolve true dependencies. */ 4154 /* Subsequent speculations should resolve true dependencies. */
3503 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE); 4155 gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
3504 } 4156 }
3505 4157
3506 /* Check that true and anti dependencies can't have other speculative 4158 /* Check that true and anti dependencies can't have other speculative
3507 statuses. */ 4159 statuses. */
3508 if (ds & DEP_TRUE) 4160 if (ds & DEP_TRUE)
3509 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC)); 4161 gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
3510 /* An output dependence can't be speculative at all. */ 4162 /* An output dependence can't be speculative at all. */
3511 gcc_assert (!(ds & DEP_OUTPUT)); 4163 gcc_assert (!(ds & DEP_OUTPUT));