diff gcc/sched-deps.c @ 55:77e2b8dfacca gcc-4.4.5

update it from 4.4.3 to 4.5.0

author   | ryoma <e075725@ie.u-ryukyu.ac.jp>
date     | Fri, 12 Feb 2010 23:39:51 +0900
parents  | a06113de4d67
children | b7f97abdc517
--- a/gcc/sched-deps.c Sun Feb 07 18:28:00 2010 +0900 +++ b/gcc/sched-deps.c Fri Feb 12 23:39:51 2010 +0900 @@ -1,7 +1,7 @@ /* Instruction scheduling pass. This file computes dependencies between instructions. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, - 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 + 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com) @@ -41,6 +41,8 @@ #include "sched-int.h" #include "params.h" #include "cselib.h" +#include "ira.h" +#include "target.h" #ifdef INSN_SCHEDULING @@ -210,6 +212,16 @@ fprintf (stderr, "\n"); } +/* Determine whether DEP is a dependency link of a non-debug insn on a + debug insn. */ + +static inline bool +depl_on_debug_p (dep_link_t dep) +{ + return (DEBUG_INSN_P (DEP_LINK_PRO (dep)) + && !DEBUG_INSN_P (DEP_LINK_CON (dep))); +} + /* Functions to operate with a single link from the dependencies lists - dep_link_t. */ @@ -245,7 +257,9 @@ { attach_dep_link (link, &DEPS_LIST_FIRST (l)); - ++DEPS_LIST_N_LINKS (l); + /* Don't count debug deps. */ + if (!depl_on_debug_p (link)) + ++DEPS_LIST_N_LINKS (l); } /* Detach dep_link L from the list. */ @@ -270,7 +284,9 @@ { detach_dep_link (link); - --DEPS_LIST_N_LINKS (list); + /* Don't count debug deps. */ + if (!depl_on_debug_p (link)) + --DEPS_LIST_N_LINKS (list); } /* Move link LINK from list FROM to list TO. */ @@ -396,6 +412,15 @@ static regset reg_pending_uses; static enum reg_pending_barrier_mode reg_pending_barrier; +/* Hard registers implicitly clobbered or used (or may be implicitly + clobbered or used) by the currently analyzed insn. For example, + insn in its constraint has one register class. Even if there is + currently no hard register in the insn, the particular hard + register will be in the insn after reload pass because the + constraint requires it. */ +static HARD_REG_SET implicit_reg_pending_clobbers; +static HARD_REG_SET implicit_reg_pending_uses; + /* To speed up the test for duplicate dependency links we keep a record of dependencies created by add_dependence when the average number of instructions in a basic block is very large. @@ -417,8 +442,8 @@ static int deps_may_trap_p (const_rtx); static void add_dependence_list (rtx, rtx, int, enum reg_note); -static void add_dependence_list_and_free (struct deps *, rtx, - rtx *, int, enum reg_note); +static void add_dependence_list_and_free (struct deps *, rtx, + rtx *, int, enum reg_note); static void delete_all_dependences (rtx); static void fixup_sched_groups (rtx); @@ -650,17 +675,29 @@ bool resolved_p; sd_next_list (insn, &list_types, &list, &resolved_p); - size += DEPS_LIST_N_LINKS (list); + if (list) + size += DEPS_LIST_N_LINKS (list); } return size; } /* Return true if INSN's lists defined by LIST_TYPES are all empty. */ + bool sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types) { - return sd_lists_size (insn, list_types) == 0; + while (list_types != SD_LIST_NONE) + { + deps_list_t list; + bool resolved_p; + + sd_next_list (insn, &list_types, &list, &resolved_p); + if (!deps_list_empty_p (list)) + return false; + } + + return true; } /* Initialize data for INSN. */ @@ -673,6 +710,9 @@ INSN_FORW_DEPS (insn) = create_deps_list (); INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list (); + if (DEBUG_INSN_P (insn)) + DEBUG_INSN_SCHED_P (insn) = TRUE; + /* ??? It would be nice to allocate dependency caches here. 
*/ } @@ -682,6 +722,12 @@ { /* ??? It would be nice to deallocate dependency caches here. */ + if (DEBUG_INSN_P (insn)) + { + gcc_assert (DEBUG_INSN_SCHED_P (insn)); + DEBUG_INSN_SCHED_P (insn) = FALSE; + } + free_deps_list (INSN_HARD_BACK_DEPS (insn)); INSN_HARD_BACK_DEPS (insn) = NULL; @@ -833,7 +879,7 @@ && anti_dependency_cache != NULL); if (!(current_sched_info->flags & USE_DEPS_LIST)) - { + { enum reg_note present_dep_type; if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)) @@ -851,9 +897,9 @@ return DEP_PRESENT; } else - { + { ds_t present_dep_types = 0; - + if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)) present_dep_types |= DEP_TRUE; if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)) @@ -959,7 +1005,7 @@ break; default: - gcc_unreachable (); + gcc_unreachable (); } } @@ -1040,7 +1086,7 @@ dw = estimate_dep_weak (mem1, mem2); ds = set_dep_weak (ds, BEGIN_DATA, dw); } - + new_status = ds_merge (dep_status, ds); } } @@ -1077,7 +1123,7 @@ gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep)) && DEP_PRO (new_dep) != DEP_CON (new_dep)); - + #ifdef ENABLE_CHECKING check_dep (new_dep, mem1 != NULL); #endif @@ -1138,7 +1184,7 @@ } sd_add_dep (new_dep, resolved_p); - + return DEP_CREATED; } @@ -1352,11 +1398,11 @@ } } -/* Similar, but free *LISTP at the same time, when the context +/* Similar, but free *LISTP at the same time, when the context is not readonly. */ static void -add_dependence_list_and_free (struct deps *deps, rtx insn, rtx *listp, +add_dependence_list_and_free (struct deps *deps, rtx insn, rtx *listp, int uncond, enum reg_note dep_type) { rtx list, next; @@ -1376,14 +1422,14 @@ } } -/* Remove all occurences of INSN from LIST. Return the number of +/* Remove all occurences of INSN from LIST. Return the number of occurences removed. */ static int remove_from_dependence_list (rtx insn, rtx* listp) { int removed = 0; - + while (*listp) { if (XEXP (*listp, 0) == insn) @@ -1392,19 +1438,19 @@ removed++; continue; } - + listp = &XEXP (*listp, 1); } - + return removed; } /* Same as above, but process two lists at once. */ -static int +static int remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp) { int removed = 0; - + while (*listp) { if (XEXP (*listp, 0) == insn) @@ -1414,11 +1460,11 @@ removed++; continue; } - + listp = &XEXP (*listp, 1); exprp = &XEXP (*exprp, 1); } - + return removed; } @@ -1462,7 +1508,7 @@ if (pro == i) goto next_link; - } while (SCHED_GROUP_P (i)); + } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i)); if (! sched_insns_conditions_mutex_p (i, pro)) add_dependence (i, pro, DEP_TYPE (dep)); @@ -1472,6 +1518,8 @@ delete_all_dependences (insn); prev_nonnote = prev_nonnote_insn (insn); + while (DEBUG_INSN_P (prev_nonnote)) + prev_nonnote = prev_nonnote_insn (prev_nonnote); if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote) && ! sched_insns_conditions_mutex_p (insn, prev_nonnote)) add_dependence (insn, prev_nonnote, REG_DEP_ANTI); @@ -1505,7 +1553,8 @@ { insn_list = &deps->pending_read_insns; mem_list = &deps->pending_read_mems; - deps->pending_read_list_length++; + if (!DEBUG_INSN_P (insn)) + deps->pending_read_list_length++; } else { @@ -1536,7 +1585,7 @@ { if (for_write) { - add_dependence_list_and_free (deps, insn, &deps->pending_read_insns, + add_dependence_list_and_free (deps, insn, &deps->pending_read_insns, 1, REG_DEP_ANTI); if (!deps->readonly) { @@ -1548,7 +1597,7 @@ add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1, for_read ? 
REG_DEP_ANTI : REG_DEP_OUTPUT); - add_dependence_list_and_free (deps, insn, + add_dependence_list_and_free (deps, insn, &deps->last_pending_memory_flush, 1, for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT); if (!deps->readonly) @@ -1611,8 +1660,8 @@ { dep_def _dep, *dep = &_dep; - - init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds), + + init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds), current_sched_info->flags & USE_DEPS_LIST ? ds : -1); maybe_add_or_update_dep_1 (dep, false, pending_mem, mem); } @@ -1678,6 +1727,327 @@ return REG_DEP_ANTI; } } + + + +/* Functions for computation of info needed for register pressure + sensitive insn scheduling. */ + + +/* Allocate and return reg_use_data structure for REGNO and INSN. */ +static struct reg_use_data * +create_insn_reg_use (int regno, rtx insn) +{ + struct reg_use_data *use; + + use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data)); + use->regno = regno; + use->insn = insn; + use->next_insn_use = INSN_REG_USE_LIST (insn); + INSN_REG_USE_LIST (insn) = use; + return use; +} + +/* Allocate and return reg_set_data structure for REGNO and INSN. */ +static struct reg_set_data * +create_insn_reg_set (int regno, rtx insn) +{ + struct reg_set_data *set; + + set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data)); + set->regno = regno; + set->insn = insn; + set->next_insn_set = INSN_REG_SET_LIST (insn); + INSN_REG_SET_LIST (insn) = set; + return set; +} + +/* Set up insn register uses for INSN and dependency context DEPS. */ +static void +setup_insn_reg_uses (struct deps *deps, rtx insn) +{ + unsigned i; + reg_set_iterator rsi; + rtx list; + struct reg_use_data *use, *use2, *next; + struct deps_reg *reg_last; + + EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi) + { + if (i < FIRST_PSEUDO_REGISTER + && TEST_HARD_REG_BIT (ira_no_alloc_regs, i)) + continue; + + if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX + && ! REGNO_REG_SET_P (reg_pending_sets, i) + && ! REGNO_REG_SET_P (reg_pending_clobbers, i)) + /* Ignore use which is not dying. */ + continue; + + use = create_insn_reg_use (i, insn); + use->next_regno_use = use; + reg_last = &deps->reg_last[i]; + + /* Create the cycle list of uses. */ + for (list = reg_last->uses; list; list = XEXP (list, 1)) + { + use2 = create_insn_reg_use (i, XEXP (list, 0)); + next = use->next_regno_use; + use->next_regno_use = use2; + use2->next_regno_use = next; + } + } +} + +/* Register pressure info for the currently processed insn. */ +static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES]; + +/* Return TRUE if INSN has the use structure for REGNO. */ +static bool +insn_use_p (rtx insn, int regno) +{ + struct reg_use_data *use; + + for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use) + if (use->regno == regno) + return true; + return false; +} + +/* Update the register pressure info after birth of pseudo register REGNO + in INSN. Arguments CLOBBER_P and UNUSED_P say correspondingly that + the register is in clobber or unused after the insn. 
*/ +static void +mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p) +{ + int incr, new_incr; + enum reg_class cl; + + gcc_assert (regno >= FIRST_PSEUDO_REGISTER); + cl = sched_regno_cover_class[regno]; + if (cl != NO_REGS) + { + incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)]; + if (clobber_p) + { + new_incr = reg_pressure_info[cl].clobber_increase + incr; + reg_pressure_info[cl].clobber_increase = new_incr; + } + else if (unused_p) + { + new_incr = reg_pressure_info[cl].unused_set_increase + incr; + reg_pressure_info[cl].unused_set_increase = new_incr; + } + else + { + new_incr = reg_pressure_info[cl].set_increase + incr; + reg_pressure_info[cl].set_increase = new_incr; + if (! insn_use_p (insn, regno)) + reg_pressure_info[cl].change += incr; + create_insn_reg_set (regno, insn); + } + gcc_assert (new_incr < (1 << INCREASE_BITS)); + } +} + +/* Like mark_insn_pseudo_regno_birth except that NREGS saying how many + hard registers involved in the birth. */ +static void +mark_insn_hard_regno_birth (rtx insn, int regno, int nregs, + bool clobber_p, bool unused_p) +{ + enum reg_class cl; + int new_incr, last = regno + nregs; + + while (regno < last) + { + gcc_assert (regno < FIRST_PSEUDO_REGISTER); + if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)) + { + cl = sched_regno_cover_class[regno]; + if (cl != NO_REGS) + { + if (clobber_p) + { + new_incr = reg_pressure_info[cl].clobber_increase + 1; + reg_pressure_info[cl].clobber_increase = new_incr; + } + else if (unused_p) + { + new_incr = reg_pressure_info[cl].unused_set_increase + 1; + reg_pressure_info[cl].unused_set_increase = new_incr; + } + else + { + new_incr = reg_pressure_info[cl].set_increase + 1; + reg_pressure_info[cl].set_increase = new_incr; + if (! insn_use_p (insn, regno)) + reg_pressure_info[cl].change += 1; + create_insn_reg_set (regno, insn); + } + gcc_assert (new_incr < (1 << INCREASE_BITS)); + } + } + regno++; + } +} + +/* Update the register pressure info after birth of pseudo or hard + register REG in INSN. Arguments CLOBBER_P and UNUSED_P say + correspondingly that the register is in clobber or unused after the + insn. */ +static void +mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p) +{ + int regno; + + if (GET_CODE (reg) == SUBREG) + reg = SUBREG_REG (reg); + + if (! REG_P (reg)) + return; + + regno = REGNO (reg); + if (regno < FIRST_PSEUDO_REGISTER) + mark_insn_hard_regno_birth (insn, regno, + hard_regno_nregs[regno][GET_MODE (reg)], + clobber_p, unused_p); + else + mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p); +} + +/* Update the register pressure info after death of pseudo register + REGNO. */ +static void +mark_pseudo_death (int regno) +{ + int incr; + enum reg_class cl; + + gcc_assert (regno >= FIRST_PSEUDO_REGISTER); + cl = sched_regno_cover_class[regno]; + if (cl != NO_REGS) + { + incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)]; + reg_pressure_info[cl].change -= incr; + } +} + +/* Like mark_pseudo_death except that NREGS saying how many hard + registers involved in the death. */ +static void +mark_hard_regno_death (int regno, int nregs) +{ + enum reg_class cl; + int last = regno + nregs; + + while (regno < last) + { + gcc_assert (regno < FIRST_PSEUDO_REGISTER); + if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)) + { + cl = sched_regno_cover_class[regno]; + if (cl != NO_REGS) + reg_pressure_info[cl].change -= 1; + } + regno++; + } +} + +/* Update the register pressure info after death of pseudo or hard + register REG. 
*/ +static void +mark_reg_death (rtx reg) +{ + int regno; + + if (GET_CODE (reg) == SUBREG) + reg = SUBREG_REG (reg); + + if (! REG_P (reg)) + return; + + regno = REGNO (reg); + if (regno < FIRST_PSEUDO_REGISTER) + mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]); + else + mark_pseudo_death (regno); +} + +/* Process SETTER of REG. DATA is an insn containing the setter. */ +static void +mark_insn_reg_store (rtx reg, const_rtx setter, void *data) +{ + if (setter != NULL_RTX && GET_CODE (setter) != SET) + return; + mark_insn_reg_birth + ((rtx) data, reg, false, + find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX); +} + +/* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */ +static void +mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data) +{ + if (GET_CODE (setter) == CLOBBER) + mark_insn_reg_birth ((rtx) data, reg, true, false); +} + +/* Set up reg pressure info related to INSN. */ +static void +setup_insn_reg_pressure_info (rtx insn) +{ + int i, len; + enum reg_class cl; + static struct reg_pressure_data *pressure_info; + rtx link; + + gcc_assert (sched_pressure_p); + + if (! INSN_P (insn)) + return; + + for (i = 0; i < ira_reg_class_cover_size; i++) + { + cl = ira_reg_class_cover[i]; + reg_pressure_info[cl].clobber_increase = 0; + reg_pressure_info[cl].set_increase = 0; + reg_pressure_info[cl].unused_set_increase = 0; + reg_pressure_info[cl].change = 0; + } + + note_stores (PATTERN (insn), mark_insn_reg_clobber, insn); + + note_stores (PATTERN (insn), mark_insn_reg_store, insn); + +#ifdef AUTO_INC_DEC + for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) + if (REG_NOTE_KIND (link) == REG_INC) + mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn); +#endif + + for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) + if (REG_NOTE_KIND (link) == REG_DEAD) + mark_reg_death (XEXP (link, 0)); + + len = sizeof (struct reg_pressure_data) * ira_reg_class_cover_size; + pressure_info + = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len); + INSN_MAX_REG_PRESSURE (insn) = (int *) xmalloc (ira_reg_class_cover_size + * sizeof (int)); + for (i = 0; i < ira_reg_class_cover_size; i++) + { + cl = ira_reg_class_cover[i]; + pressure_info[i].clobber_increase + = reg_pressure_info[cl].clobber_increase; + pressure_info[i].set_increase = reg_pressure_info[cl].set_increase; + pressure_info[i].unused_set_increase + = reg_pressure_info[cl].unused_set_increase; + pressure_info[i].change = reg_pressure_info[cl].change; + } +} + + /* Internal variable for sched_analyze_[12] () functions. @@ -1685,7 +2055,7 @@ at the most toplevel SET. */ static bool can_start_lhs_rhs_p; -/* Extend reg info for the deps context DEPS given that +/* Extend reg info for the deps context DEPS given that we have just generated a register numbered REGNO. */ static void extend_deps_reg_info (struct deps *deps, int regno) @@ -1704,10 +2074,10 @@ if (max_regno > deps->max_reg) { - deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last, + deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last, max_regno); memset (&deps->reg_last[deps->max_reg], - 0, (max_regno - deps->max_reg) + 0, (max_regno - deps->max_reg) * sizeof (struct deps_reg)); deps->max_reg = max_regno; } @@ -1801,8 +2171,7 @@ already cross one. 
*/ if (REG_N_CALLS_CROSSED (regno) == 0) { - if (!deps->readonly - && ref == USE) + if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn)) deps->sched_before_next_call = alloc_INSN_LIST (insn, deps->sched_before_next_call); else @@ -1893,10 +2262,16 @@ /* Treat all writes to a stack register as modifying the TOS. */ if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG) { + int nregs; + /* Avoid analyzing the same register twice. */ if (regno != FIRST_STACK_REG) sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn); - sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn); + + nregs = hard_regno_nregs[FIRST_STACK_REG][mode]; + while (--nregs >= 0) + SET_HARD_REG_BIT (implicit_reg_pending_uses, + FIRST_STACK_REG + nregs); } #endif } @@ -1907,8 +2282,11 @@ if (sched_deps_info->use_cselib) { + enum machine_mode address_mode + = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest)); + t = shallow_copy_rtx (dest); - cselib_lookup (XEXP (t, 0), Pmode, 1); + cselib_lookup (XEXP (t, 0), address_mode, 1); XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0)); } t = canon_rtx (t); @@ -2061,55 +2439,64 @@ if (sched_deps_info->use_cselib) { + enum machine_mode address_mode + = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t)); + t = shallow_copy_rtx (t); - cselib_lookup (XEXP (t, 0), Pmode, 1); + cselib_lookup (XEXP (t, 0), address_mode, 1); XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0)); } - t = canon_rtx (t); - pending = deps->pending_read_insns; - pending_mem = deps->pending_read_mems; - while (pending) - { - if (read_dependence (XEXP (pending_mem, 0), t) - && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0))) - note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0), - DEP_ANTI); - - pending = XEXP (pending, 1); - pending_mem = XEXP (pending_mem, 1); - } - - pending = deps->pending_write_insns; - pending_mem = deps->pending_write_mems; - while (pending) + + if (!DEBUG_INSN_P (insn)) { - if (true_dependence (XEXP (pending_mem, 0), VOIDmode, - t, rtx_varies_p) - && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0))) - note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0), - sched_deps_info->generate_spec_deps - ? BEGIN_DATA | DEP_TRUE : DEP_TRUE); - - pending = XEXP (pending, 1); - pending_mem = XEXP (pending_mem, 1); - } - - for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1)) - { - if (! JUMP_P (XEXP (u, 0))) - add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); - else if (deps_may_trap_p (x)) + t = canon_rtx (t); + pending = deps->pending_read_insns; + pending_mem = deps->pending_read_mems; + while (pending) + { + if (read_dependence (XEXP (pending_mem, 0), t) + && ! sched_insns_conditions_mutex_p (insn, + XEXP (pending, 0))) + note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0), + DEP_ANTI); + + pending = XEXP (pending, 1); + pending_mem = XEXP (pending_mem, 1); + } + + pending = deps->pending_write_insns; + pending_mem = deps->pending_write_mems; + while (pending) { - if ((sched_deps_info->generate_spec_deps) - && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL)) + if (true_dependence (XEXP (pending_mem, 0), VOIDmode, + t, rtx_varies_p) + && ! sched_insns_conditions_mutex_p (insn, + XEXP (pending, 0))) + note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0), + sched_deps_info->generate_spec_deps + ? BEGIN_DATA | DEP_TRUE : DEP_TRUE); + + pending = XEXP (pending, 1); + pending_mem = XEXP (pending_mem, 1); + } + + for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1)) + { + if (! 
JUMP_P (XEXP (u, 0))) + add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); + else if (deps_may_trap_p (x)) { - ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL, - MAX_DEP_WEAK); - - note_dep (XEXP (u, 0), ds); + if ((sched_deps_info->generate_spec_deps) + && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL)) + { + ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL, + MAX_DEP_WEAK); + + note_dep (XEXP (u, 0), ds); + } + else + add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); } - else - add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); } } @@ -2118,7 +2505,6 @@ if (!deps->readonly) add_insn_mem_dependence (deps, true, insn, x); - /* Take advantage of tail recursion here. */ sched_analyze_2 (deps, XEXP (x, 0), insn); if (cslr_p && sched_deps_info->finish_rhs) @@ -2132,6 +2518,11 @@ flush_pending_lists (deps, insn, true, false); break; + case PREFETCH: + if (PREFETCH_SCHEDULE_BARRIER_P (x)) + reg_pending_barrier = TRUE_BARRIER; + break; + case UNSPEC_VOLATILE: flush_pending_lists (deps, insn, true, true); /* FALLTHRU */ @@ -2225,9 +2616,25 @@ unsigned i; reg_set_iterator rsi; + if (! reload_completed) + { + HARD_REG_SET temp; + + extract_insn (insn); + preprocess_constraints (); + ira_implicitly_set_insn_hard_regs (&temp); + IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp); + } + can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn) && code == SET); + if (may_trap_p (x)) + /* Avoid moving trapping instructions accross function calls that might + not always return. */ + add_dependence_list (insn, deps->last_function_call_may_noreturn, + 1, REG_DEP_ANTI); + if (code == COND_EXEC) { sched_analyze_2 (deps, COND_EXEC_TEST (x), insn); @@ -2245,7 +2652,8 @@ and others know that a value is dead. Depend on the last call instruction so that reg-stack won't get confused. */ if (code == CLOBBER) - add_dependence_list (insn, deps->last_function_call, 1, REG_DEP_OUTPUT); + add_dependence_list (insn, deps->last_function_call, 1, + REG_DEP_OUTPUT); } else if (code == PARALLEL) { @@ -2287,6 +2695,8 @@ { rtx next; next = next_nonnote_insn (insn); + while (next && DEBUG_INSN_P (next)) + next = next_nonnote_insn (next); if (next && BARRIER_P (next)) reg_pending_barrier = MOVE_BARRIER; else @@ -2306,6 +2716,8 @@ { struct deps_reg *reg_last = &deps->reg_last[i]; add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI); + add_dependence_list (insn, reg_last->implicit_sets, + 0, REG_DEP_ANTI); add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI); @@ -2361,119 +2773,209 @@ || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn))) reg_pending_barrier = MOVE_BARRIER; - /* If the current insn is conditional, we can't free any - of the lists. */ - if (sched_has_condition_p (insn)) + if (sched_pressure_p) + { + setup_insn_reg_uses (deps, insn); + setup_insn_reg_pressure_info (insn); + } + + /* Add register dependencies for insn. */ + if (DEBUG_INSN_P (insn)) { + rtx prev = deps->last_debug_insn; + rtx u; + + if (!deps->readonly) + deps->last_debug_insn = insn; + + if (prev) + add_dependence (insn, prev, REG_DEP_ANTI); + + add_dependence_list (insn, deps->last_function_call, 1, + REG_DEP_ANTI); + + for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1)) + if (! 
JUMP_P (XEXP (u, 0)) + || !sel_sched_p ()) + add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI); + EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi) - { - struct deps_reg *reg_last = &deps->reg_last[i]; - add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE); - add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE); - - if (!deps->readonly) - { - reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses); - reg_last->uses_length++; - } - } - EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi) - { - struct deps_reg *reg_last = &deps->reg_last[i]; - add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT); - add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI); - - if (!deps->readonly) - { - reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers); - reg_last->clobbers_length++; - } - } - EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi) - { - struct deps_reg *reg_last = &deps->reg_last[i]; - add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT); - add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT); - add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI); - - if (!deps->readonly) - { - reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); - SET_REGNO_REG_SET (&deps->reg_conditional_sets, i); - } - } + { + struct deps_reg *reg_last = &deps->reg_last[i]; + add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI); + add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI); + + if (!deps->readonly) + reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses); + } + CLEAR_REG_SET (reg_pending_uses); + + /* Quite often, a debug insn will refer to stuff in the + previous instruction, but the reason we want this + dependency here is to make sure the scheduler doesn't + gratuitously move a debug insn ahead. This could dirty + DF flags and cause additional analysis that wouldn't have + occurred in compilation without debug insns, and such + additional analysis can modify the generated code. 
*/ + prev = PREV_INSN (insn); + + if (prev && NONDEBUG_INSN_P (prev)) + add_dependence (insn, prev, REG_DEP_ANTI); } else { EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi) - { - struct deps_reg *reg_last = &deps->reg_last[i]; - add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE); - add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE); - - if (!deps->readonly) - { - reg_last->uses_length++; - reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses); - } - } - EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi) - { - struct deps_reg *reg_last = &deps->reg_last[i]; - if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH - || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH) - { - add_dependence_list_and_free (deps, insn, ®_last->sets, 0, - REG_DEP_OUTPUT); - add_dependence_list_and_free (deps, insn, ®_last->uses, 0, - REG_DEP_ANTI); - add_dependence_list_and_free (deps, insn, ®_last->clobbers, 0, - REG_DEP_OUTPUT); - - if (!deps->readonly) - { - reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); - reg_last->clobbers_length = 0; - reg_last->uses_length = 0; - } - } - else - { - add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT); - add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI); - } - - if (!deps->readonly) - { - reg_last->clobbers_length++; - reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers); - } - } - EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi) - { - struct deps_reg *reg_last = &deps->reg_last[i]; - add_dependence_list_and_free (deps, insn, ®_last->sets, 0, - REG_DEP_OUTPUT); - add_dependence_list_and_free (deps, insn, ®_last->clobbers, 0, - REG_DEP_OUTPUT); - add_dependence_list_and_free (deps, insn, ®_last->uses, 0, - REG_DEP_ANTI); - - if (!deps->readonly) - { - reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); - reg_last->uses_length = 0; - reg_last->clobbers_length = 0; - CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i); - } - } + { + struct deps_reg *reg_last = &deps->reg_last[i]; + add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE); + add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI); + add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE); + + if (!deps->readonly) + { + reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses); + reg_last->uses_length++; + } + } + + for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) + if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)) + { + struct deps_reg *reg_last = &deps->reg_last[i]; + add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE); + add_dependence_list (insn, reg_last->implicit_sets, 0, + REG_DEP_ANTI); + add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE); + + if (!deps->readonly) + { + reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses); + reg_last->uses_length++; + } + } + + /* If the current insn is conditional, we can't free any + of the lists. 
*/ + if (sched_has_condition_p (insn)) + { + EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi) + { + struct deps_reg *reg_last = &deps->reg_last[i]; + add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT); + add_dependence_list (insn, reg_last->implicit_sets, 0, + REG_DEP_ANTI); + add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI); + + if (!deps->readonly) + { + reg_last->clobbers + = alloc_INSN_LIST (insn, reg_last->clobbers); + reg_last->clobbers_length++; + } + } + EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi) + { + struct deps_reg *reg_last = &deps->reg_last[i]; + add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT); + add_dependence_list (insn, reg_last->implicit_sets, 0, + REG_DEP_ANTI); + add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT); + add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI); + + if (!deps->readonly) + { + reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); + SET_REGNO_REG_SET (&deps->reg_conditional_sets, i); + } + } + } + else + { + EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi) + { + struct deps_reg *reg_last = &deps->reg_last[i]; + if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH + || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH) + { + add_dependence_list_and_free (deps, insn, ®_last->sets, 0, + REG_DEP_OUTPUT); + add_dependence_list_and_free (deps, insn, + ®_last->implicit_sets, 0, + REG_DEP_ANTI); + add_dependence_list_and_free (deps, insn, ®_last->uses, 0, + REG_DEP_ANTI); + add_dependence_list_and_free + (deps, insn, ®_last->clobbers, 0, REG_DEP_OUTPUT); + + if (!deps->readonly) + { + reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); + reg_last->clobbers_length = 0; + reg_last->uses_length = 0; + } + } + else + { + add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT); + add_dependence_list (insn, reg_last->implicit_sets, 0, + REG_DEP_ANTI); + add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI); + } + + if (!deps->readonly) + { + reg_last->clobbers_length++; + reg_last->clobbers + = alloc_INSN_LIST (insn, reg_last->clobbers); + } + } + EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi) + { + struct deps_reg *reg_last = &deps->reg_last[i]; + + add_dependence_list_and_free (deps, insn, ®_last->sets, 0, + REG_DEP_OUTPUT); + add_dependence_list_and_free (deps, insn, + ®_last->implicit_sets, + 0, REG_DEP_ANTI); + add_dependence_list_and_free (deps, insn, ®_last->clobbers, 0, + REG_DEP_OUTPUT); + add_dependence_list_and_free (deps, insn, ®_last->uses, 0, + REG_DEP_ANTI); + + if (!deps->readonly) + { + reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets); + reg_last->uses_length = 0; + reg_last->clobbers_length = 0; + CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i); + } + } + } } + for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) + if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i)) + { + struct deps_reg *reg_last = &deps->reg_last[i]; + add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI); + add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI); + add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI); + + if (!deps->readonly) + reg_last->implicit_sets + = alloc_INSN_LIST (insn, reg_last->implicit_sets); + } + if (!deps->readonly) { IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses); IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers); IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets); + for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) + if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i) + || 
TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i)) + SET_REGNO_REG_SET (&deps->reg_last_in_use, i); /* Set up the pending barrier found. */ deps->last_reg_pending_barrier = reg_pending_barrier; @@ -2482,6 +2984,8 @@ CLEAR_REG_SET (reg_pending_uses); CLEAR_REG_SET (reg_pending_clobbers); CLEAR_REG_SET (reg_pending_sets); + CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers); + CLEAR_HARD_REG_SET (implicit_reg_pending_uses); /* Add dependencies if a scheduling barrier was found. */ if (reg_pending_barrier) @@ -2494,12 +2998,14 @@ { struct deps_reg *reg_last = &deps->reg_last[i]; add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI); - add_dependence_list - (insn, reg_last->sets, 0, - reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI); - add_dependence_list - (insn, reg_last->clobbers, 0, - reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI); + add_dependence_list (insn, reg_last->sets, 0, + reg_pending_barrier == TRUE_BARRIER + ? REG_DEP_TRUE : REG_DEP_ANTI); + add_dependence_list (insn, reg_last->implicit_sets, 0, + REG_DEP_ANTI); + add_dependence_list (insn, reg_last->clobbers, 0, + reg_pending_barrier == TRUE_BARRIER + ? REG_DEP_TRUE : REG_DEP_ANTI); } } else @@ -2509,12 +3015,15 @@ struct deps_reg *reg_last = &deps->reg_last[i]; add_dependence_list_and_free (deps, insn, ®_last->uses, 0, REG_DEP_ANTI); - add_dependence_list_and_free - (deps, insn, ®_last->sets, 0, - reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI); - add_dependence_list_and_free - (deps, insn, ®_last->clobbers, 0, - reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI); + add_dependence_list_and_free (deps, insn, ®_last->sets, 0, + reg_pending_barrier == TRUE_BARRIER + ? REG_DEP_TRUE : REG_DEP_ANTI); + add_dependence_list_and_free (deps, insn, + ®_last->implicit_sets, 0, + REG_DEP_ANTI); + add_dependence_list_and_free (deps, insn, ®_last->clobbers, 0, + reg_pending_barrier == TRUE_BARRIER + ? REG_DEP_TRUE : REG_DEP_ANTI); if (!deps->readonly) { @@ -2533,10 +3042,10 @@ } /* Flush pending lists on jumps, but not on speculative checks. */ - if (JUMP_P (insn) && !(sel_sched_p () + if (JUMP_P (insn) && !(sel_sched_p () && sel_insn_is_speculation_check (insn))) flush_pending_lists (deps, insn, true, true); - + if (!deps->readonly) CLEAR_REG_SET (&deps->reg_conditional_sets); reg_pending_barrier = NOT_A_BARRIER; @@ -2557,7 +3066,30 @@ int src_regno, dest_regno; if (set == NULL) - goto end_call_group; + { + if (DEBUG_INSN_P (insn)) + /* We don't want to mark debug insns as part of the same + sched group. We know they really aren't, but if we use + debug insns to tell that a call group is over, we'll + get different code if debug insns are not there and + instructions that follow seem like they should be part + of the call group. + + Also, if we did, fixup_sched_groups() would move the + deps of the debug insn to the call insn, modifying + non-debug post-dependency counts of the debug insn + dependencies and otherwise messing with the scheduling + order. + + Instead, let such debug insns be scheduled freely, but + keep the call group open in case there are insns that + should be part of it afterwards. Since we grant debug + insns higher priority than even sched group insns, it + will all turn out all right. 
*/ + goto debug_dont_end_call_group; + else + goto end_call_group; + } tmp = SET_DEST (set); if (GET_CODE (tmp) == SUBREG) @@ -2588,7 +3120,7 @@ && deps->in_post_call_group_p == post_call_initial) deps->in_post_call_group_p = post_call; - if (!sel_sched_p () || sched_emulate_haifa_p) + if (!sel_sched_p () || sched_emulate_haifa_p) { SCHED_GROUP_P (insn) = 1; CANT_MOVE (insn) = 1; @@ -2602,6 +3134,7 @@ } } + debug_dont_end_call_group: if ((current_sched_info->flags & DO_SPECULATION) && !sched_insn_is_legitimate_for_speculation_p (insn, 0)) /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot @@ -2613,7 +3146,7 @@ { sd_iterator_def sd_it; dep_t dep; - + for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK); sd_iterator_cond (&sd_it, &dep);) change_spec_dep_to_hard (sd_it); @@ -2621,6 +3154,73 @@ } } +/* Return TRUE if INSN might not always return normally (e.g. call exit, + longjmp, loop forever, ...). */ +static bool +call_may_noreturn_p (rtx insn) +{ + rtx call; + + /* const or pure calls that aren't looping will always return. */ + if (RTL_CONST_OR_PURE_CALL_P (insn) + && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn)) + return false; + + call = PATTERN (insn); + if (GET_CODE (call) == PARALLEL) + call = XVECEXP (call, 0, 0); + if (GET_CODE (call) == SET) + call = SET_SRC (call); + if (GET_CODE (call) == CALL + && MEM_P (XEXP (call, 0)) + && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF) + { + rtx symbol = XEXP (XEXP (call, 0), 0); + if (SYMBOL_REF_DECL (symbol) + && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL) + { + if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol)) + == BUILT_IN_NORMAL) + switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))) + { + case BUILT_IN_BCMP: + case BUILT_IN_BCOPY: + case BUILT_IN_BZERO: + case BUILT_IN_INDEX: + case BUILT_IN_MEMCHR: + case BUILT_IN_MEMCMP: + case BUILT_IN_MEMCPY: + case BUILT_IN_MEMMOVE: + case BUILT_IN_MEMPCPY: + case BUILT_IN_MEMSET: + case BUILT_IN_RINDEX: + case BUILT_IN_STPCPY: + case BUILT_IN_STPNCPY: + case BUILT_IN_STRCAT: + case BUILT_IN_STRCHR: + case BUILT_IN_STRCMP: + case BUILT_IN_STRCPY: + case BUILT_IN_STRCSPN: + case BUILT_IN_STRLEN: + case BUILT_IN_STRNCAT: + case BUILT_IN_STRNCMP: + case BUILT_IN_STRNCPY: + case BUILT_IN_STRPBRK: + case BUILT_IN_STRRCHR: + case BUILT_IN_STRSPN: + case BUILT_IN_STRSTR: + /* Assume certain string/memory builtins always return. */ + return false; + default: + break; + } + } + } + + /* For all other calls assume that they might not always return. */ + return true; +} + /* Analyze INSN with DEPS as a context. */ void deps_analyze_insn (struct deps *deps, rtx insn) @@ -2628,13 +3228,13 @@ if (sched_deps_info->start_insn) sched_deps_info->start_insn (insn); - if (NONJUMP_INSN_P (insn) || JUMP_P (insn)) + if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn)) { - /* Make each JUMP_INSN (but not a speculative check) + /* Make each JUMP_INSN (but not a speculative check) a scheduling barrier for memory references. */ if (!deps->readonly - && JUMP_P (insn) - && !(sel_sched_p () + && JUMP_P (insn) + && !(sel_sched_p () && sel_insn_is_speculation_check (insn))) { /* Keep the list a reasonable size. */ @@ -2666,7 +3266,7 @@ if (global_regs[i]) { SET_REGNO_REG_SET (reg_pending_sets, i); - SET_REGNO_REG_SET (reg_pending_uses, i); + SET_HARD_REG_BIT (implicit_reg_pending_uses, i); } /* Other call-clobbered hard regs may be clobbered. 
Since we only have a choice between 'might be clobbered' @@ -2679,7 +3279,7 @@ by the function, but it is certain that the stack pointer is among them, but be conservative. */ else if (fixed_regs[i]) - SET_REGNO_REG_SET (reg_pending_uses, i); + SET_HARD_REG_BIT (implicit_reg_pending_uses, i); /* The frame pointer is normally not used by the function itself, but by the debugger. */ /* ??? MIPS o32 is an exception. It uses the frame pointer @@ -2688,12 +3288,12 @@ else if (i == FRAME_POINTER_REGNUM || (i == HARD_FRAME_POINTER_REGNUM && (! reload_completed || frame_pointer_needed))) - SET_REGNO_REG_SET (reg_pending_uses, i); + SET_HARD_REG_BIT (implicit_reg_pending_uses, i); } /* For each insn which shouldn't cross a call, add a dependence between that insn and this call insn. */ - add_dependence_list_and_free (deps, insn, + add_dependence_list_and_free (deps, insn, &deps->sched_before_next_call, 1, REG_DEP_ANTI); @@ -2719,7 +3319,16 @@ /* Remember the last function call for limiting lifetimes. */ free_INSN_LIST_list (&deps->last_function_call); deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX); - + + if (call_may_noreturn_p (insn)) + { + /* Remember the last function call that might not always return + normally for limiting moves of trapping insns. */ + free_INSN_LIST_list (&deps->last_function_call_may_noreturn); + deps->last_function_call_may_noreturn + = alloc_INSN_LIST (insn, NULL_RTX); + } + /* Before reload, begin a post-call group, so as to keep the lifetimes of hard registers correct. */ if (! reload_completed) @@ -2740,7 +3349,7 @@ sched_deps_info->finish_insn (); /* Fixup the dependencies in the sched group. */ - if ((NONJUMP_INSN_P (insn) || JUMP_P (insn)) + if ((NONJUMP_INSN_P (insn) || JUMP_P (insn)) && SCHED_GROUP_P (insn) && !sel_sched_p ()) fixup_sched_groups (insn); } @@ -2758,6 +3367,8 @@ { rtx insn = prev_nonnote_insn (head); + while (insn && DEBUG_INSN_P (insn)) + insn = prev_nonnote_insn (insn); if (insn && CALL_P (insn)) deps->in_post_call_group_p = post_call_initial; } @@ -2850,15 +3461,19 @@ } /* Initialize variables for region data dependence analysis. - n_bbs is the number of region blocks. */ + When LAZY_REG_LAST is true, do not allocate reg_last array + of struct deps immediately. */ void -init_deps (struct deps *deps) +init_deps (struct deps *deps, bool lazy_reg_last) { int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ()); deps->max_reg = max_reg; - deps->reg_last = XCNEWVEC (struct deps_reg, max_reg); + if (lazy_reg_last) + deps->reg_last = NULL; + else + deps->reg_last = XCNEWVEC (struct deps_reg, max_reg); INIT_REG_SET (&deps->reg_last_in_use); INIT_REG_SET (&deps->reg_conditional_sets); @@ -2871,12 +3486,26 @@ deps->pending_flush_length = 0; deps->last_pending_memory_flush = 0; deps->last_function_call = 0; + deps->last_function_call_may_noreturn = 0; deps->sched_before_next_call = 0; deps->in_post_call_group_p = not_post_call; + deps->last_debug_insn = 0; deps->last_reg_pending_barrier = NOT_A_BARRIER; deps->readonly = 0; } +/* Init only reg_last field of DEPS, which was not allocated before as + we inited DEPS lazily. */ +void +init_deps_reg_last (struct deps *deps) +{ + gcc_assert (deps && deps->max_reg > 0); + gcc_assert (deps->reg_last == NULL); + + deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg); +} + + /* Free insn lists found in DEPS. */ void @@ -2885,6 +3514,14 @@ unsigned i; reg_set_iterator rsi; + /* We set max_reg to 0 when this context was already freed. 
*/ + if (deps->max_reg == 0) + { + gcc_assert (deps->reg_last == NULL); + return; + } + deps->max_reg = 0; + free_INSN_LIST_list (&deps->pending_read_insns); free_EXPR_LIST_list (&deps->pending_read_mems); free_INSN_LIST_list (&deps->pending_write_insns); @@ -2901,13 +3538,18 @@ free_INSN_LIST_list (®_last->uses); if (reg_last->sets) free_INSN_LIST_list (®_last->sets); + if (reg_last->implicit_sets) + free_INSN_LIST_list (®_last->implicit_sets); if (reg_last->clobbers) free_INSN_LIST_list (®_last->clobbers); } CLEAR_REG_SET (&deps->reg_last_in_use); CLEAR_REG_SET (&deps->reg_conditional_sets); - free (deps->reg_last); + /* As we initialize reg_last lazily, it is possible that we didn't allocate + it at all. */ + if (deps->reg_last) + free (deps->reg_last); deps->reg_last = NULL; deps = NULL; @@ -2921,10 +3563,11 @@ int removed; unsigned i; reg_set_iterator rsi; - + removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns, &deps->pending_read_mems); - deps->pending_read_list_length -= removed; + if (!DEBUG_INSN_P (insn)) + deps->pending_read_list_length -= removed; removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns, &deps->pending_write_mems); deps->pending_write_list_length -= removed; @@ -2938,14 +3581,21 @@ remove_from_dependence_list (insn, ®_last->uses); if (reg_last->sets) remove_from_dependence_list (insn, ®_last->sets); + if (reg_last->implicit_sets) + remove_from_dependence_list (insn, ®_last->implicit_sets); if (reg_last->clobbers) remove_from_dependence_list (insn, ®_last->clobbers); - if (!reg_last->uses && !reg_last->sets && !reg_last->clobbers) + if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets + && !reg_last->clobbers) CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i); } if (CALL_P (insn)) - remove_from_dependence_list (insn, &deps->last_function_call); + { + remove_from_dependence_list (insn, &deps->last_function_call); + remove_from_dependence_list (insn, + &deps->last_function_call_may_noreturn); + } remove_from_dependence_list (insn, &deps->sched_before_next_call); } @@ -2953,9 +3603,9 @@ static void init_deps_data_vector (void) { - int reserve = (sched_max_luid + 1 + int reserve = (sched_max_luid + 1 - VEC_length (haifa_deps_insn_data_def, h_d_i_d)); - if (reserve > 0 + if (reserve > 0 && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve)) VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d, 3 * sched_max_luid / 2); @@ -2971,8 +3621,8 @@ int insns_in_block = sched_max_luid / n_basic_blocks + 1; init_deps_data_vector (); - - /* We use another caching mechanism for selective scheduling, so + + /* We use another caching mechanism for selective scheduling, so we don't use this one. */ if (!sel_sched_p () && global_p && insns_in_block > 100 * 5) { @@ -2986,7 +3636,7 @@ extend_dependency_caches (sched_max_luid, true); } - if (global_p) + if (global_p) { dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list), /* Allocate lists for one block at a time. 
*/ @@ -3044,7 +3694,7 @@ VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d); cache_size = 0; - + if (true_dependency_cache) { int i; @@ -3080,6 +3730,8 @@ void init_deps_global (void) { + CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers); + CLEAR_HARD_REG_SET (implicit_reg_pending_uses); reg_pending_sets = ALLOC_REG_SET (®_obstack); reg_pending_clobbers = ALLOC_REG_SET (®_obstack); reg_pending_uses = ALLOC_REG_SET (®_obstack); @@ -3163,7 +3815,7 @@ gcc_assert (insn == cur_insn); else cur_insn = insn; - + note_dep (elem, ds); if (!internal) cur_insn = NULL; @@ -3453,14 +4105,14 @@ gcc_assert (ds & DEP_TRUE); else if (dt == REG_DEP_OUTPUT) gcc_assert ((ds & DEP_OUTPUT) - && !(ds & DEP_TRUE)); - else + && !(ds & DEP_TRUE)); + else gcc_assert ((dt == REG_DEP_ANTI) && (ds & DEP_ANTI) && !(ds & (DEP_OUTPUT | DEP_TRUE))); /* HARD_DEP can not appear in dep_status of a link. */ - gcc_assert (!(ds & HARD_DEP)); + gcc_assert (!(ds & HARD_DEP)); /* Check that dependence status is set correctly when speculation is not supported. */ @@ -3502,8 +4154,8 @@ /* Subsequent speculations should resolve true dependencies. */ gcc_assert ((ds & DEP_TYPES) == DEP_TRUE); } - - /* Check that true and anti dependencies can't have other speculative + + /* Check that true and anti dependencies can't have other speculative statuses. */ if (ds & DEP_TRUE) gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
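
The most invasive change above is the debug-insn support: depl_on_debug_p () is introduced, DEPS_LIST_N_LINKS is no longer bumped for links whose producer is a debug insn, and sd_lists_empty_p () is rewritten to walk the lists instead of comparing sd_lists_size () with zero. The following sketch shows why that rewrite is needed once sizes exclude debug deps; it uses hypothetical stand-in types, not GCC's dep_link_t/deps_list_t.

    #include <stdbool.h>
    #include <stdio.h>

    struct dep_link {
      bool pro_is_debug, con_is_debug;  /* producer / consumer insn kinds */
      struct dep_link *next;
    };

    struct deps_list {
      struct dep_link *first;
      int n_links;                      /* counts non-debug links only */
    };

    /* Mirror of depl_on_debug_p: a link of a non-debug insn on a debug one.  */
    static bool on_debug_p (const struct dep_link *l)
    {
      return l->pro_is_debug && !l->con_is_debug;
    }

    static void add_link (struct deps_list *lst, struct dep_link *l)
    {
      l->next = lst->first;
      lst->first = l;
      if (!on_debug_p (l))              /* "Don't count debug deps."  */
        lst->n_links++;
    }

    /* Emptiness must now walk the list: a cached size of zero no longer
       implies emptiness, because debug links are kept but not counted.  */
    static bool list_empty_p (const struct deps_list *lst)
    {
      return lst->first == NULL;
    }

    int main (void)
    {
      struct deps_list lst = { NULL, 0 };
      struct dep_link debug_link = { true, false, NULL };

      add_link (&lst, &debug_link);
      printf ("size=%d empty=%d\n", lst.n_links, (int) list_empty_p (&lst));
      /* Prints "size=0 empty=0": the size is zero but the list is not
         empty, which is why the patch rewrites sd_lists_empty_p.  */
      return 0;
    }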
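
The same theme shows up in sched_analyze_insn (): a debug insn receives only anti dependencies — on the last setters of the registers it mentions, on the last debug insn, and on its previous non-debug neighbour — so it never constrains real code motion but also cannot be moved ahead gratuitously. A minimal model, with hypothetical insn and dependency types:

    #include <stdbool.h>
    #include <stdio.h>

    enum dep_kind { DEP_TRUE, DEP_ANTI };

    struct insn { int uid; bool debug_p; };

    /* Record one dependency edge CON -> PRO (just printed here).  */
    static void add_dep (const struct insn *con, const struct insn *pro,
                         enum dep_kind kind)
    {
      printf ("insn %d depends on insn %d (%s)\n",
              con->uid, pro->uid, kind == DEP_TRUE ? "true" : "anti");
    }

    /* A debug insn's register use yields only an ANTI dep on the last
       setter (a normal insn would get a TRUE one), and the insn is also
       anchored behind its previous non-debug neighbour.  */
    static void analyze_debug_insn (const struct insn *insn,
                                    const struct insn *last_setter,
                                    const struct insn *prev_nondebug)
    {
      add_dep (insn, last_setter, DEP_ANTI);
      if (prev_nondebug)
        add_dep (insn, prev_nondebug, DEP_ANTI);
    }

    int main (void)
    {
      struct insn set = { 1, false }, use = { 2, false }, dbg = { 3, true };
      add_dep (&use, &set, DEP_TRUE);   /* ordinary use: true dependency */
      analyze_debug_insn (&dbg, &set, &use);
      return 0;
    }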
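
The block of new functions from create_insn_reg_use () to setup_insn_reg_pressure_info () implements the bookkeeping for register-pressure-sensitive scheduling: for each cover class it accumulates clobber, set, and unused-set increases plus a net change. Below is a compressed sketch of the birth/death accounting for a single class; the names and the single-class simplification are mine, not GCC's.

    #include <stdbool.h>
    #include <stdio.h>

    /* One record per register class, mirroring the four counters the
       patch keeps in reg_pressure_info[].  */
    struct pressure
    {
      int clobber_increase;     /* registers born as clobbers */
      int set_increase;         /* registers born as real sets */
      int unused_set_increase;  /* sets whose result carries REG_UNUSED */
      int change;               /* net pressure change across the insn */
    };

    /* Birth of NREGS registers.  Net pressure only rises for a genuine
       set of a register the insn does not also use (cf. insn_use_p).  */
    static void
    mark_birth (struct pressure *p, int nregs,
                bool clobber_p, bool unused_p, bool insn_uses_reg_p)
    {
      if (clobber_p)
        p->clobber_increase += nregs;
      else if (unused_p)
        p->unused_set_increase += nregs;
      else
        {
          p->set_increase += nregs;
          if (!insn_uses_reg_p)
            p->change += nregs;
        }
    }

    /* Death of NREGS registers (a REG_DEAD note): net pressure drops.  */
    static void
    mark_death (struct pressure *p, int nregs)
    {
      p->change -= nregs;
    }

    int main (void)
    {
      struct pressure p = { 0, 0, 0, 0 };

      mark_death (&p, 1);                       /* one input dies here */
      mark_birth (&p, 1, false, false, false);  /* one result is set */
      printf ("net change = %d\n", p.change);   /* 0: pressure-neutral */
      return 0;
    }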
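
call_may_noreturn_p () and the new last_function_call_may_noreturn list keep trapping insns from being hoisted across calls that might never return (exit, longjmp, an infinite loop), while const/pure non-looping calls and a list of string/memory builtins are trusted to return. A toy classifier in the same spirit — the whitelist below is only a subset of the patch's, and the flag-based interface is invented for the sketch:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool
    call_may_noreturn_p (bool const_or_pure_p, bool looping_p,
                         const char *callee)
    {
      /* A subset of the builtins the patch assumes always return.  */
      static const char *const always_return[] = {
        "memcpy", "memmove", "memset", "memcmp", "memchr",
        "strcpy", "strncpy", "strcmp", "strncmp", "strlen", NULL
      };

      /* const or pure calls that aren't looping will always return.  */
      if (const_or_pure_p && !looping_p)
        return false;

      if (callee)
        for (int i = 0; always_return[i]; i++)
          if (strcmp (callee, always_return[i]) == 0)
            return false;

      /* For all other calls assume they might not always return.  */
      return true;
    }

    int main (void)
    {
      printf ("memcpy: %d  qsort: %d\n",
              (int) call_may_noreturn_p (false, false, "memcpy"),
              (int) call_may_noreturn_p (false, false, "qsort"));
      /* Prints "memcpy: 0  qsort: 1".  */
      return 0;
    }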
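
Finally, init_deps () gains a lazy_reg_last parameter, init_deps_reg_last () materializes the array on demand, and free_deps () now treats max_reg == 0 as an "already freed" marker. A self-contained sketch of that lifecycle (hypothetical struct; GCC's deps_reg entries are lists, not ints):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct deps
    {
      int max_reg;     /* 0 once the context has been freed */
      int *reg_last;   /* allocated lazily on request */
    };

    static void
    init_deps (struct deps *d, int max_reg, bool lazy_reg_last)
    {
      d->max_reg = max_reg;
      d->reg_last = lazy_reg_last
                    ? NULL : calloc (max_reg, sizeof *d->reg_last);
    }

    /* Materialize reg_last for a context that was initialized lazily.  */
    static void
    init_deps_reg_last (struct deps *d)
    {
      assert (d->max_reg > 0 && d->reg_last == NULL);
      d->reg_last = calloc (d->max_reg, sizeof *d->reg_last);
    }

    static void
    free_deps (struct deps *d)
    {
      if (d->max_reg == 0)        /* already freed: nothing to do */
        {
          assert (d->reg_last == NULL);
          return;
        }
      d->max_reg = 0;
      free (d->reg_last);         /* may be NULL if never materialized */
      d->reg_last = NULL;
    }

    int main (void)
    {
      struct deps d;

      init_deps (&d, 16, true);   /* lazy: no reg_last array yet */
      init_deps_reg_last (&d);    /* allocate it on first real use */
      free_deps (&d);
      free_deps (&d);             /* second free is a harmless no-op */
      return 0;
    }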