diff gcc/sel-sched-ir.c @ 55:77e2b8dfacca gcc-4.4.5

update it from 4.4.3 to 4.5.0
author ryoma <e075725@ie.u-ryukyu.ac.jp>
date Fri, 12 Feb 2010 23:39:51 +0900
parents a06113de4d67
children b7f97abdc517
line wrap: on
line diff
--- a/gcc/sel-sched-ir.c	Sun Feb 07 18:28:00 2010 +0900
+++ b/gcc/sel-sched-ir.c	Fri Feb 12 23:39:51 2010 +0900
@@ -1,5 +1,5 @@
 /* Instruction scheduling pass.  Selective scheduler and pipeliner.
-   Copyright (C) 2006, 2007, 2008 Free Software Foundation, Inc.
+   Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
 
 This file is part of GCC.
 
@@ -122,7 +122,7 @@
   int n;
 
   /* Its size.  */
-  int s;  
+  int s;
 } nop_pool = { NULL, 0, 0 };
 
 /* The pool for basic block notes.  */
@@ -134,7 +134,7 @@
    EXIT_INSN is successor of the insns that lead to EXIT_BLOCK.  */
 rtx exit_insn = NULL_RTX;
 
-/* TRUE if while scheduling current region, which is loop, its preheader 
+/* TRUE if while scheduling current region, which is loop, its preheader
    was removed.  */
 bool preheader_removed = false;
 
@@ -157,6 +157,7 @@
 static bool insn_is_the_only_one_in_bb_p (insn_t);
 static void create_initial_data_sets (basic_block);
 
+static void free_av_set (basic_block);
 static void invalidate_av_set (basic_block);
 static void extend_insn_data (void);
 static void sel_init_new_insn (insn_t, int);
@@ -258,10 +259,10 @@
 
 /* Add new fence consisting of INSN and STATE to the list pointed to by LP.  */
 static void
-flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc, 
-           insn_t last_scheduled_insn, VEC(rtx,gc) *executing_insns, 
-           int *ready_ticks, int ready_ticks_size, insn_t sched_next, 
-           int cycle, int cycle_issued_insns, 
+flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
+           insn_t last_scheduled_insn, VEC(rtx,gc) *executing_insns,
+           int *ready_ticks, int ready_ticks_size, insn_t sched_next,
+           int cycle, int cycle_issued_insns,
            bool starts_cycle_p, bool after_stall_p)
 {
   fence_t f;
@@ -316,7 +317,7 @@
 def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call)
 {
   def_t d;
-  
+
   _list_add (dl);
   d = DEF_LIST_DEF (*dl);
 
@@ -327,11 +328,11 @@
 
 /* Functions to work with target contexts.  */
 
-/* Bulk target context.  It is convenient for debugging purposes to ensure 
+/* Bulk target context.  It is convenient for debugging purposes to ensure
    that there are no uninitialized (null) target contexts.  */
 static tc_t bulk_tc = (tc_t) 1;
 
-/* Target hooks wrappers.  In the future we can provide some default 
+/* Target hooks wrappers.  In the future we can provide some default
    implementations for them.  */
 
 /* Allocate a store for the target context.  */
@@ -423,7 +424,7 @@
   init_target_context (tc, clean_p);
 }
 
-/* Functions to work with dependence contexts. 
+/* Functions to work with dependence contexts.
    Dc (aka deps context, aka deps_t, aka struct deps *) is short for dependence
    context.  It accumulates information about processed insns to decide if
    current insn is dependent on the processed ones.  */
@@ -432,7 +433,7 @@
 static void
 copy_deps_context (deps_t to, deps_t from)
 {
-  init_deps (to);
+  init_deps (to, false);
   deps_join (to, from);
 }
 
@@ -449,7 +450,7 @@
 {
   deps_t dc = alloc_deps_context ();
 
-  init_deps (dc);
+  init_deps (dc, false);
   return dc;
 }
 
@@ -483,10 +484,10 @@
 reset_deps_context (deps_t dc)
 {
   clear_deps_context (dc);
-  init_deps (dc);
-}
-
-/* This structure describes the dependence analysis hooks for advancing 
+  init_deps (dc, false);
+}
+
+/* This structure describes the dependence analysis hooks for advancing
    dependence context.  */
 static struct sched_deps_info_def advance_deps_context_sched_deps_info =
   {
@@ -597,11 +598,11 @@
   succ_iterator si;
   bool first = true;
   int ready_ticks_size = get_max_uid () + 1;
-      
-  FOR_EACH_SUCC_1 (succ, si, old_fence, 
+
+  FOR_EACH_SUCC_1 (succ, si, old_fence,
                    SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
     {
-      
+
       if (first)
         first = false;
       else
@@ -611,28 +612,28 @@
 		 state_create (),
 		 create_deps_context () /* dc */,
 		 create_target_context (true) /* tc */,
-		 NULL_RTX /* last_scheduled_insn */, 
+		 NULL_RTX /* last_scheduled_insn */,
                  NULL, /* executing_insns */
                  XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
                  ready_ticks_size,
                  NULL_RTX /* sched_next */,
-		 1 /* cycle */, 0 /* cycle_issued_insns */, 
-		 1 /* starts_cycle_p */, 0 /* after_stall_p */);  
+		 1 /* cycle */, 0 /* cycle_issued_insns */,
+		 1 /* starts_cycle_p */, 0 /* after_stall_p */);
     }
 }
 
 /* Merges two fences (filling fields of fence F with resulting values) by
    following rules: 1) state, target context and last scheduled insn are
-   propagated from fallthrough edge if it is available; 
+   propagated from fallthrough edge if it is available;
    2) deps context and cycle is propagated from more probable edge;
-   3) all other fields are set to corresponding constant values.  
-
-   INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS, 
+   3) all other fields are set to corresponding constant values.
+
+   INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS,
    READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE and AFTER_STALL_P
    are the corresponding fields of the second fence.  */
 static void
 merge_fences (fence_t f, insn_t insn,
-	      state_t state, deps_t dc, void *tc, 
+	      state_t state, deps_t dc, void *tc,
               rtx last_scheduled_insn, VEC(rtx, gc) *executing_insns,
               int *ready_ticks, int ready_ticks_size,
 	      rtx sched_next, int cycle, bool after_stall_p)
@@ -642,22 +643,22 @@
   gcc_assert (sel_bb_head_p (FENCE_INSN (f))
               && !sched_next && !FENCE_SCHED_NEXT (f));
 
-  /* Check if we can decide which path fences came.  
+  /* Check if we can decide which path fences came.
      If we can't (or don't want to) - reset all.  */
   if (last_scheduled_insn == NULL
       || last_scheduled_insn_old == NULL
-      /* This is a case when INSN is reachable on several paths from 
-         one insn (this can happen when pipelining of outer loops is on and 
-         there are two edges: one going around of inner loop and the other - 
+      /* This is a case when INSN is reachable on several paths from
+         one insn (this can happen when pipelining of outer loops is on and
+         there are two edges: one going around of inner loop and the other -
          right through it; in such case just reset everything).  */
       || last_scheduled_insn == last_scheduled_insn_old)
     {
       state_reset (FENCE_STATE (f));
       state_free (state);
-  
+
       reset_deps_context (FENCE_DC (f));
       delete_deps_context (dc);
-  
+
       reset_target_context (FENCE_TC (f), true);
       delete_target_context (tc);
 
@@ -668,7 +669,7 @@
       VEC_free (rtx, gc, executing_insns);
       free (ready_ticks);
       if (FENCE_EXECUTING_INSNS (f))
-        VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0, 
+        VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
                           VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
       if (FENCE_READY_TICKS (f))
         memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
@@ -679,7 +680,7 @@
       edge candidate;
       succ_iterator si;
       insn_t succ;
-    
+
       /* Find fallthrough edge.  */
       gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb);
       candidate = find_fallthru_edge (BLOCK_FOR_INSN (insn)->prev_bb);
@@ -691,16 +692,16 @@
           /* No fallthrough edge leading to basic block of INSN.  */
           state_reset (FENCE_STATE (f));
           state_free (state);
-  
+
           reset_target_context (FENCE_TC (f), true);
           delete_target_context (tc);
-  
+
           FENCE_LAST_SCHEDULED_INSN (f) = NULL;
         }
       else
         if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn))
           {
-            /* Would be weird if same insn is successor of several fallthrough 
+            /* Would be weird if same insn is successor of several fallthrough
                edges.  */
             gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb
                         != BLOCK_FOR_INSN (last_scheduled_insn_old));
@@ -753,10 +754,10 @@
             delete_deps_context (dc);
             VEC_free (rtx, gc, executing_insns);
             free (ready_ticks);
-  
+
             FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle);
             if (FENCE_EXECUTING_INSNS (f))
-              VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0, 
+              VEC_block_remove (rtx, FENCE_EXECUTING_INSNS (f), 0,
                                 VEC_length (rtx, FENCE_EXECUTING_INSNS (f)));
             if (FENCE_READY_TICKS (f))
               memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f));
@@ -791,13 +792,13 @@
   FENCE_SCHED_NEXT (f) = NULL;
 }
 
-/* Add a new fence to NEW_FENCES list, initializing it from all 
+/* Add a new fence to NEW_FENCES list, initializing it from all
    other parameters.  */
 static void
 add_to_fences (flist_tail_t new_fences, insn_t insn,
-               state_t state, deps_t dc, void *tc, rtx last_scheduled_insn, 
-               VEC(rtx, gc) *executing_insns, int *ready_ticks, 
-               int ready_ticks_size, rtx sched_next, int cycle, 
+               state_t state, deps_t dc, void *tc, rtx last_scheduled_insn,
+               VEC(rtx, gc) *executing_insns, int *ready_ticks,
+               int ready_ticks_size, rtx sched_next, int cycle,
                int cycle_issued_insns, bool starts_cycle_p, bool after_stall_p)
 {
   fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn);
@@ -805,7 +806,7 @@
   if (! f)
     {
       flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc,
-		 last_scheduled_insn, executing_insns, ready_ticks, 
+		 last_scheduled_insn, executing_insns, ready_ticks,
                  ready_ticks_size, sched_next, cycle, cycle_issued_insns,
 		 starts_cycle_p, after_stall_p);
 
@@ -814,8 +815,8 @@
     }
   else
     {
-      merge_fences (f, insn, state, dc, tc, last_scheduled_insn, 
-                    executing_insns, ready_ticks, ready_ticks_size, 
+      merge_fences (f, insn, state, dc, tc, last_scheduled_insn,
+                    executing_insns, ready_ticks, ready_ticks_size,
                     sched_next, cycle, after_stall_p);
     }
 }
@@ -828,14 +829,14 @@
   flist_t *tailp = FLIST_TAIL_TAILP (new_fences);
 
   old = FLIST_FENCE (old_fences);
-  f = flist_lookup (FLIST_TAIL_HEAD (new_fences), 
+  f = flist_lookup (FLIST_TAIL_HEAD (new_fences),
                     FENCE_INSN (FLIST_FENCE (old_fences)));
   if (f)
     {
       merge_fences (f, old->insn, old->state, old->dc, old->tc,
                     old->last_scheduled_insn, old->executing_insns,
                     old->ready_ticks, old->ready_ticks_size,
-                    old->sched_next, old->cycle, 
+                    old->sched_next, old->cycle,
                     old->after_stall_p);
     }
   else
@@ -848,37 +849,37 @@
   FENCE_INSN (old) = NULL;
 }
 
-/* Add a new fence to NEW_FENCES list and initialize most of its data 
+/* Add a new fence to NEW_FENCES list and initialize most of its data
    as a clean one.  */
 void
 add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
 {
   int ready_ticks_size = get_max_uid () + 1;
-  
+
   add_to_fences (new_fences,
                  succ, state_create (), create_deps_context (),
                  create_target_context (true),
-                 NULL_RTX, NULL, 
+                 NULL_RTX, NULL,
                  XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
                  NULL_RTX, FENCE_CYCLE (fence) + 1,
                  0, 1, FENCE_AFTER_STALL_P (fence));
 }
 
-/* Add a new fence to NEW_FENCES list and initialize all of its data 
+/* Add a new fence to NEW_FENCES list and initialize all of its data
    from FENCE and SUCC.  */
 void
 add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence)
 {
-  int * new_ready_ticks 
+  int * new_ready_ticks
     = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence));
-  
+
   memcpy (new_ready_ticks, FENCE_READY_TICKS (fence),
           FENCE_READY_TICKS_SIZE (fence) * sizeof (int));
   add_to_fences (new_fences,
                  succ, state_create_copy (FENCE_STATE (fence)),
                  create_copy_of_deps_context (FENCE_DC (fence)),
                  create_copy_of_target_context (FENCE_TC (fence)),
-                 FENCE_LAST_SCHEDULED_INSN (fence), 
+                 FENCE_LAST_SCHEDULED_INSN (fence),
                  VEC_copy (rtx, gc, FENCE_EXECUTING_INSNS (fence)),
                  new_ready_ticks,
                  FENCE_READY_TICKS_SIZE (fence),
@@ -958,19 +959,19 @@
     regset *v = regset_pool.v;
     int i = 0;
     int n = regset_pool.n;
-    
+
     regset *vv = regset_pool.vv;
     int ii = 0;
     int nn = regset_pool.nn;
-    
+
     int diff = 0;
-    
+
     gcc_assert (n <= nn);
-    
+
     /* Sort both vectors so it will be possible to compare them.  */
     qsort (v, n, sizeof (*v), cmp_v_in_regset_pool);
     qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool);
-    
+
     while (ii < nn)
       {
         if (v[i] == vv[ii])
@@ -978,17 +979,17 @@
         else
           /* VV[II] was lost.  */
           diff++;
-        
+
         ii++;
       }
-    
+
     gcc_assert (diff == regset_pool.diff);
   }
 #endif
-  
+
   /* If not true - we have a memory leak.  */
   gcc_assert (regset_pool.diff == 0);
-  
+
   while (regset_pool.n)
     {
       --regset_pool.n;
@@ -998,7 +999,7 @@
   free (regset_pool.v);
   regset_pool.v = NULL;
   regset_pool.s = 0;
-  
+
   free (regset_pool.vv);
   regset_pool.vv = NULL;
   regset_pool.nn = 0;
@@ -1008,8 +1009,8 @@
 }
 
 
-/* Functions to work with nop pools.  NOP insns are used as temporary 
-   placeholders of the insns being scheduled to allow correct update of 
+/* Functions to work with nop pools.  NOP insns are used as temporary
+   placeholders of the insns being scheduled to allow correct update of
    the data sets.  When update is finished, NOPs are deleted.  */
 
 /* A vinsn that is used to represent a nop.  This vinsn is shared among all
@@ -1044,13 +1045,13 @@
 
 /* Remove NOP from the instruction stream and return it to the pool.  */
 void
-return_nop_to_pool (insn_t nop)
+return_nop_to_pool (insn_t nop, bool full_tidying)
 {
   gcc_assert (INSN_IN_STREAM_P (nop));
-  sel_remove_insn (nop, false, true);
+  sel_remove_insn (nop, false, full_tidying);
 
   if (nop_pool.n == nop_pool.s)
-    nop_pool.v = XRESIZEVEC (rtx, nop_pool.v, 
+    nop_pool.v = XRESIZEVEC (rtx, nop_pool.v,
                              (nop_pool.s = 2 * nop_pool.s + 1));
   nop_pool.v[nop_pool.n++] = nop;
 }
@@ -1066,7 +1067,7 @@
 }
 
 
-/* Skip unspec to support ia64 speculation. Called from rtx_equal_p_cb.  
+/* Skip unspec to support ia64 speculation. Called from rtx_equal_p_cb.
    The callback is given two rtxes XX and YY and writes the new rtxes
    to NX and NY in case some needs to be skipped.  */
 static int
@@ -1074,7 +1075,7 @@
 {
   const_rtx x = *xx;
   const_rtx y = *yy;
-  
+
   if (GET_CODE (x) == UNSPEC
       && (targetm.sched.skip_rtx_p == NULL
           || targetm.sched.skip_rtx_p (x)))
@@ -1083,7 +1084,7 @@
       *ny = CONST_CAST_RTX (y);
       return 1;
     }
-  
+
   if (GET_CODE (y) == UNSPEC
       && (targetm.sched.skip_rtx_p == NULL
           || targetm.sched.skip_rtx_p (y)))
@@ -1092,26 +1093,26 @@
       *ny = XVECEXP (y, 0, 0);
       return 1;
     }
-  
+
   return 0;
 }
 
-/* Callback, called from hash_rtx_cb.  Helps to hash UNSPEC rtx X in a correct way 
+/* Callback, called from hash_rtx_cb.  Helps to hash UNSPEC rtx X in a correct way
    to support ia64 speculation.  When changes are needed, new rtx X and new mode
    NMODE are written, and the callback returns true.  */
 static int
 hash_with_unspec_callback (const_rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
                            rtx *nx, enum machine_mode* nmode)
 {
-  if (GET_CODE (x) == UNSPEC 
+  if (GET_CODE (x) == UNSPEC
       && targetm.sched.skip_rtx_p
       && targetm.sched.skip_rtx_p (x))
     {
       *nx = XVECEXP (x, 0 ,0);
-      *nmode = 0;
+      *nmode = VOIDmode;
       return 1;
     }
-  
+
   return 0;
 }
 
@@ -1122,9 +1123,9 @@
   if (lhs == NULL || rhs == NULL)
     return false;
 
-  /* Do not schedule CONST, CONST_INT and CONST_DOUBLE etc as rhs: no point 
-     to use reg, if const can be used.  Moreover, scheduling const as rhs may 
-     lead to mode mismatch cause consts don't have modes but they could be 
+  /* Do not schedule CONST, CONST_INT and CONST_DOUBLE etc as rhs: no point
+     to use reg, if const can be used.  Moreover, scheduling const as rhs may
+     lead to mode mismatch cause consts don't have modes but they could be
      merged from branches where the same const used in different modes.  */
   if (CONSTANT_P (rhs))
     return false;
@@ -1137,7 +1138,7 @@
   if (REG_P (rhs))
     return false;
 
-  /* See comment at find_used_regs_1 (*1) for explanation of this 
+  /* See comment at find_used_regs_1 (*1) for explanation of this
      restriction.  */
   /* FIXME: remove this later.  */
   if (MEM_P (lhs))
@@ -1151,8 +1152,8 @@
   return true;
 }
 
-/* Initialize vinsn VI for INSN.  Only for use from vinsn_create ().  When 
-   FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable.  This is 
+/* Initialize vinsn VI for INSN.  Only for use from vinsn_create ().  When
+   FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable.  This is
    used e.g. for insns from recovery blocks.  */
 static void
 vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p)
@@ -1163,12 +1164,12 @@
   VINSN_INSN_RTX (vi) = insn;
   VINSN_COUNT (vi) = 0;
   vi->cost = -1;
-  
+
   if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL)
     init_id_from_df (VINSN_ID (vi), insn, force_unique_p);
   else
     deps_init_id (VINSN_ID (vi), insn, force_unique_p);
-  
+
   /* Hash vinsn depending on whether it is separable or not.  */
   hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL;
   if (VINSN_SEPARABLE_P (vi))
@@ -1187,7 +1188,7 @@
                                      NULL, NULL, false, hrcf);
       VINSN_HASH_RTX (vi) = VINSN_HASH (vi);
     }
-    
+
   insn_class = haifa_classify_insn (insn);
   if (insn_class >= 2
       && (!targetm.sched.get_insn_spec_ds
@@ -1208,7 +1209,7 @@
   VINSN_COUNT (vi)++;
 }
 
-/* Create and init VI from the INSN.  Use UNIQUE_P for determining the correct 
+/* Create and init VI from the INSN.  Use UNIQUE_P for determining the correct
    VINSN_TYPE (VI).  */
 static vinsn_t
 vinsn_create (insn_t insn, bool force_unique_p)
@@ -1221,13 +1222,13 @@
 
 /* Return a copy of VI.  When REATTACH_P is true, detach VI and attach
    the copy.  */
-vinsn_t 
+vinsn_t
 vinsn_copy (vinsn_t vi, bool reattach_p)
 {
   rtx copy;
   bool unique = VINSN_UNIQUE_P (vi);
   vinsn_t new_vi;
-  
+
   copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi));
   new_vi = create_vinsn_from_insn_rtx (copy, unique);
   if (reattach_p)
@@ -1252,7 +1253,7 @@
   free (vi);
 }
 
-/* Indicate that VI is no longer a part of some rtx object.  
+/* Indicate that VI is no longer a part of some rtx object.
    Remove VI if it is no longer needed.  */
 void
 vinsn_detach (vinsn_t vi)
@@ -1359,21 +1360,21 @@
 }
 
 /* Emit new insn after AFTER based on EXPR and SEQNO.  If VINSN is not NULL,
-   take it as a new vinsn instead of EXPR's vinsn.  
-   We simplify insns later, after scheduling region in 
+   take it as a new vinsn instead of EXPR's vinsn.
+   We simplify insns later, after scheduling region in
    simplify_changed_insns.  */
 insn_t
-sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno, 
+sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
                               insn_t after)
 {
   expr_t emit_expr;
   insn_t insn;
   int flags;
-  
-  emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr), 
+
+  emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr),
                              seqno);
   insn = EXPR_INSN_RTX (emit_expr);
-  add_insn_after (insn, after, BLOCK_FOR_INSN (insn));          
+  add_insn_after (insn, after, BLOCK_FOR_INSN (insn));
 
   flags = INSN_INIT_TODO_SSID;
   if (INSN_LUID (insn) == 0)
@@ -1403,7 +1404,7 @@
   df_insn_change_bb (insn, bb);
   if (BB_END (bb) == after)
     BB_END (bb) = insn;
-      
+
   prepare_insn_expr (insn, seqno);
   return insn;
 }
@@ -1411,14 +1412,14 @@
 
 /* Functions to work with right-hand sides.  */
 
-/* Search for a hash value determined by UID/NEW_VINSN in a sorted vector 
+/* Search for a hash value determined by UID/NEW_VINSN in a sorted vector
    VECT and return true when found.  Use NEW_VINSN for comparison only when
-   COMPARE_VINSNS is true.  Write to INDP the index on which 
-   the search has stopped, such that inserting the new element at INDP will 
+   COMPARE_VINSNS is true.  Write to INDP the index on which
+   the search has stopped, such that inserting the new element at INDP will
    retain VECT's sort order.  */
 static bool
-find_in_history_vect_1 (VEC(expr_history_def, heap) *vect, 
-                        unsigned uid, vinsn_t new_vinsn, 
+find_in_history_vect_1 (VEC(expr_history_def, heap) *vect,
+                        unsigned uid, vinsn_t new_vinsn,
                         bool compare_vinsns, int *indp)
 {
   expr_history_def *arr;
@@ -1436,14 +1437,14 @@
   while (i <= j)
     {
       unsigned auid = arr[i].uid;
-      vinsn_t avinsn = arr[i].new_expr_vinsn; 
+      vinsn_t avinsn = arr[i].new_expr_vinsn;
 
       if (auid == uid
-          /* When undoing transformation on a bookkeeping copy, the new vinsn 
-             may not be exactly equal to the one that is saved in the vector. 
+          /* When undoing transformation on a bookkeeping copy, the new vinsn
+             may not be exactly equal to the one that is saved in the vector.
              This is because the insn whose copy we're checking was possibly
              substituted itself.  */
-          && (! compare_vinsns 
+          && (! compare_vinsns
               || vinsn_equal_p (avinsn, new_vinsn)))
         {
           *indp = i;
@@ -1458,16 +1459,16 @@
   return false;
 }
 
-/* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT.  Return 
-   the position found or -1, if no such value is in vector.  
+/* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT.  Return
+   the position found or -1, if no such value is in vector.
    Search also for UIDs of insn's originators, if ORIGINATORS_P is true.  */
 int
-find_in_history_vect (VEC(expr_history_def, heap) *vect, rtx insn, 
+find_in_history_vect (VEC(expr_history_def, heap) *vect, rtx insn,
                       vinsn_t new_vinsn, bool originators_p)
 {
   int ind;
 
-  if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn, 
+  if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn,
                               false, &ind))
     return ind;
 
@@ -1480,18 +1481,18 @@
         if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind))
           return ind;
     }
-  
+
   return -1;
 }
 
-/* Insert new element in a sorted history vector pointed to by PVECT, 
-   if it is not there already.  The element is searched using 
+/* Insert new element in a sorted history vector pointed to by PVECT,
+   if it is not there already.  The element is searched using
    UID/NEW_EXPR_VINSN pair.  TYPE, OLD_EXPR_VINSN and SPEC_DS save
    the history of a transformation.  */
 void
 insert_in_history_vect (VEC (expr_history_def, heap) **pvect,
                         unsigned uid, enum local_trans_type type,
-                        vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn, 
+                        vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn,
                         ds_t spec_ds)
 {
   VEC(expr_history_def, heap) *vect = *pvect;
@@ -1505,25 +1506,17 @@
     {
       expr_history_def *phist = VEC_index (expr_history_def, vect, ind);
 
-      /* When merging, either old vinsns are the *same* or, if not, both 
-         old and new vinsns are different pointers.  In the latter case, 
-         though, new vinsns should be equal.  */
-      gcc_assert (phist->old_expr_vinsn == old_expr_vinsn
-                  || (phist->new_expr_vinsn != new_expr_vinsn 
-                      && (vinsn_equal_p 
-                          (phist->old_expr_vinsn, old_expr_vinsn))));
-
-      /* It is possible that speculation types of expressions that were 
+      /* It is possible that speculation types of expressions that were
          propagated through different paths will be different here.  In this
          case, merge the status to get the correct check later.  */
       if (phist->spec_ds != spec_ds)
         phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds);
       return;
     }
-      
+
   temp.uid = uid;
   temp.old_expr_vinsn = old_expr_vinsn;
-  temp.new_expr_vinsn = new_expr_vinsn; 
+  temp.new_expr_vinsn = new_expr_vinsn;
   temp.spec_ds = spec_ds;
   temp.type = type;
 
@@ -1542,15 +1535,15 @@
 
   if (! *pvect)
     return;
-  
-  for (i = 0; 
+
+  for (i = 0;
        VEC_iterate (expr_history_def, *pvect, i, phist);
        i++)
     {
       vinsn_detach (phist->old_expr_vinsn);
       vinsn_detach (phist->new_expr_vinsn);
     }
-  
+
   VEC_free (expr_history_def, heap, *pvect);
   *pvect = NULL;
 }
@@ -1572,7 +1565,7 @@
     return false;
 
   repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL;
-  if (VINSN_SEPARABLE_P (x)) 
+  if (VINSN_SEPARABLE_P (x))
     {
       /* Compare RHSes of VINSNs.  */
       gcc_assert (VINSN_RHS (x));
@@ -1592,7 +1585,7 @@
 init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority,
 	   int sched_times, int orig_bb_index, ds_t spec_done_ds,
 	   ds_t spec_to_check_ds, int orig_sched_cycle,
-	   VEC(expr_history_def, heap) *history, bool target_available, 
+	   VEC(expr_history_def, heap) *history, bool target_available,
            bool was_substituted, bool was_renamed, bool needs_spec_check_p,
            bool cant_move)
 {
@@ -1633,7 +1626,7 @@
       expr_history_def *phist;
 
       temp = VEC_copy (expr_history_def, heap, EXPR_HISTORY_OF_CHANGES (from));
-      for (i = 0; 
+      for (i = 0;
            VEC_iterate (expr_history_def, temp, i, phist);
            i++)
         {
@@ -1642,17 +1635,17 @@
         }
     }
 
-  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), 
+  init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from),
              EXPR_USEFULNESS (from), EXPR_PRIORITY (from),
 	     EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from),
-	     EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 
+	     EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from),
 	     EXPR_ORIG_SCHED_CYCLE (from), temp,
-             EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from), 
+             EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
              EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from),
              EXPR_CANT_MOVE (from));
 }
 
-/* Same, but the final expr will not ever be in av sets, so don't copy 
+/* Same, but the final expr will not ever be in av sets, so don't copy
    "uninteresting" data such as bitmap cache.  */
 void
 copy_expr_onside (expr_t to, expr_t from)
@@ -1672,7 +1665,7 @@
 {
   expr_t expr = INSN_EXPR (insn);
   ds_t ds;
-  
+
   INSN_SEQNO (insn) = seqno;
   EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn);
   EXPR_SPEC (expr) = 0;
@@ -1695,12 +1688,12 @@
 }
 
 /* Update target_available bits when merging exprs TO and FROM.  SPLIT_POINT
-   is non-null when expressions are merged from different successors at 
+   is non-null when expressions are merged from different successors at
    a split point.  */
 static void
 update_target_availability (expr_t to, expr_t from, insn_t split_point)
 {
-  if (EXPR_TARGET_AVAILABLE (to) < 0  
+  if (EXPR_TARGET_AVAILABLE (to) < 0
       || EXPR_TARGET_AVAILABLE (from) < 0)
     EXPR_TARGET_AVAILABLE (to) = -1;
   else
@@ -1714,9 +1707,9 @@
 
           toind = EXPR_ORIG_BB_INDEX (to);
           fromind = EXPR_ORIG_BB_INDEX (from);
-          
+
           if (toind && toind == fromind)
-            /* Do nothing -- everything is done in 
+            /* Do nothing -- everything is done in
                merge_with_other_exprs.  */
             ;
           else
@@ -1728,7 +1721,7 @@
 }
 
 /* Update speculation bits when merging exprs TO and FROM.  SPLIT_POINT
-   is non-null when expressions are merged from different successors at 
+   is non-null when expressions are merged from different successors at
    a split point.  */
 static void
 update_speculative_bits (expr_t to, expr_t from, insn_t split_point)
@@ -1737,30 +1730,30 @@
 
   old_to_ds = EXPR_SPEC_DONE_DS (to);
   old_from_ds = EXPR_SPEC_DONE_DS (from);
-    
+
   EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds);
   EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from);
   EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from);
 
   /* When merging e.g. control & data speculative exprs, or a control
-     speculative with a control&data speculative one, we really have 
+     speculative with a control&data speculative one, we really have
      to change vinsn too.  Also, when speculative status is changed,
      we also need to record this as a transformation in expr's history.  */
   if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE))
     {
       old_to_ds = ds_get_speculation_types (old_to_ds);
       old_from_ds = ds_get_speculation_types (old_from_ds);
-        
+
       if (old_to_ds != old_from_ds)
         {
           ds_t record_ds;
-            
-          /* When both expressions are speculative, we need to change 
+
+          /* When both expressions are speculative, we need to change
              the vinsn first.  */
           if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE))
             {
               int res;
-                
+
               res = speculate_expr (to, EXPR_SPEC_DONE_DS (to));
               gcc_assert (res >= 0);
             }
@@ -1771,9 +1764,9 @@
               record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE;
               record_ds &= ~(old_to_ds & SPECULATIVE);
               record_ds &= ~(old_from_ds & SPECULATIVE);
-                
-              insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to), 
-                                      INSN_UID (split_point), TRANS_SPECULATION, 
+
+              insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
+                                      INSN_UID (split_point), TRANS_SPECULATION,
                                       EXPR_VINSN (from), EXPR_VINSN (to),
                                       record_ds);
             }
@@ -1789,7 +1782,7 @@
 {
   int i;
   expr_history_def *phist;
-  
+
   /* For now, we just set the spec of resulting expr to be minimum of the specs
      of merged exprs.  */
   if (EXPR_SPEC (to) > EXPR_SPEC (from))
@@ -1798,7 +1791,7 @@
   if (split_point)
     EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from);
   else
-    EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to), 
+    EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to),
                                 EXPR_USEFULNESS (from));
 
   if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from))
@@ -1810,17 +1803,17 @@
   if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from))
     EXPR_ORIG_BB_INDEX (to) = 0;
 
-  EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to), 
+  EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to),
                                     EXPR_ORIG_SCHED_CYCLE (from));
 
   /* We keep this vector sorted.  */
-  for (i = 0; 
-       VEC_iterate (expr_history_def, EXPR_HISTORY_OF_CHANGES (from), 
+  for (i = 0;
+       VEC_iterate (expr_history_def, EXPR_HISTORY_OF_CHANGES (from),
                     i, phist);
        i++)
-    insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to), 
-                            phist->uid, phist->type, 
-                            phist->old_expr_vinsn, phist->new_expr_vinsn, 
+    insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to),
+                            phist->uid, phist->type,
+                            phist->old_expr_vinsn, phist->new_expr_vinsn,
                             phist->spec_ds);
 
   EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
@@ -1832,7 +1825,7 @@
 }
 
 /* Merge bits of FROM expr to TO expr.  Vinsns in the exprs should be equal
-   in terms of vinsn_equal_p.  SPLIT_POINT is non-null when expressions 
+   in terms of vinsn_equal_p.  SPLIT_POINT is non-null when expressions
    are merged from different successors at a split point.  */
 void
 merge_expr (expr_t to, expr_t from, insn_t split_point)
@@ -1857,7 +1850,7 @@
 void
 clear_expr (expr_t expr)
 {
- 
+
   vinsn_detach (EXPR_VINSN (expr));
   EXPR_VINSN (expr) = NULL;
 
@@ -1873,17 +1866,17 @@
       if (REG_P (EXPR_LHS (expr))
           && bitmap_bit_p (lv_set, REGNO (EXPR_LHS (expr))))
 	{
-	  /* If it's an insn like r1 = use (r1, ...), and it exists in 
-	     different forms in each of the av_sets being merged, we can't say 
-	     whether original destination register is available or not.  
-	     However, this still works if destination register is not used 
+	  /* If it's an insn like r1 = use (r1, ...), and it exists in
+	     different forms in each of the av_sets being merged, we can't say
+	     whether original destination register is available or not.
+	     However, this still works if destination register is not used
 	     in the original expression: if the branch at which LV_SET we're
 	     looking here is not actually 'other branch' in sense that same
-	     expression is available through it (but it can't be determined 
+	     expression is available through it (but it can't be determined
 	     at computation stage because of transformations on one of the
-	     branches), it still won't affect the availability.  
-	     Liveness of a register somewhere on a code motion path means 
-	     it's either read somewhere on a codemotion path, live on 
+	     branches), it still won't affect the availability.
+	     Liveness of a register somewhere on a code motion path means
+	     it's either read somewhere on a codemotion path, live on
 	     'other' branch, live at the point immediately following
 	     the original operation, or is read by the original operation.
 	     The latter case is filtered out in the condition below.
@@ -1892,7 +1885,7 @@
 	     miss a unifying code motion along both branches using a renamed
 	     register, but it won't affect a code correctness since upon
 	     an actual code motion a bookkeeping code would be generated.  */
-	  if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)), 
+	  if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
 			    REGNO (EXPR_LHS (expr))))
 	    EXPR_TARGET_AVAILABLE (expr) = -1;
 	  else
@@ -1903,8 +1896,8 @@
     {
       unsigned regno;
       reg_set_iterator rsi;
-      
-      EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)), 
+
+      EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)),
                                  0, regno, rsi)
         if (bitmap_bit_p (lv_set, regno))
           {
@@ -1922,7 +1915,7 @@
     }
 }
 
-/* Try to make EXPR speculative.  Return 1 when EXPR's pattern 
+/* Try to make EXPR speculative.  Return 1 when EXPR's pattern
    or dependence status have changed, 2 when also the target register
    became unavailable, 0 if nothing had to be changed.  */
 int
@@ -1947,7 +1940,7 @@
     case 0:
       EXPR_SPEC_DONE_DS (expr) = ds;
       return current_ds != ds ? 1 : 0;
-      
+
     case 1:
       {
 	rtx spec_insn_rtx = create_insn_rtx_from_pattern (spec_pat, NULL_RTX);
@@ -1957,9 +1950,9 @@
 	EXPR_SPEC_DONE_DS (expr) = ds;
         EXPR_NEEDS_SPEC_CHECK_P (expr) = true;
 
-        /* Do not allow clobbering the address register of speculative 
+        /* Do not allow clobbering the address register of speculative
            insns.  */
-        if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)), 
+        if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
                           expr_dest_regno (expr)))
           {
             EXPR_TARGET_AVAILABLE (expr) = false;
@@ -2000,7 +1993,7 @@
   return REGNO (dest);
 }
 
-/* For a given LV_SET, mark all expressions in JOIN_SET, but not present in 
+/* For a given LV_SET, mark all expressions in JOIN_SET, but not present in
    AV_SET having unavailable target register.  */
 void
 mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set)
@@ -2031,7 +2024,7 @@
 av_set_add (av_set_t *setp, expr_t expr)
 {
   av_set_t elem;
-  
+
   gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr)));
   elem = av_set_add_element (setp);
   copy_expr (_AV_SET_EXPR (elem), expr);
@@ -2121,10 +2114,10 @@
       EXPR_USEFULNESS (expr2) = 0;
 
       merge_expr (expr2, expr, NULL);
-      
+
       /* Fix usefulness as it should be now REG_BR_PROB_BASE.  */
       EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE;
-      
+
       av_set_iter_remove (ip);
       return expr2;
     }
@@ -2187,7 +2180,7 @@
   join_distinct_sets (i.lp, fromp);
 }
 
-/* Same as above, but also update availability of target register in 
+/* Same as above, but also update availability of target register in
    TOP judging by TO_LV_SET and FROM_LV_SET.  */
 void
 av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set,
@@ -2204,16 +2197,16 @@
 
       if (expr2)
 	{
-          /* It may be that the expressions have different destination 
+          /* It may be that the expressions have different destination
              registers, in which case we need to check liveness here.  */
           if (EXPR_SEPARABLE_P (expr1))
             {
-              int regno1 = (REG_P (EXPR_LHS (expr1)) 
+              int regno1 = (REG_P (EXPR_LHS (expr1))
                             ? (int) expr_dest_regno (expr1) : -1);
-              int regno2 = (REG_P (EXPR_LHS (expr2)) 
+              int regno2 = (REG_P (EXPR_LHS (expr2))
                             ? (int) expr_dest_regno (expr2) : -1);
-              
-              /* ??? We don't have a way to check restrictions for 
+
+              /* ??? We don't have a way to check restrictions for
                *other* register on the current path, we did it only
                for the current target register.  Give up.  */
               if (regno1 != regno2)
@@ -2227,7 +2220,7 @@
 	  av_set_iter_remove (&i);
 	}
       else
-        /* EXPR1 is present in TOP, but not in FROMP.  Check it on 
+        /* EXPR1 is present in TOP, but not in FROMP.  Check it on
            FROM_LV_SET.  */
         set_unavailable_target_for_expr (expr1, from_lv_set);
     }
@@ -2263,7 +2256,7 @@
   av_set_iterator i;
   bool has_one_nonspec = false;
 
-  /* Keep all speculative exprs, and leave one non-speculative 
+  /* Keep all speculative exprs, and leave one non-speculative
      (the first one).  */
   FOR_EACH_EXPR_1 (expr, i, setp)
     {
@@ -2304,7 +2297,7 @@
       av_set_iter_remove (&i);
 }
 
-/* Multiplies usefulness attribute of each member of av-set *AVP by 
+/* Multiplies usefulness attribute of each member of av-set *AVP by
    value PROB / ALL_PROB.  */
 void
 av_set_split_usefulness (av_set_t av, int prob, int all_prob)
@@ -2313,7 +2306,7 @@
   expr_t expr;
 
   FOR_EACH_EXPR (expr, i, av)
-    EXPR_USEFULNESS (expr) = (all_prob 
+    EXPR_USEFULNESS (expr) = (all_prob
                               ? (EXPR_USEFULNESS (expr) * prob) / all_prob
                               : 0);
 }
@@ -2353,13 +2346,13 @@
 } deps_init_id_data;
 
 
-/* Setup ID for INSN.  FORCE_UNIQUE_P is true when INSN should not be 
+/* Setup ID for INSN.  FORCE_UNIQUE_P is true when INSN should not be
    clonable.  */
 static void
 setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p)
 {
   int type;
-  
+
   /* Determine whether INSN could be cloned and return appropriate vinsn type.
      That clonable insns which can be separated into lhs and rhs have type SET.
      Other clonable insns have type USE.  */
@@ -2370,7 +2363,9 @@
     type = SET;
   else if (type == JUMP_INSN && simplejump_p (insn))
     type = PC;
-  
+  else if (type == DEBUG_INSN)
+    type = !force_unique_p ? USE : INSN;
+
   IDATA_TYPE (id) = type;
   IDATA_REG_SETS (id) = get_clear_regset_from_pool ();
   IDATA_REG_USES (id) = get_clear_regset_from_pool ();
@@ -2422,7 +2417,7 @@
     SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno);
 
 #ifdef STACK_REGS
-  /* Make instructions that set stack registers to be ineligible for 
+  /* Make instructions that set stack registers to be ineligible for
      renaming to avoid issues with find_used_regs.  */
   if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
     deps_init_id_data.force_use_p = true;
@@ -2489,9 +2484,9 @@
       if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs)
 	  || deps_init_id_data.force_use_p)
 	{
-          /* This should be a USE, as we don't want to schedule its RHS 
+          /* This should be a USE, as we don't want to schedule its RHS
              separately.  However, we still want to have them recorded
-             for the purposes of substitution.  That's why we don't 
+             for the purposes of substitution.  That's why we don't
              simply call downgrade_to_use () here.  */
 	  gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET);
 	  gcc_assert (!lhs == !rhs);
@@ -2534,9 +2529,9 @@
 setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p)
 {
   rtx pat = PATTERN (insn);
-  
-  if (GET_CODE (insn) == INSN
-      && GET_CODE (pat) == SET 
+
+  if (NONJUMP_INSN_P (insn)
+      && GET_CODE (pat) == SET
       && !force_unique_p)
     {
       IDATA_RHS (id) = SET_SRC (pat);
@@ -2555,7 +2550,7 @@
   df_ref *rec;
   rtx lhs = IDATA_LHS (id);
   rtx rhs = IDATA_RHS (id);
-  
+
   /* We downgrade only SETs.  */
   if (IDATA_TYPE (id) != SET)
     return;
@@ -2565,11 +2560,11 @@
       IDATA_TYPE (id) = USE;
       return;
     }
-  
+
   for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
     {
       df_ref def = *rec;
-      
+
       if (DF_REF_INSN (def)
           && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY)
           && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id)))
@@ -2579,7 +2574,7 @@
         }
 
 #ifdef STACK_REGS
-      /* Make instructions that set stack registers to be ineligible for 
+      /* Make instructions that set stack registers to be ineligible for
 	 renaming to avoid issues with find_used_regs.  */
       if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG))
 	{
@@ -2587,8 +2582,8 @@
 	  break;
 	}
 #endif
-    }    
-  
+    }
+
   if (must_be_use)
     IDATA_TYPE (id) = USE;
 }
@@ -2600,12 +2595,12 @@
   unsigned uid = INSN_UID (insn);
   df_ref *rec;
   regset tmp = get_clear_regset_from_pool ();
-  
+
   for (rec = DF_INSN_UID_DEFS (uid); *rec; rec++)
     {
       df_ref def = *rec;
       unsigned int regno = DF_REF_REGNO (def);
-      
+
       /* Post modifies are treated like clobbers by sched-deps.c.  */
       if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER
                                      | DF_REF_PRE_POST_MODIFY)))
@@ -2615,7 +2610,7 @@
 	  SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
 
 #ifdef STACK_REGS
-	  /* For stack registers, treat writes to them as writes 
+	  /* For stack registers, treat writes to them as writes
 	     to the first one to be consistent with sched-deps.c.  */
 	  if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
 	    SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG);
@@ -2626,7 +2621,7 @@
           || regno == STACK_POINTER_REGNUM)
         bitmap_set_bit (tmp, regno);
     }
-      
+
   for (rec = DF_INSN_UID_USES (uid); *rec; rec++)
     {
       df_ref use = *rec;
@@ -2641,7 +2636,7 @@
 	  SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
 
 #ifdef STACK_REGS
-	  /* For stack registers, treat reads from them as reads from 
+	  /* For stack registers, treat reads from them as reads from
 	     the first one to be consistent with sched-deps.c.  */
 	  if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
 	    SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG);
@@ -2679,7 +2674,7 @@
   deps_init_id_data.force_unique_p = force_unique_p;
   deps_init_id_data.force_use_p = false;
 
-  init_deps (dc);
+  init_deps (dc, false);
 
   memcpy (&deps_init_id_sched_deps_info,
 	  &const_deps_init_id_sched_deps_info,
@@ -2739,7 +2734,7 @@
   free (pti);
 }
 
-/* Init the s_i_d data for INSN which should be inited just once, when 
+/* Init the s_i_d data for INSN which should be inited just once, when
    we first see the insn.  */
 static void
 init_first_time_insn_data (insn_t insn)
@@ -2747,39 +2742,58 @@
   /* This should not be set if this is the first time we init data for
      insn.  */
   gcc_assert (first_time_insn_init (insn));
-  
+
   /* These are needed for nops too.  */
   INSN_LIVE (insn) = get_regset_from_pool ();
   INSN_LIVE_VALID_P (insn) = false;
-  
+
   if (!INSN_NOP_P (insn))
     {
       INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
       INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
-      INSN_TRANSFORMED_INSNS (insn) 
+      INSN_TRANSFORMED_INSNS (insn)
         = htab_create (16, hash_transformed_insns,
                        eq_transformed_insns, free_transformed_insns);
-      init_deps (&INSN_DEPS_CONTEXT (insn));
+      init_deps (&INSN_DEPS_CONTEXT (insn), true);
     }
 }
 
+/* Free almost all above data for INSN that is scheduled already.
+   Used for extra-large basic blocks.  */
+void
+free_data_for_scheduled_insn (insn_t insn)
+{
+  gcc_assert (! first_time_insn_init (insn));
+
+  if (! INSN_ANALYZED_DEPS (insn))
+    return;
+
+  BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
+  BITMAP_FREE (INSN_FOUND_DEPS (insn));
+  htab_delete (INSN_TRANSFORMED_INSNS (insn));
+
+  /* This is allocated only for bookkeeping insns.  */
+  if (INSN_ORIGINATORS (insn))
+    BITMAP_FREE (INSN_ORIGINATORS (insn));
+  free_deps (&INSN_DEPS_CONTEXT (insn));
+
+  INSN_ANALYZED_DEPS (insn) = NULL;
+
+  /* Clear the readonly flag so we would ICE when trying to recalculate
+     the deps context (as we believe that it should not happen).  */
+  (&INSN_DEPS_CONTEXT (insn))->readonly = 0;
+}
+
 /* Free the same data as above for INSN.  */
 static void
 free_first_time_insn_data (insn_t insn)
 {
   gcc_assert (! first_time_insn_init (insn));
 
-  BITMAP_FREE (INSN_ANALYZED_DEPS (insn));
-  BITMAP_FREE (INSN_FOUND_DEPS (insn));
-  htab_delete (INSN_TRANSFORMED_INSNS (insn));
+  free_data_for_scheduled_insn (insn);
   return_regset_to_pool (INSN_LIVE (insn));
   INSN_LIVE (insn) = NULL;
   INSN_LIVE_VALID_P (insn) = false;
-
-  /* This is allocated only for bookkeeping insns.  */
-  if (INSN_ORIGINATORS (insn))
-    BITMAP_FREE (INSN_ORIGINATORS (insn));
-  free_deps (&INSN_DEPS_CONTEXT (insn));
 }
 
 /* Initialize region-scope data structures for basic blocks.  */
@@ -2842,7 +2856,7 @@
     if (CANT_MOVE (insn)
 	|| INSN_ASM_P (insn)
 	|| SCHED_GROUP_P (insn)
-	|| prologue_epilogue_contains (insn) 
+	|| prologue_epilogue_contains (insn)
 	/* Exception handling insns are always unique.  */
 	|| (flag_non_call_exceptions && can_throw_internal (insn))
 	/* TRAP_IF though have an INSN code is control_flow_insn_p ().  */
@@ -2862,7 +2876,7 @@
     /* Initialize INSN's expr.  */
     init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0,
 	       REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn),
-	       spec_done_ds, 0, 0, NULL, true, false, false, false, 
+	       spec_done_ds, 0, 0, NULL, true, false, false, false,
                CANT_MOVE (insn));
   }
 
@@ -2881,7 +2895,7 @@
       extend_insn_data, /* extend_insn */
       init_global_and_expr_for_insn /* init_insn */
     };
-  
+
   sched_scan (&ssi, bbs, NULL, NULL, NULL);
 }
 
@@ -2907,9 +2921,9 @@
       free_first_time_insn_data (insn);
       INSN_WS_LEVEL (insn) = 0;
       CANT_MOVE (insn) = 0;
-      
-      /* We can no longer assert this, as vinsns of this insn could be 
-         easily live in other insn's caches.  This should be changed to 
+
+      /* We can no longer assert this, as vinsns of this insn could be
+         easily live in other insn's caches.  This should be changed to
          a counter-like approach among all vinsns.  */
       gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1);
       clear_expr (INSN_EXPR (insn));
@@ -2949,8 +2963,8 @@
 }
 
 
-/* In the below hooks, we merely calculate whether or not a dependence 
-   exists, and in what part of insn.  However, we will need more data 
+/* In the below hooks, we merely calculate whether or not a dependence
+   exists, and in what part of insn.  However, we will need more data
    when we'll start caching dependence requests.  */
 
 /* Container to hold information for dependency analysis.  */
@@ -3063,7 +3077,7 @@
 
       if (reg_last->sets)
 	*dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT;
-	
+
       if (reg_last->uses)
 	*dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI;
     }
@@ -3216,6 +3230,11 @@
     return false;
 
   dc = &INSN_DEPS_CONTEXT (pred);
+
+  /* We init this field lazily.  */
+  if (dc->reg_last == NULL)
+    init_deps_reg_last (dc);
+
   if (!dc->readonly)
     {
       has_dependence_data.pro = NULL;
@@ -3245,13 +3264,13 @@
 
   /* Do not allow stores to memory to move through checks.  Currently
      we don't move this to sched-deps.c as the check doesn't have
-     obvious places to which this dependence can be attached.  
+     obvious places to which this dependence can be attached.
      FIMXE: this should go to a hook.  */
   if (EXPR_LHS (expr)
       && MEM_P (EXPR_LHS (expr))
       && sel_insn_is_speculation_check (pred))
     has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
-  
+
   *has_dep_pp = has_dependence_data.has_dep_p;
   ds = 0;
   for (i = 0; i < DEPS_IN_NOWHERE; i++)
@@ -3262,9 +3281,9 @@
 }
 
 
-/* Dependence hooks implementation that checks dependence latency constraints 
-   on the insns being scheduled.  The entry point for these routines is 
-   tick_check_p predicate.  */ 
+/* Dependence hooks implementation that checks dependence latency constraints
+   on the insns being scheduled.  The entry point for these routines is
+   tick_check_p predicate.  */
 
 static struct
 {
@@ -3374,7 +3393,7 @@
   tick_check_data.cycle = 0;
   tick_check_data.seen_true_dep_p = false;
   sched_deps_info = &tick_check_sched_deps_info;
-  
+
   gcc_assert (!dc->readonly);
   dc->readonly = 1;
   deps_analyze_insn (dc, EXPR_INSN_RTX (expr));
@@ -3397,7 +3416,7 @@
 
   if (lhs == NULL || dest == NULL)
     return false;
-  
+
   return rtx_equal_p (lhs, dest);
 }
 
@@ -3417,7 +3436,7 @@
   return s_i_d && !! INSN_SPEC_CHECKED_DS (insn);
 }
 
-/* Extracts machine mode MODE and destination location DST_LOC 
+/* Extracts machine mode MODE and destination location DST_LOC
    for given INSN.  */
 void
 get_dest_and_mode (rtx insn, rtx *dst_loc, enum machine_mode *mode)
@@ -3436,7 +3455,7 @@
     *mode = GET_MODE (*dst_loc);
 }
 
-/* Returns true when moving through JUMP will result in bookkeeping 
+/* Returns true when moving through JUMP will result in bookkeeping
    creation.  */
 bool
 bookkeeping_can_be_created_if_moved_through_p (insn_t jump)
@@ -3459,7 +3478,7 @@
 }
 
 #ifdef ENABLE_CHECKING
-/* Check that the region we're scheduling still has at most one 
+/* Check that the region we're scheduling still has at most one
    backedge.  */
 static void
 verify_backedges (void)
@@ -3469,13 +3488,13 @@
       int i, n = 0;
       edge e;
       edge_iterator ei;
-          
+
       for (i = 0; i < current_nr_blocks; i++)
         FOR_EACH_EDGE (e, ei, BASIC_BLOCK (BB_TO_BLOCK (i))->succs)
           if (in_current_region_p (e->dest)
               && BLOCK_TO_BB (e->dest->index) < i)
             n++;
-          
+
       gcc_assert (n <= 1);
     }
 }
@@ -3489,17 +3508,24 @@
 maybe_tidy_empty_bb (basic_block bb)
 {
   basic_block succ_bb, pred_bb;
+  edge e;
+  edge_iterator ei;
   bool rescan_p;
 
   /* Keep empty bb only if this block immediately precedes EXIT and
      has incoming non-fallthrough edge.  Otherwise remove it.  */
-  if (!sel_bb_empty_p (bb) 
-      || (single_succ_p (bb) 
+  if (!sel_bb_empty_p (bb)
+      || (single_succ_p (bb)
           && single_succ (bb) == EXIT_BLOCK_PTR
-          && (!single_pred_p (bb) 
+          && (!single_pred_p (bb)
               || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU))))
     return false;
 
+  /* Do not attempt to redirect complex edges.  */
+  FOR_EACH_EDGE (e, ei, bb->preds)
+    if (e->flags & EDGE_COMPLEX)
+      return false;
+
   free_data_sets (bb);
 
   /* Do not delete BB if it has more than one successor.
@@ -3518,9 +3544,6 @@
   /* Redirect all non-fallthru edges to the next bb.  */
   while (rescan_p)
     {
-      edge e;
-      edge_iterator ei;
-
       rescan_p = false;
 
       FOR_EACH_EDGE (e, ei, bb->preds)
@@ -3556,19 +3579,20 @@
   return true;
 }
 
-/* Tidy the control flow after we have removed original insn from 
+/* Tidy the control flow after we have removed original insn from
    XBB.  Return true if we have removed some blocks.  When FULL_TIDYING
    is true, also try to optimize control flow on non-empty blocks.  */
 bool
 tidy_control_flow (basic_block xbb, bool full_tidying)
 {
   bool changed = true;
-  
+  insn_t first, last;
+
   /* First check whether XBB is empty.  */
   changed = maybe_tidy_empty_bb (xbb);
   if (changed || !full_tidying)
     return changed;
-  
+
   /* Check if there is a unnecessary jump after insn left.  */
   if (jump_leads_only_to_bb_p (BB_END (xbb), xbb->next_bb)
       && INSN_SCHED_TIMES (BB_END (xbb)) == 0
@@ -3579,16 +3603,30 @@
       tidy_fallthru_edge (EDGE_SUCC (xbb, 0));
     }
 
+  first = sel_bb_head (xbb);
+  last = sel_bb_end (xbb);
+  if (MAY_HAVE_DEBUG_INSNS)
+    {
+      if (first != last && DEBUG_INSN_P (first))
+	do
+	  first = NEXT_INSN (first);
+	while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first)));
+
+      if (first != last && DEBUG_INSN_P (last))
+	do
+	  last = PREV_INSN (last);
+	while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last)));
+    }
   /* Check if there is an unnecessary jump in previous basic block leading
-     to next basic block left after removing INSN from stream.  
-     If it is so, remove that jump and redirect edge to current 
-     basic block (where there was INSN before deletion).  This way 
-     when NOP will be deleted several instructions later with its 
-     basic block we will not get a jump to next instruction, which 
+     to next basic block left after removing INSN from stream.
+     If it is so, remove that jump and redirect edge to current
+     basic block (where there was INSN before deletion).  This way
+     when NOP will be deleted several instructions later with its
+     basic block we will not get a jump to next instruction, which
      can be harmful.  */
-  if (sel_bb_head (xbb) == sel_bb_end (xbb) 
+  if (first == last
       && !sel_bb_empty_p (xbb)
-      && INSN_NOP_P (sel_bb_end (xbb))
+      && INSN_NOP_P (last)
       /* Flow goes fallthru from current block to the next.  */
       && EDGE_COUNT (xbb->succs) == 1
       && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU)
@@ -3618,8 +3656,8 @@
   return changed;
 }
 
-/* Rip-off INSN from the insn stream.  When ONLY_DISCONNECT is true, 
-   do not delete insn's data, because it will be later re-emitted.  
+/* Rip-off INSN from the insn stream.  When ONLY_DISCONNECT is true,
+   do not delete insn's data, because it will be later re-emitted.
    Return true if we have removed some blocks afterwards.  */
 bool
 sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying)
@@ -3628,6 +3666,21 @@
 
   gcc_assert (INSN_IN_STREAM_P (insn));
 
+  if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb))
+    {
+      expr_t expr;
+      av_set_iterator i;
+
+      /* When we remove a debug insn that is head of a BB, it remains
+	 in the AV_SET of the block, but it shouldn't.  */
+      FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb))
+	if (EXPR_INSN_RTX (expr) == insn)
+	  {
+	    av_set_iter_remove (&i);
+	    break;
+	  }
+    }
+
   if (only_disconnect)
     {
       insn_t prev = PREV_INSN (insn);
@@ -3666,7 +3719,7 @@
   insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb));
 
   for (; insn != next_tail; insn = NEXT_INSN (insn))
-    if (INSN_P (insn))
+    if (NONDEBUG_INSN_P (insn))
       res++;
 
   return res;
@@ -3699,15 +3752,15 @@
 	  && !in_current_region_p (single_pred (bb)))
 	{
           /* We can have preds outside a region when splitting edges
-             for pipelining of an outer loop.  Use succ instead.  
+             for pipelining of an outer loop.  Use succ instead.
              There should be only one of them.  */
 	  insn_t succ = NULL;
           succ_iterator si;
           bool first = true;
-          
+
 	  gcc_assert (flag_sel_sched_pipelining_outer_loops
 		      && current_loop_nest);
-          FOR_EACH_SUCC_1 (succ, si, insn, 
+          FOR_EACH_SUCC_1 (succ, si, insn,
                            SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
             {
               gcc_assert (first);
@@ -3726,7 +3779,7 @@
 	  gcc_assert (n == 1);
 
 	  seqno = INSN_SEQNO (preds[0]);
-              
+
 	  free (preds);
 	}
     }
@@ -3734,7 +3787,8 @@
   return seqno;
 }
 
-/*  Find the proper seqno for inserting at INSN.  */
+/*  Find the proper seqno for inserting at INSN.  Returns -1 if no predecessors
+    with positive seqno exist.  */
 int
 get_seqno_by_preds (rtx insn)
 {
@@ -3748,12 +3802,11 @@
       return INSN_SEQNO (tmp);
     else
       tmp = PREV_INSN (tmp);
-  
+
   cfg_preds (bb, &preds, &n);
   for (i = 0, seqno = -1; i < n; i++)
     seqno = MAX (seqno, INSN_SEQNO (preds[i]));
 
-  gcc_assert (seqno > 0);
   return seqno;
 }
 
@@ -3809,17 +3862,26 @@
 extend_insn_data (void)
 {
   int reserve;
-  
+
   sched_extend_target ();
   sched_deps_init (false);
 
   /* Extend data structures for insns from current region.  */
-  reserve = (sched_max_luid + 1 
+  reserve = (sched_max_luid + 1
              - VEC_length (sel_insn_data_def, s_i_d));
-  if (reserve > 0 
+  if (reserve > 0
       && ! VEC_space (sel_insn_data_def, s_i_d, reserve))
-    VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d,
-                           3 * sched_max_luid / 2);
+    {
+      int size;
+
+      if (sched_max_luid / 2 > 1024)
+        size = sched_max_luid + 1024;
+      else
+        size = 3 * sched_max_luid / 2;
+
+
+      VEC_safe_grow_cleared (sel_insn_data_def, heap, s_i_d, size);
+    }
 }
 
 /* Finalize data structures for insns from current region.  */
@@ -3833,7 +3895,7 @@
   for (i = 0; i < VEC_length (sel_insn_data_def, s_i_d); i++)
     {
       sel_insn_data_def *sid_entry = VEC_index (sel_insn_data_def, s_i_d, i);
-      
+
       if (sid_entry->live)
         return_regset_to_pool (sid_entry->live);
       if (sid_entry->analyzed_deps)
@@ -3846,13 +3908,13 @@
       if (EXPR_VINSN (&sid_entry->expr))
         {
           clear_expr (&sid_entry->expr);
-          
+
           /* Also, clear CANT_MOVE bit here, because we really don't want it
              to be passed to the next region.  */
           CANT_MOVE_BY_LUID (i) = 0;
         }
     }
-  
+
   VEC_free (sel_insn_data_def, heap, s_i_d);
 }
 
@@ -3901,7 +3963,7 @@
 
   if (insn_init_create_new_vinsn_p)
     change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p));
-  
+
   if (first_time_insn_init (insn))
     init_first_time_insn_data (insn);
 }
@@ -3912,13 +3974,13 @@
 init_simplejump_data (insn_t insn)
 {
   init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0,
-	     REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, NULL, true, false, false, 
+	     REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, NULL, true, false, false,
 	     false, true);
   INSN_SEQNO (insn) = get_seqno_of_a_pred (insn);
   init_first_time_insn_data (insn);
 }
 
-/* Perform deferred initialization of insns.  This is used to process 
+/* Perform deferred initialization of insns.  This is used to process
    a new jump that may be created by redirect_edge.  */
 void
 sel_init_new_insn (insn_t insn, int flags)
@@ -3931,7 +3993,7 @@
       extend_bb_info ();
       create_initial_data_sets (BLOCK_FOR_INSN (insn));
     }
-  
+
   if (flags & INSN_INIT_TODO_LUID)
     sched_init_luids (NULL, NULL, NULL, insn);
 
@@ -3947,7 +4009,7 @@
       extend_insn_data ();
       init_simplejump_data (insn);
     }
-  
+
   gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn))
               == CONTAINING_RGN (BB_TO_BLOCK (0)));
 }
@@ -3962,7 +4024,7 @@
   gcc_assert (!BB_LV_SET_VALID_P (bb));
 
   BB_LV_SET (bb) = get_regset_from_pool ();
-  COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb)); 
+  COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb));
   BB_LV_SET_VALID_P (bb) = true;
 }
 
@@ -3971,10 +4033,10 @@
 copy_lv_set_from (basic_block bb, basic_block from_bb)
 {
   gcc_assert (!BB_LV_SET_VALID_P (bb));
-  
+
   COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb));
   BB_LV_SET_VALID_P (bb) = true;
-}                
+}
 
 /* Initialize lv set of all bb headers.  */
 void
@@ -4261,6 +4323,11 @@
   if (!any_condjump_p (jump))
     return NULL;
 
+  /* A basic block that ends with a conditional jump may still have one successor
+     (and be followed by a barrier), we are not interested.  */
+  if (single_succ_p (BLOCK_FOR_INSN (jump)))
+    return NULL;
+
   return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest;
 }
 
@@ -4350,7 +4417,7 @@
   if (succs_info_pool.top == succs_info_pool.max_top)
     {
       int i;
-      
+
       if (++succs_info_pool.max_top >= succs_info_pool.size)
         gcc_unreachable ();
 
@@ -4369,23 +4436,23 @@
 void
 free_succs_info (struct succs_info * sinfo)
 {
-  gcc_assert (succs_info_pool.top >= 0 
+  gcc_assert (succs_info_pool.top >= 0
               && &succs_info_pool.stack[succs_info_pool.top] == sinfo);
   succs_info_pool.top--;
 
   /* Clear stale info.  */
-  VEC_block_remove (rtx, sinfo->succs_ok, 
+  VEC_block_remove (rtx, sinfo->succs_ok,
                     0, VEC_length (rtx, sinfo->succs_ok));
-  VEC_block_remove (rtx, sinfo->succs_other, 
+  VEC_block_remove (rtx, sinfo->succs_other,
                     0, VEC_length (rtx, sinfo->succs_other));
-  VEC_block_remove (int, sinfo->probs_ok, 
+  VEC_block_remove (int, sinfo->probs_ok,
                     0, VEC_length (int, sinfo->probs_ok));
   sinfo->all_prob = 0;
   sinfo->succs_ok_n = 0;
   sinfo->all_succs_n = 0;
 }
 
-/* Compute successor info for INSN.  FLAGS are the flags passed 
+/* Compute successor info for INSN.  FLAGS are the flags passed
    to the FOR_EACH_SUCC_1 iterator.  */
 struct succs_info *
 compute_succs_info (insn_t insn, short flags)
@@ -4405,10 +4472,10 @@
         {
           VEC_safe_push (rtx, heap, sinfo->succs_ok, succ);
           VEC_safe_push (int, heap, sinfo->probs_ok,
-                         /* FIXME: Improve calculation when skipping 
+                         /* FIXME: Improve calculation when skipping
                             inner loop to exits.  */
-                         (si.bb_end 
-                          ? si.e1->probability 
+                         (si.bb_end
+                          ? si.e1->probability
                           : REG_BR_PROB_BASE));
           sinfo->succs_ok_n++;
         }
@@ -4427,7 +4494,7 @@
   return sinfo;
 }
 
-/* Return the predecessors of BB in PREDS and their number in N. 
+/* Return the predecessors of BB in PREDS and their number in N.
    Empty blocks are skipped.  SIZE is used to allocate PREDS.  */
 static void
 cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size)
@@ -4450,7 +4517,7 @@
       else
 	{
 	  if (*n == *size)
-	    *preds = XRESIZEVEC (insn_t, *preds, 
+	    *preds = XRESIZEVEC (insn_t, *preds,
                                  (*size = 2 * *size + 1));
 	  (*preds)[(*n)++] = bb_end;
 	}
@@ -4459,8 +4526,8 @@
   gcc_assert (*n != 0);
 }
 
-/* Find all predecessors of BB and record them in PREDS and their number 
-   in N.  Empty blocks are skipped, and only normal (forward in-region) 
+/* Find all predecessors of BB and record them in PREDS and their number
+   in N.  Empty blocks are skipped, and only normal (forward in-region)
    edges are processed.  */
 static void
 cfg_preds (basic_block bb, insn_t **preds, int *n)
@@ -4498,7 +4565,7 @@
   return false;
 }
 
-/* Returns true when BB should be the end of an ebb.  Adapted from the 
+/* Returns true when BB should be the end of an ebb.  Adapted from the
    code in sched-ebb.c.  */
 bool
 bb_ends_ebb_p (basic_block bb)
@@ -4506,7 +4573,7 @@
   basic_block next_bb = bb_next_bb (bb);
   edge e;
   edge_iterator ei;
-  
+
   if (next_bb == EXIT_BLOCK_PTR
       || bitmap_bit_p (forced_ebb_heads, next_bb->index)
       || (LABEL_P (BB_HEAD (next_bb))
@@ -4540,7 +4607,7 @@
     {
       if (ptr == BLOCK_FOR_INSN (succ))
         return true;
-    
+
       if (bb_ends_ebb_p (ptr))
         return false;
 
@@ -4562,7 +4629,7 @@
 
   if (!rev_top_order_index || rev_top_order_index_len < last_basic_block)
     {
-      rev_top_order_index_len = last_basic_block; 
+      rev_top_order_index_len = last_basic_block;
       rev_top_order_index = XRESIZEVEC (int, rev_top_order_index,
                                         rev_top_order_index_len);
     }
@@ -4598,7 +4665,7 @@
 	EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) = 0;
 
         /* We cannot use the changed caches, as previously we could ignore
-           the LHS dependence due to enabled renaming and transform 
+           the LHS dependence due to enabled renaming and transform
            the expression, and currently we'll be unable to do this.  */
         htab_empty (INSN_TRANSFORMED_INSNS (insn));
       }
@@ -4649,12 +4716,12 @@
   int succs_size;
 
   succs_size = MAX_WS + 1;
-  succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size); 
+  succs_info_pool.stack = XCNEWVEC (struct succs_info, succs_size);
   succs_info_pool.size = succs_size;
   succs_info_pool.top = -1;
   succs_info_pool.max_top = -1;
 
-  sched_lists_pool = create_alloc_pool ("sel-sched-lists", 
+  sched_lists_pool = create_alloc_pool ("sel-sched-lists",
                                         sizeof (struct _list_node), 500);
 }
 
@@ -4663,7 +4730,7 @@
 free_sched_pools (void)
 {
   int i;
-  
+
   free_alloc_pool (sched_lists_pool);
   gcc_assert (succs_info_pool.top == -1);
   for (i = 0; i < succs_info_pool.max_top; i++)
@@ -4676,7 +4743,7 @@
 }
 
 
-/* Returns a position in RGN where BB can be inserted retaining 
+/* Returns a position in RGN where BB can be inserted retaining
    topological order.  */
 static int
 find_place_to_insert_bb (basic_block bb, int rgn)
@@ -4684,7 +4751,7 @@
   bool has_preds_outside_rgn = false;
   edge e;
   edge_iterator ei;
-  
+
   /* Find whether we have preds outside the region.  */
   FOR_EACH_EDGE (e, ei, bb->preds)
     if (!in_current_region_p (e->src))
@@ -4692,7 +4759,7 @@
         has_preds_outside_rgn = true;
         break;
       }
-  
+
   /* Recompute the top order -- needed when we have > 1 pred
      and in case we don't have preds outside.  */
   if (flag_sel_sched_pipelining_outer_loops
@@ -4704,11 +4771,11 @@
       for (i = RGN_NR_BLOCKS (rgn) - 1; i >= 0; i--)
         {
           cur_bbi = BB_TO_BLOCK (i);
-          if (rev_top_order_index[bbi] 
+          if (rev_top_order_index[bbi]
               < rev_top_order_index[cur_bbi])
             break;
         }
-              
+
       /* We skipped the right block, so we increase i.  We accomodate
          it for increasing by step later, so we decrease i.  */
       return (i + 1) - 1;
@@ -4731,9 +4798,9 @@
   if (EDGE_COUNT (bb->succs) > 0)
     {
       int pred_bbi;
-          
+
       gcc_assert (EDGE_COUNT (bb->preds) == 1);
-          
+
       pred_bbi = EDGE_PRED (bb, 0)->src->index;
       return BLOCK_TO_BB (pred_bbi);
     }
@@ -4753,14 +4820,14 @@
 
   bitmap_clear_bit (blocks_to_reschedule, bb->index);
 
-  /* Can't assert av_set properties because we use sel_aremove_bb 
-     when removing loop preheader from the region.  At the point of 
+  /* Can't assert av_set properties because we use sel_aremove_bb
+     when removing loop preheader from the region.  At the point of
      removing the preheader we already have deallocated sel_region_bb_info.  */
   gcc_assert (BB_LV_SET (bb) == NULL
               && !BB_LV_SET_VALID_P (bb)
               && BB_AV_LEVEL (bb) == 0
               && BB_AV_SET (bb) == NULL);
-  
+
   delete_basic_block (bb);
 }
 
@@ -4777,13 +4844,13 @@
 
   gcc_assert (RGN_HAS_REAL_EBB (rgn) == 0
               && ebb_head[bbi] == pos);
-  
+
   /* Make a place for the new block.  */
   extend_regions ();
 
   for (i = RGN_BLOCKS (rgn + 1) - 1; i >= pos; i--)
     BLOCK_TO_BB (rgn_bb_table[i])++;
-  
+
   memmove (rgn_bb_table + pos + 1,
            rgn_bb_table + pos,
            (RGN_BLOCKS (nr_regions) - pos) * sizeof (*rgn_bb_table));
@@ -4794,7 +4861,7 @@
   CONTAINING_RGN (bb->index) = rgn;
 
   RGN_NR_BLOCKS (rgn)++;
-  
+
   for (i = rgn + 1; i <= nr_regions; i++)
     RGN_BLOCKS (i)++;
 }
@@ -4824,7 +4891,7 @@
     RGN_BLOCKS (i)--;
 }
 
-/* Add BB to the current region  and update all data.  If BB is NULL, add all 
+/* Add BB to the current region and update all data.  If BB is NULL, add all
    blocks from last_added_blocks vector.  */
 static void
 sel_add_bb (basic_block bb)
@@ -4834,23 +4901,23 @@
   sched_init_bbs ();
   sel_init_bbs (last_added_blocks, NULL);
 
-  /* When bb is passed explicitly, the vector should contain 
+  /* When bb is passed explicitly, the vector should contain
      the only element that equals to bb; otherwise, the vector
      should not be NULL.  */
   gcc_assert (last_added_blocks != NULL);
-  
+
   if (bb != NULL)
     {
       gcc_assert (VEC_length (basic_block, last_added_blocks) == 1
-                  && VEC_index (basic_block, 
-                                last_added_blocks, 0) == bb);     
+                  && VEC_index (basic_block,
+                                last_added_blocks, 0) == bb);
       add_block_to_current_region (bb);
 
       /* We associate creating/deleting data sets with the first insn
          appearing / disappearing in the bb.  */
       if (!sel_bb_empty_p (bb) && BB_LV_SET (bb) == NULL)
 	create_initial_data_sets (bb);
-    
+
       VEC_free (basic_block, heap, last_added_blocks);
     }
   else
@@ -4859,14 +4926,14 @@
       int i;
       basic_block temp_bb = NULL;
 
-      for (i = 0; 
+      for (i = 0;
            VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
         {
           add_block_to_current_region (bb);
           temp_bb = bb;
         }
 
-      /* We need to fetch at least one bb so we know the region 
+      /* We need to fetch at least one bb so we know the region
          to update.  */
       gcc_assert (temp_bb != NULL);
       bb = temp_bb;
@@ -4877,17 +4944,17 @@
   rgn_setup_region (CONTAINING_RGN (bb->index));
 }
 
-/* Remove BB from the current region and update all data.  
+/* Remove BB from the current region and update all data.
    If REMOVE_FROM_CFG_PBB is true, also remove the block cfom cfg.  */
 static void
 sel_remove_bb (basic_block bb, bool remove_from_cfg_p)
 {
   gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX);
-  
+
   remove_bb_from_region (bb);
   return_bb_to_pool (bb);
   bitmap_clear_bit (blocks_to_reschedule, bb->index);
-  
+
   if (remove_from_cfg_p)
     delete_and_free_basic_block (bb);
 
@@ -4900,15 +4967,15 @@
 {
   gcc_assert (in_current_region_p (merge_bb));
 
-  concat_note_lists (BB_NOTE_LIST (empty_bb), 
+  concat_note_lists (BB_NOTE_LIST (empty_bb),
 		     &BB_NOTE_LIST (merge_bb));
   BB_NOTE_LIST (empty_bb) = NULL_RTX;
 
 }
 
-/* Remove an empty basic block EMPTY_BB.  When MERGE_UP_P is true, we put 
-   EMPTY_BB's note lists into its predecessor instead of putting them 
-   into the successor.  When REMOVE_FROM_CFG_P is true, also remove 
+/* Remove an empty basic block EMPTY_BB.  When MERGE_UP_P is true, we put
+   EMPTY_BB's note lists into its predecessor instead of putting them
+   into the successor.  When REMOVE_FROM_CFG_P is true, also remove
    the empty block.  */
 void
 sel_remove_empty_bb (basic_block empty_bb, bool merge_up_p,
@@ -4931,7 +4998,7 @@
 
       merge_bb = bb_next_bb (empty_bb);
 
-      /* Redirect incoming edges (except fallthrough one) of EMPTY_BB to its 
+      /* Redirect incoming edges (except fallthrough one) of EMPTY_BB to its
          successor block.  */
       for (ei = ei_start (empty_bb->preds);
            (e = ei_safe_edge (ei)); )
@@ -5017,15 +5084,15 @@
   sel_remove_bb (empty_bb, remove_from_cfg_p);
 }
 
-/* An implementation of create_basic_block hook, which additionally updates 
+/* An implementation of create_basic_block hook, which additionally updates
    per-bb data structures.  */
 static basic_block
 sel_create_basic_block (void *headp, void *endp, basic_block after)
 {
   basic_block new_bb;
   insn_t new_bb_note;
-  
-  gcc_assert (flag_sel_sched_pipelining_outer_loops 
+
+  gcc_assert (flag_sel_sched_pipelining_outer_loops
               || last_added_blocks == NULL);
 
   new_bb_note = get_bb_note_from_pool ();
@@ -5075,7 +5142,7 @@
     }
 }
 
-/* Splits BB on two basic blocks, adding it to the region and extending 
+/* Splits BB on two basic blocks, adding it to the region and extending
    per-bb data structures.  Returns the newly created bb.  */
 static basic_block
 sel_split_block (basic_block bb, rtx after)
@@ -5087,7 +5154,7 @@
   sel_add_bb (new_bb);
 
   /* This should be called after sel_add_bb, because this uses
-     CONTAINING_RGN for the new block, which is not yet initialized.  
+     CONTAINING_RGN for the new block, which is not yet initialized.
      FIXME: this function may be a no-op now.  */
   change_loops_latches (bb, new_bb);
 
@@ -5127,7 +5194,7 @@
   return NULL;
 }
 
-/* Look for a new jump either in FROM_BB block or in newly created JUMP_BB block. 
+/* Look for a new jump either in FROM_BB block or in newly created JUMP_BB block.
    New means having UID at least equal to PREV_MAX_UID.  */
 static rtx
 find_new_jump (basic_block from, basic_block jump_bb, int prev_max_uid)
@@ -5137,7 +5204,7 @@
   /* Return immediately if no new insns were emitted.  */
   if (get_max_uid () == prev_max_uid)
     return NULL;
-  
+
   /* Now check both blocks for new jumps.  It will ever be only one.  */
   if ((jump = check_for_new_jump (from, prev_max_uid)))
     return jump;
@@ -5161,15 +5228,15 @@
   prev_max_uid = get_max_uid ();
   new_bb = split_edge (e);
 
-  if (flag_sel_sched_pipelining_outer_loops 
+  if (flag_sel_sched_pipelining_outer_loops
       && current_loop_nest)
     {
       int i;
       basic_block bb;
 
-      /* Some of the basic blocks might not have been added to the loop.  
+      /* Some of the basic blocks might not have been added to the loop.
          Add them here, until this is fixed in force_fallthru.  */
-      for (i = 0; 
+      for (i = 0;
            VEC_iterate (basic_block, last_added_blocks, i, bb); i++)
         if (!bb->loop_father)
           {
@@ -5238,9 +5305,9 @@
   sched_create_recovery_edges (first_bb, recovery_block, second_bb);
   if (current_loops != NULL)
     add_bb_to_loop (recovery_block, first_bb->loop_father);
-  
+
   sel_add_bb (recovery_block);
-    
+
   jump = BB_END (recovery_block);
   gcc_assert (sel_bb_head (recovery_block) == jump);
   sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
@@ -5252,8 +5319,6 @@
 void
 sel_merge_blocks (basic_block a, basic_block b)
 {
-  gcc_assert (can_merge_blocks_p (a, b));
-
   sel_remove_empty_bb (b, true, false);
   merge_blocks (a, b);
 
@@ -5269,9 +5334,9 @@
   basic_block jump_bb, src;
   int prev_max_uid;
   rtx jump;
-    
+
   gcc_assert (!sel_bb_empty_p (e->src));
-  
+
   src = e->src;
   prev_max_uid = get_max_uid ();
   jump_bb = redirect_edge_and_branch_force (e, to);
@@ -5284,7 +5349,7 @@
   if (current_loop_nest
       && pipelining_p)
     gcc_assert (loop_latch_edge (current_loop_nest));
-  
+
   jump = find_new_jump (src, jump_bb, prev_max_uid);
   if (jump)
     sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
@@ -5298,6 +5363,7 @@
   basic_block src;
   int prev_max_uid;
   rtx jump;
+  edge redirected;
 
   latch_edge_p = (pipelining_p
                   && current_loop_nest
@@ -5305,9 +5371,10 @@
 
   src = e->src;
   prev_max_uid = get_max_uid ();
-  
-  redirect_edge_and_branch (e, to);
-  gcc_assert (last_added_blocks == NULL);
+
+  redirected = redirect_edge_and_branch (e, to);
+
+  gcc_assert (redirected && last_added_blocks == NULL);
 
   /* When we've redirected a latch edge, update the header.  */
   if (latch_edge_p)
@@ -5367,6 +5434,8 @@
 
   if (label == NULL_RTX)
     insn_rtx = emit_insn (pattern);
+  else if (DEBUG_INSN_P (label))
+    insn_rtx = emit_debug_insn (pattern);
   else
     {
       insn_rtx = emit_jump_insn (pattern);
@@ -5402,6 +5471,10 @@
 {
   rtx res;
 
+  if (DEBUG_INSN_P (insn_rtx))
+    return create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
+					 insn_rtx);
+
   gcc_assert (NONJUMP_INSN_P (insn_rtx));
 
   res = create_insn_rtx_from_pattern (copy_rtx (PATTERN (insn_rtx)),
@@ -5422,7 +5495,7 @@
 /* Helpers for global init.  */
 /* This structure is used to be able to call existing bundling mechanism
    and calculate insn priorities.  */
-static struct haifa_sched_info sched_sel_haifa_sched_info = 
+static struct haifa_sched_info sched_sel_haifa_sched_info =
 {
   NULL, /* init_ready_list */
   NULL, /* can_schedule_ready_p */
@@ -5431,6 +5504,7 @@
   NULL, /* rgn_rank */
   sel_print_insn, /* rgn_print_insn */
   contributes_to_priority,
+  NULL, /* insn_finishes_block_p */
 
   NULL, NULL,
   NULL, NULL,
@@ -5443,7 +5517,7 @@
 };
 
 /* Setup special insns used in the scheduler.  */
-void 
+void
 setup_nop_and_exit_insns (void)
 {
   gcc_assert (nop_pattern == NULL_RTX
@@ -5487,9 +5561,9 @@
 void
 sel_set_sched_flags (void)
 {
-  /* ??? This means that set_sched_flags were called, and we decided to 
+  /* ??? This means that set_sched_flags were called, and we decided to
      support speculation.  However, set_sched_flags also modifies flags
-     on current_sched_info, doing this only at global init.  And we 
+     on current_sched_info, doing this only at global init.  And we
      sometimes change c_s_i later.  So put the correct flags again.  */
   if (spec_info && targetm.sched.set_sched_flags)
     targetm.sched.set_sched_flags (spec_info);
@@ -5514,9 +5588,9 @@
   common_sched_info = &sel_common_sched_info;
 
   current_sched_info = &sched_sel_haifa_sched_info;
-  current_sched_info->sched_max_insns_priority = 
+  current_sched_info->sched_max_insns_priority =
     get_rgn_sched_max_insns_priority ();
-  
+
   sel_set_sched_flags ();
 }
 
@@ -5550,14 +5624,14 @@
 
   /* FIXME: This will work only when EBBs are not created.  */
   if (new_rgn_number != 0)
-    RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) + 
+    RGN_BLOCKS (new_rgn_number) = RGN_BLOCKS (new_rgn_number - 1) +
       RGN_NR_BLOCKS (new_rgn_number - 1);
   else
     RGN_BLOCKS (new_rgn_number) = 0;
 
   /* Set the blocks of the next region so the other functions may
      calculate the number of blocks in the region.  */
-  RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) + 
+  RGN_BLOCKS (new_rgn_number + 1) = RGN_BLOCKS (new_rgn_number) +
     RGN_NR_BLOCKS (new_rgn_number);
 
   nr_regions++;
@@ -5573,8 +5647,8 @@
   basic_block bb1 = *(const basic_block *) x;
   basic_block bb2 = *(const basic_block *) y;
 
-  gcc_assert (bb1 == bb2 
-	      || rev_top_order_index[bb1->index] 
+  gcc_assert (bb1 == bb2
+	      || rev_top_order_index[bb1->index]
 		 != rev_top_order_index[bb2->index]);
 
   /* It's a reverse topological order in REV_TOP_ORDER_INDEX, so
@@ -5585,7 +5659,7 @@
     return 1;
 }
 
-/* Create a region for LOOP and return its number.  If we don't want 
+/* Create a region for LOOP and return its number.  If we don't want
    to pipeline LOOP, return -1.  */
 static int
 make_region_from_loop (struct loop *loop)
@@ -5599,10 +5673,10 @@
   basic_block *loop_blocks;
   basic_block preheader_block;
 
-  if (loop->num_nodes 
+  if (loop->num_nodes
       > (unsigned) PARAM_VALUE (PARAM_MAX_PIPELINE_REGION_BLOCKS))
     return -1;
-  
+
   /* Don't pipeline loops whose latch belongs to some of its inner loops.  */
   for (inner = loop->inner; inner; inner = inner->inner)
     if (flow_bb_inside_loop_p (inner, loop->latch))
@@ -5634,14 +5708,14 @@
     {
       /* Add only those blocks that haven't been scheduled in the inner loop.
 	 The exception is the basic blocks with bookkeeping code - they should
-	 be added to the region (and they actually don't belong to the loop 
+	 be added to the region (and they actually don't belong to the loop
 	 body, but to the region containing that loop body).  */
 
       gcc_assert (new_rgn_number >= 0);
 
       if (! TEST_BIT (bbs_in_loop_rgns, loop_blocks[i]->index))
 	{
-	  sel_add_block_to_region (loop_blocks[i], &bb_ord_index, 
+	  sel_add_block_to_region (loop_blocks[i], &bb_ord_index,
                                    new_rgn_number);
 	  SET_BIT (bbs_in_loop_rgns, loop_blocks[i]->index);
 	}
@@ -5679,11 +5753,11 @@
 
 
 /* Create region(s) from loop nest LOOP, such that inner loops will be
-   pipelined before outer loops.  Returns true when a region for LOOP 
+   pipelined before outer loops.  Returns true when a region for LOOP
    is created.  */
 static bool
 make_regions_from_loop_nest (struct loop *loop)
-{   
+{
   struct loop *cur_loop;
   int rgn_number;
 
@@ -5739,10 +5813,10 @@
   if (loop_depth (loop) == 0)
     return false;
 
-  /* Now, the loop could be too large or irreducible.  Check whether its 
-     region is in LOOP_NESTS.  
-     We determine the region number of LOOP as the region number of its 
-     latch.  We can't use header here, because this header could be 
+  /* Now, the loop could be too large or irreducible.  Check whether its
+     region is in LOOP_NESTS.
+     We determine the region number of LOOP as the region number of its
+     latch.  We can't use header here, because this header could be
      just removed preheader and it will give us the wrong region number.
      Latch can't be used because it could be in the inner loop too.  */
   if (LOOP_MARKED_FOR_PIPELINING_P (loop) && pipelining_p)
@@ -5752,11 +5826,11 @@
       gcc_assert ((unsigned) rgn < VEC_length (loop_p, loop_nests));
       return true;
     }
-  
+
   return false;
 }
 
-/* Makes regions from the rest of the blocks, after loops are chosen 
+/* Makes regions from the rest of the blocks, after loops are chosen
    for pipelining.  */
 static void
 make_regions_from_the_rest (void)
@@ -5769,13 +5843,11 @@
   edge e;
   edge_iterator ei;
   int *degree;
-  int new_regions;
 
   /* Index in rgn_bb_table where to start allocating new regions.  */
   cur_rgn_blocks = nr_regions ? RGN_BLOCKS (nr_regions) : 0;
-  new_regions = nr_regions;
-
-  /* Make regions from all the rest basic blocks - those that don't belong to 
+
+  /* Make regions from all the rest basic blocks - those that don't belong to
      any loop or belong to irreducible loops.  Prepare the data structures
      for extend_rgns.  */
 
@@ -5798,10 +5870,10 @@
 	loop_hdr[bb->index] = bb->loop_father->num;
     }
 
-  /* For each basic block degree is calculated as the number of incoming 
+  /* For each basic block degree is calculated as the number of incoming
      edges, that are going out of bbs that are not yet scheduled.
      The basic blocks that are scheduled have degree value of zero.  */
-  FOR_EACH_BB (bb) 
+  FOR_EACH_BB (bb)
     {
       degree[bb->index] = 0;
 
@@ -5853,9 +5925,9 @@
   rev_top_order_index = NULL;
 }
 
-/* This function replaces the find_rgns when 
+/* This function replaces the find_rgns when
    FLAG_SEL_SCHED_PIPELINING_OUTER_LOOPS is set.  */
-void 
+void
 sel_find_rgns (void)
 {
   sel_init_pipelining ();
@@ -5873,7 +5945,7 @@
     }
 
   /* Make regions from all the rest basic blocks and schedule them.
-     These blocks include blocks that don't belong to any loop or belong  
+     These blocks include blocks that don't belong to any loop or belong
      to irreducible loops.  */
   make_regions_from_the_rest ();
 
@@ -5882,15 +5954,15 @@
   bbs_in_loop_rgns = NULL;
 }
 
-/* Adds the preheader blocks from previous loop to current region taking 
-   it from LOOP_PREHEADER_BLOCKS (current_loop_nest).  
+/* Adds the preheader blocks from previous loop to current region taking
+   it from LOOP_PREHEADER_BLOCKS (current_loop_nest).
    This function is only used with -fsel-sched-pipelining-outer-loops.  */
 void
 sel_add_loop_preheaders (void)
 {
   int i;
   basic_block bb;
-  VEC(basic_block, heap) *preheader_blocks 
+  VEC(basic_block, heap) *preheader_blocks
     = LOOP_PREHEADER_BLOCKS (current_loop_nest);
 
   for (i = 0;
@@ -5901,9 +5973,9 @@
   VEC_free (basic_block, heap, preheader_blocks);
 }
 
-/* While pipelining outer loops, returns TRUE if BB is a loop preheader.  
-   Please note that the function should also work when pipelining_p is 
-   false, because it is used when deciding whether we should or should 
+/* While pipelining outer loops, returns TRUE if BB is a loop preheader.
+   Please note that the function should also work when pipelining_p is
+   false, because it is used when deciding whether we should or should
    not reschedule pipelined code.  */
 bool
 sel_is_loop_preheader_p (basic_block bb)
@@ -5923,7 +5995,7 @@
          Check that the above code is equivalent to what we did before.  */
 
       if (in_current_region_p (current_loop_nest->header))
-	gcc_assert (!(BLOCK_TO_BB (bb->index) 
+	gcc_assert (!(BLOCK_TO_BB (bb->index)
 		      < BLOCK_TO_BB (current_loop_nest->header->index)));
 
       /* Support the situation when the latch block of outer loop
@@ -5944,13 +6016,13 @@
 {
   basic_block jump_bb = BLOCK_FOR_INSN (jump);
 
-  /* It is not jump, jump with side-effects or jump can lead to several 
+  /* It is not jump, jump with side-effects or jump can lead to several
      basic blocks.  */
   if (!onlyjump_p (jump)
       || !any_uncondjump_p (jump))
     return false;
 
-  /* Several outgoing edges, abnormal edge or destination of jump is 
+  /* Several outgoing edges, abnormal edge or destination of jump is
      not DEST_BB.  */
   if (EDGE_COUNT (jump_bb->succs) != 1
       || EDGE_SUCC (jump_bb, 0)->flags & EDGE_ABNORMAL
@@ -5962,7 +6034,7 @@
 }
 
 /* Removes the loop preheader from the current region and saves it in
-   PREHEADER_BLOCKS of the father loop, so they will be added later to 
+   PREHEADER_BLOCKS of the father loop, so they will be added later to
    region that represents an outer loop.  */
 static void
 sel_remove_loop_preheader (void)
@@ -5971,7 +6043,7 @@
   int cur_rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
   basic_block bb;
   bool all_empty_p = true;
-  VEC(basic_block, heap) *preheader_blocks 
+  VEC(basic_block, heap) *preheader_blocks
     = LOOP_PREHEADER_BLOCKS (loop_outer (current_loop_nest));
 
   gcc_assert (current_loop_nest);
@@ -5982,7 +6054,7 @@
     {
       bb = BASIC_BLOCK (BB_TO_BLOCK (i));
 
-      /* If the basic block belongs to region, but doesn't belong to 
+      /* If the basic block belongs to region, but doesn't belong to
 	 corresponding loop, then it should be a preheader.  */
       if (sel_is_loop_preheader_p (bb))
         {
@@ -5991,13 +6063,13 @@
             all_empty_p = false;
         }
     }
-  
+
   /* Remove these blocks only after iterating over the whole region.  */
   for (i = VEC_length (basic_block, preheader_blocks) - 1;
        i >= old_len;
        i--)
     {
-      bb =  VEC_index (basic_block, preheader_blocks, i); 
+      bb =  VEC_index (basic_block, preheader_blocks, i);
       sel_remove_bb (bb, false);
     }
 
@@ -6027,9 +6099,9 @@
               gcc_assert (BB_NOTE_LIST (bb) == NULL);
               delete_and_free_basic_block (bb);
 
-              /* Check if after deleting preheader there is a nonconditional 
-                 jump in PREV_BB that leads to the next basic block NEXT_BB.  
-                 If it is so - delete this jump and clear data sets of its 
+              /* Check if after deleting preheader there is a nonconditional
+                 jump in PREV_BB that leads to the next basic block NEXT_BB.
+                 If it is so - delete this jump and clear data sets of its
                  basic block if it becomes empty.  */
 	      if (next_bb->prev_bb == prev_bb
                   && prev_bb != ENTRY_BLOCK_PTR