diff gcc/ira-lives.c @ 111:04ced10e8804

gcc 7
author kono
date Fri, 27 Oct 2017 22:46:09 +0900
parents f6334be47118
children 84e7813d76e9
--- a/gcc/ira-lives.c	Sun Aug 21 07:07:55 2011 +0900
+++ b/gcc/ira-lives.c	Fri Oct 27 22:46:09 2017 +0900
@@ -1,6 +1,5 @@
 /* IRA processing allocno lives to build allocno live ranges.
-   Copyright (C) 2006, 2007, 2008, 2009, 2010
-   Free Software Foundation, Inc.
+   Copyright (C) 2006-2017 Free Software Foundation, Inc.
    Contributed by Vladimir Makarov <vmakarov@redhat.com>.
 
 This file is part of GCC.
@@ -22,23 +21,18 @@
 #include "config.h"
 #include "system.h"
 #include "coretypes.h"
-#include "tm.h"
-#include "regs.h"
-#include "rtl.h"
-#include "tm_p.h"
+#include "backend.h"
 #include "target.h"
-#include "flags.h"
-#include "except.h"
-#include "hard-reg-set.h"
-#include "basic-block.h"
+#include "rtl.h"
+#include "predict.h"
+#include "df.h"
+#include "memmodel.h"
+#include "tm_p.h"
 #include "insn-config.h"
-#include "recog.h"
-#include "diagnostic-core.h"
-#include "params.h"
-#include "df.h"
-#include "sbitmap.h"
+#include "regs.h"
+#include "ira.h"
+#include "ira-int.h"
 #include "sparseset.h"
-#include "ira-int.h"
 
 /* The code in this file is similar to one in global but the code
    works on the allocno basis and creates live ranges instead of
@@ -64,8 +58,8 @@
    register pressure excess.  Excess pressure for a register class at
    some point means that there are more allocnos of given register
    class living at the point than number of hard-registers of the
-   class available for the allocation.  It is defined only for cover
-   classes.  */
+   class available for the allocation.  It is defined only for
+   pressure classes.  */
 static int high_pressure_start_point[N_REG_CLASSES];
 
 /* Objects live at current point in the scan.  */
@@ -86,6 +80,10 @@
 /* The number of last call at which given allocno was saved.  */
 static int *allocno_saved_at_call;
 
+/* The value of get_preferred_alternatives for the current instruction,
+   supplemental to recog_data.  */
+static alternative_mask preferred_alternatives;
+
 /* Record the birth of hard register REGNO, updating hard_regs_live and
    hard reg conflict information for living allocnos.  */
 static void
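A note on the new preferred_alternatives variable above: alternative_mask is a per-insn bitmask with one bit per constraint alternative, filled in by get_preferred_alternatives and consulted throughout the pass below. A minimal, self-contained model of how such a mask is built and tested (the macro names mirror GCC's, but they are re-implemented here purely for illustration):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for GCC's alternative_mask machinery; the real
   definitions live in recog.h.  */
typedef uint64_t alternative_mask;
#define ALTERNATIVE_BIT(N) ((alternative_mask) 1 << (N))
#define TEST_BIT(MASK, N)  (((MASK) >> (N)) & 1)

int
main (void)
{
  /* Suppose get_preferred_alternatives decided that only alternatives 0 and 2
     of a three-alternative insn are worth considering.  */
  alternative_mask preferred = ALTERNATIVE_BIT (0) | ALTERNATIVE_BIT (2);

  for (int alt = 0; alt < 3; alt++)
    if (TEST_BIT (preferred, alt))
      printf ("alternative %d is preferred\n", alt);
  return 0;
}
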
@@ -97,6 +95,7 @@
   EXECUTE_IF_SET_IN_SPARSESET (objects_live, i)
     {
       ira_object_t obj = ira_object_id_map[i];
+
       SET_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (obj), regno);
       SET_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno);
     }
@@ -134,14 +133,17 @@
 {
   ira_allocno_t a = OBJECT_ALLOCNO (obj);
   int start, i;
-  enum reg_class cover_class, cl;
+  enum reg_class aclass, pclass, cl;
   live_range_t p;
 
-  cover_class = ALLOCNO_COVER_CLASS (a);
+  aclass = ALLOCNO_CLASS (a);
+  pclass = ira_pressure_class_translate[aclass];
   for (i = 0;
-       (cl = ira_reg_class_super_classes[cover_class][i]) != LIM_REG_CLASSES;
+       (cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
        i++)
     {
+      if (! ira_reg_pressure_class_p[cl])
+	continue;
       if (high_pressure_start_point[cl] < 0)
 	continue;
       p = OBJECT_LIVE_RANGES (obj);
@@ -166,40 +168,42 @@
   update_allocno_pressure_excess_length (obj);
 }
 
-/* The current register pressures for each cover class for the current
+/* The current register pressures for each pressure class for the current
    basic block.  */
 static int curr_reg_pressure[N_REG_CLASSES];
 
-/* Record that register pressure for COVER_CLASS increased by N
-   registers.  Update the current register pressure, maximal register
-   pressure for the current BB and the start point of the register
-   pressure excess.  */
+/* Record that register pressure for PCLASS increased by N registers.
+   Update the current register pressure, maximal register pressure for
+   the current BB and the start point of the register pressure
+   excess.  */
 static void
-inc_register_pressure (enum reg_class cover_class, int n)
+inc_register_pressure (enum reg_class pclass, int n)
 {
   int i;
   enum reg_class cl;
 
   for (i = 0;
-       (cl = ira_reg_class_super_classes[cover_class][i]) != LIM_REG_CLASSES;
+       (cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
        i++)
     {
+      if (! ira_reg_pressure_class_p[cl])
+	continue;
       curr_reg_pressure[cl] += n;
       if (high_pressure_start_point[cl] < 0
-	  && (curr_reg_pressure[cl] > ira_available_class_regs[cl]))
+	  && (curr_reg_pressure[cl] > ira_class_hard_regs_num[cl]))
 	high_pressure_start_point[cl] = curr_point;
       if (curr_bb_node->reg_pressure[cl] < curr_reg_pressure[cl])
 	curr_bb_node->reg_pressure[cl] = curr_reg_pressure[cl];
     }
 }
 
-/* Record that register pressure for COVER_CLASS has decreased by
-   NREGS registers; update current register pressure, start point of
-   the register pressure excess, and register pressure excess length
-   for living allocnos.  */
+/* Record that register pressure for PCLASS has decreased by NREGS
+   registers; update current register pressure, start point of the
+   register pressure excess, and register pressure excess length for
+   living allocnos.  */
 
 static void
-dec_register_pressure (enum reg_class cover_class, int nregs)
+dec_register_pressure (enum reg_class pclass, int nregs)
 {
   int i;
   unsigned int j;
@@ -207,13 +211,15 @@
   bool set_p = false;
 
   for (i = 0;
-       (cl = ira_reg_class_super_classes[cover_class][i]) != LIM_REG_CLASSES;
+       (cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
        i++)
     {
+      if (! ira_reg_pressure_class_p[cl])
+	continue;
       curr_reg_pressure[cl] -= nregs;
       ira_assert (curr_reg_pressure[cl] >= 0);
       if (high_pressure_start_point[cl] >= 0
-	  && curr_reg_pressure[cl] <= ira_available_class_regs[cl])
+	  && curr_reg_pressure[cl] <= ira_class_hard_regs_num[cl])
 	set_p = true;
     }
   if (set_p)
@@ -221,23 +227,44 @@
       EXECUTE_IF_SET_IN_SPARSESET (objects_live, j)
 	update_allocno_pressure_excess_length (ira_object_id_map[j]);
       for (i = 0;
-	   (cl = ira_reg_class_super_classes[cover_class][i])
-	     != LIM_REG_CLASSES;
+	   (cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
 	   i++)
-	if (high_pressure_start_point[cl] >= 0
-	    && curr_reg_pressure[cl] <= ira_available_class_regs[cl])
-	  high_pressure_start_point[cl] = -1;
+	{
+	  if (! ira_reg_pressure_class_p[cl])
+	    continue;
+	  if (high_pressure_start_point[cl] >= 0
+	      && curr_reg_pressure[cl] <= ira_class_hard_regs_num[cl])
+	    high_pressure_start_point[cl] = -1;
+	}
     }
 }
 
+/* Determine from the objects_live bitmap whether REGNO is currently live,
+   and occupies only one object.  Return false if we have no information.  */
+static bool
+pseudo_regno_single_word_and_live_p (int regno)
+{
+  ira_allocno_t a = ira_curr_regno_allocno_map[regno];
+  ira_object_t obj;
+
+  if (a == NULL)
+    return false;
+  if (ALLOCNO_NUM_OBJECTS (a) > 1)
+    return false;
+
+  obj = ALLOCNO_OBJECT (a, 0);
+
+  return sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj));
+}
+
 /* Mark the pseudo register REGNO as live.  Update all information about
    live ranges and register pressure.  */
 static void
 mark_pseudo_regno_live (int regno)
 {
   ira_allocno_t a = ira_curr_regno_allocno_map[regno];
+  enum reg_class pclass;
   int i, n, nregs;
-  enum reg_class cl;
 
   if (a == NULL)
     return;
@@ -246,8 +273,8 @@
   allocno_saved_at_call[ALLOCNO_NUM (a)] = 0;
 
   n = ALLOCNO_NUM_OBJECTS (a);
-  cl = ALLOCNO_COVER_CLASS (a);
-  nregs = ira_reg_class_nregs[cl][ALLOCNO_MODE (a)];
+  pclass = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
+  nregs = ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
   if (n > 1)
     {
       /* We track every subobject separately.  */
@@ -258,10 +285,11 @@
   for (i = 0; i < n; i++)
     {
       ira_object_t obj = ALLOCNO_OBJECT (a, i);
+
       if (sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj)))
 	continue;
 
-      inc_register_pressure (cl, nregs);
+      inc_register_pressure (pclass, nregs);
       make_object_born (obj);
     }
 }
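The hunks above replace the old cover-class bookkeeping: an allocno's class (ALLOCNO_CLASS) is translated to a pressure class via ira_pressure_class_translate, and a multi-word pseudo contributes ira_reg_class_max_nregs registers of pressure for its class and mode. A toy sketch of that bookkeeping pattern, with every table, class and mode name invented for illustration:

#include <stdio.h>

/* Toy model of the pressure bookkeeping used above: a pseudo of allocno class
   ACLASS and mode MODE adds max_nregs[ACLASS][MODE] units of pressure to its
   translated pressure class.  All tables below are hypothetical.  */

enum reg_class_toy { GEN_REGS, FP_REGS, N_CLASSES };
enum mode_toy { SI, DI, N_MODES };

static const int max_nregs[N_CLASSES][N_MODES] = { { 1, 2 }, { 1, 1 } };
static const int pressure_class_translate[N_CLASSES] = { GEN_REGS, FP_REGS };
static const int class_hard_regs_num[N_CLASSES] = { 8, 16 };

static int curr_reg_pressure[N_CLASSES];
static int high_pressure_start_point[N_CLASSES] = { -1, -1 };
static int curr_point = 42;

static void
inc_register_pressure (int pclass, int n)
{
  curr_reg_pressure[pclass] += n;
  /* Remember where this class first became over-subscribed.  */
  if (high_pressure_start_point[pclass] < 0
      && curr_reg_pressure[pclass] > class_hard_regs_num[pclass])
    high_pressure_start_point[pclass] = curr_point;
}

int
main (void)
{
  /* A DImode pseudo of class GEN_REGS becomes live: two registers' worth of
     pressure on the GEN_REGS pressure class.  */
  inc_register_pressure (pressure_class_translate[GEN_REGS],
                         max_nregs[GEN_REGS][DI]);
  printf ("GEN_REGS pressure = %d\n", curr_reg_pressure[GEN_REGS]);
  return 0;
}
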
@@ -273,8 +301,8 @@
 mark_pseudo_regno_subword_live (int regno, int subword)
 {
   ira_allocno_t a = ira_curr_regno_allocno_map[regno];
-  int n, nregs;
-  enum reg_class cl;
+  int n;
+  enum reg_class pclass;
   ira_object_t obj;
 
   if (a == NULL)
@@ -290,15 +318,15 @@
       return;
     }
 
-  cl = ALLOCNO_COVER_CLASS (a);
-  nregs = ira_reg_class_nregs[cl][ALLOCNO_MODE (a)];
-  gcc_assert (nregs == n);
+  pclass = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
+  gcc_assert
+    (n == ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
   obj = ALLOCNO_OBJECT (a, subword);
 
   if (sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj)))
     return;
 
-  inc_register_pressure (cl, nregs);
+  inc_register_pressure (pclass, 1);
   make_object_born (obj);
 }
 
@@ -312,15 +340,17 @@
 
   if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
     {
-      int last = regno + hard_regno_nregs[regno][GET_MODE (reg)];
+      int last = END_REGNO (reg);
+      enum reg_class aclass, pclass;
 
       while (regno < last)
 	{
 	  if (! TEST_HARD_REG_BIT (hard_regs_live, regno)
 	      && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
 	    {
-	      enum reg_class cover_class = ira_hard_regno_cover_class[regno];
-	      inc_register_pressure (cover_class, 1);
+	      aclass = ira_hard_regno_allocno_class[regno];
+	      pclass = ira_pressure_class_translate[aclass];
+	      inc_register_pressure (pclass, 1);
 	      make_hard_regno_born (regno);
 	    }
 	  regno++;
@@ -334,7 +364,7 @@
 static void
 mark_pseudo_reg_live (rtx orig_reg, unsigned regno)
 {
-  if (df_read_modify_subreg_p (orig_reg))
+  if (read_modify_subreg_p (orig_reg))
     {
       mark_pseudo_regno_subword_live (regno,
 				      subreg_lowpart_p (orig_reg) ? 0 : 1);
@@ -375,8 +405,8 @@
   allocno_saved_at_call[ALLOCNO_NUM (a)] = 0;
 
   n = ALLOCNO_NUM_OBJECTS (a);
-  cl = ALLOCNO_COVER_CLASS (a);
-  nregs = ira_reg_class_nregs[cl][ALLOCNO_MODE (a)];
+  cl = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
+  nregs = ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
   if (n > 1)
     {
       /* We track every subobject separately.  */
@@ -400,7 +430,7 @@
 mark_pseudo_regno_subword_dead (int regno, int subword)
 {
   ira_allocno_t a = ira_curr_regno_allocno_map[regno];
-  int n, nregs;
+  int n;
   enum reg_class cl;
   ira_object_t obj;
 
@@ -415,9 +445,9 @@
     /* The allocno as a whole doesn't die in this case.  */
     return;
 
-  cl = ALLOCNO_COVER_CLASS (a);
-  nregs = ira_reg_class_nregs[cl][ALLOCNO_MODE (a)];
-  gcc_assert (nregs == n);
+  cl = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
+  gcc_assert
+    (n == ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
 
   obj = ALLOCNO_OBJECT (a, subword);
   if (!sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj)))
@@ -436,14 +466,16 @@
 
   if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
     {
-      int last = regno + hard_regno_nregs[regno][GET_MODE (reg)];
+      int last = END_REGNO (reg);
+      enum reg_class aclass, pclass;
 
       while (regno < last)
 	{
 	  if (TEST_HARD_REG_BIT (hard_regs_live, regno))
 	    {
-	      enum reg_class cover_class = ira_hard_regno_cover_class[regno];
-	      dec_register_pressure (cover_class, 1);
+	      aclass = ira_hard_regno_allocno_class[regno];
+	      pclass = ira_pressure_class_translate[aclass];
+	      dec_register_pressure (pclass, 1);
 	      make_hard_regno_dead (regno);
 	    }
 	  regno++;
@@ -457,7 +489,7 @@
 static void
 mark_pseudo_reg_dead (rtx orig_reg, unsigned regno)
 {
-  if (df_read_modify_subreg_p (orig_reg))
+  if (read_modify_subreg_p (orig_reg))
     {
       mark_pseudo_regno_subword_dead (regno,
 				      subreg_lowpart_p (orig_reg) ? 0 : 1);
@@ -483,7 +515,7 @@
   if (DF_REF_FLAGS_IS_SET (def, DF_REF_PARTIAL)
       && (GET_CODE (orig_reg) != SUBREG
 	  || REGNO (reg) < FIRST_PSEUDO_REGISTER
-	  || !df_read_modify_subreg_p (orig_reg)))
+	  || !read_modify_subreg_p (orig_reg)))
     return;
 
   if (REGNO (reg) >= FIRST_PSEUDO_REGISTER)
@@ -494,7 +526,7 @@
 
 /* If REG is a pseudo or a subreg of it, and the class of its allocno
    intersects CL, make a conflict with pseudo DREG.  ORIG_DREG is the
-   rtx actually accessed, it may be indentical to DREG or a subreg of it.
+   rtx actually accessed, it may be identical to DREG or a subreg of it.
    Advance the current program point before making the conflict if
    ADVANCE_P.  Return TRUE if we will need to advance the current
    program point.  */
@@ -512,7 +544,7 @@
     return advance_p;
 
   a = ira_curr_regno_allocno_map[REGNO (reg)];
-  if (! reg_classes_intersect_p (cl, ALLOCNO_COVER_CLASS (a)))
+  if (! reg_classes_intersect_p (cl, ALLOCNO_CLASS (a)))
     return advance_p;
 
   if (advance_p)
@@ -528,7 +560,7 @@
 
 /* Check and make if necessary conflicts for pseudo DREG of class
    DEF_CL of the current insn with input operand USE of class USE_CL.
-   ORIG_DREG is the rtx actually accessed, it may be indentical to
+   ORIG_DREG is the rtx actually accessed, it may be identical to
    DREG or a subreg of it.  Advance the current program point before
    making the conflict if ADVANCE_P.  Return TRUE if we will need to
    advance the current program point.  */
@@ -585,36 +617,44 @@
     return;
 
   a = ira_curr_regno_allocno_map[REGNO (dreg)];
-  acl = ALLOCNO_COVER_CLASS (a);
+  acl = ALLOCNO_CLASS (a);
   if (! reg_classes_intersect_p (acl, def_cl))
     return;
 
   advance_p = true;
 
-  for (use = 0; use < recog_data.n_operands; use++)
+  int n_operands = recog_data.n_operands;
+  const operand_alternative *op_alt = &recog_op_alt[alt * n_operands];
+  for (use = 0; use < n_operands; use++)
     {
       int alt1;
 
       if (use == def || recog_data.operand_type[use] == OP_OUT)
 	continue;
 
-      if (recog_op_alt[use][alt].anything_ok)
+      if (op_alt[use].anything_ok)
 	use_cl = ALL_REGS;
       else
-	use_cl = recog_op_alt[use][alt].cl;
+	use_cl = op_alt[use].cl;
 
       /* If there's any alternative that allows USE to match DEF, do not
 	 record a conflict.  If that causes us to create an invalid
 	 instruction due to the earlyclobber, reload must fix it up.  */
       for (alt1 = 0; alt1 < recog_data.n_alternatives; alt1++)
-	if (recog_op_alt[use][alt1].matches == def
-	    || (use < recog_data.n_operands - 1
-		&& recog_data.constraints[use][0] == '%'
-		&& recog_op_alt[use + 1][alt1].matches == def)
-	    || (use >= 1
-		&& recog_data.constraints[use - 1][0] == '%'
-		&& recog_op_alt[use - 1][alt1].matches == def))
-	  break;
+	{
+	  if (!TEST_BIT (preferred_alternatives, alt1))
+	    continue;
+	  const operand_alternative *op_alt1
+	    = &recog_op_alt[alt1 * n_operands];
+	  if (op_alt1[use].matches == def
+	      || (use < n_operands - 1
+		  && recog_data.constraints[use][0] == '%'
+		  && op_alt1[use + 1].matches == def)
+	      || (use >= 1
+		  && recog_data.constraints[use - 1][0] == '%'
+		  && op_alt1[use - 1].matches == def))
+	    break;
+	}
 
       if (alt1 < recog_data.n_alternatives)
 	continue;
@@ -622,15 +662,15 @@
       advance_p = check_and_make_def_use_conflict (dreg, orig_dreg, def_cl,
 						   use, use_cl, advance_p);
 
-      if ((use_match = recog_op_alt[use][alt].matches) >= 0)
+      if ((use_match = op_alt[use].matches) >= 0)
 	{
 	  if (use_match == def)
 	    continue;
 
-	  if (recog_op_alt[use_match][alt].anything_ok)
+	  if (op_alt[use_match].anything_ok)
 	    use_cl = ALL_REGS;
 	  else
-	    use_cl = recog_op_alt[use_match][alt].cl;
+	    use_cl = op_alt[use_match].cl;
 	  advance_p = check_and_make_def_use_conflict (dreg, orig_dreg, def_cl,
 						       use, use_cl, advance_p);
 	}
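The two hunks above also reflect the newer recog_op_alt layout: instead of a two-dimensional recog_op_alt[operand][alternative] table, entries are stored flat, with alternative ALT occupying the n_operands entries starting at recog_op_alt[ALT * n_operands]. A standalone sketch of just that indexing (the struct fields and data are made up):

#include <stdio.h>

/* Illustrative flat layout: the entries for alternative ALT occupy indices
   [ALT * n_operands, (ALT + 1) * n_operands).  Only the indexing mirrors the
   code above; the struct and data are invented.  */
struct operand_alternative { int cl; int matches; int earlyclobber; };

int
main (void)
{
  enum { N_OPERANDS = 3, N_ALTERNATIVES = 2 };
  struct operand_alternative recog_op_alt[N_ALTERNATIVES * N_OPERANDS]
    = { { 0, -1, 0 } };

  /* Operand 2 is an earlyclobber output in alternative 1.  */
  recog_op_alt[1 * N_OPERANDS + 2].earlyclobber = 1;

  for (int alt = 0; alt < N_ALTERNATIVES; alt++)
    {
      const struct operand_alternative *op_alt
        = &recog_op_alt[alt * N_OPERANDS];
      for (int op = 0; op < N_OPERANDS; op++)
        if (op_alt[op].earlyclobber)
          printf ("operand %d is earlyclobber in alternative %d\n", op, alt);
    }
  return 0;
}
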
@@ -648,43 +688,47 @@
   int def, def_match;
   enum reg_class def_cl;
 
-  for (alt = 0; alt < recog_data.n_alternatives; alt++)
-    for (def = 0; def < recog_data.n_operands; def++)
-      {
-	def_cl = NO_REGS;
-	if (recog_op_alt[def][alt].earlyclobber)
-	  {
-	    if (recog_op_alt[def][alt].anything_ok)
-	      def_cl = ALL_REGS;
-	    else
-	      def_cl = recog_op_alt[def][alt].cl;
-	    check_and_make_def_conflict (alt, def, def_cl);
-	  }
-	if ((def_match = recog_op_alt[def][alt].matches) >= 0
-	    && (recog_op_alt[def_match][alt].earlyclobber
-		|| recog_op_alt[def][alt].earlyclobber))
-	  {
-	    if (recog_op_alt[def_match][alt].anything_ok)
-	      def_cl = ALL_REGS;
-	    else
-	      def_cl = recog_op_alt[def_match][alt].cl;
-	    check_and_make_def_conflict (alt, def, def_cl);
-	  }
-      }
+  int n_alternatives = recog_data.n_alternatives;
+  int n_operands = recog_data.n_operands;
+  const operand_alternative *op_alt = recog_op_alt;
+  for (alt = 0; alt < n_alternatives; alt++, op_alt += n_operands)
+    if (TEST_BIT (preferred_alternatives, alt))
+      for (def = 0; def < n_operands; def++)
+	{
+	  def_cl = NO_REGS;
+	  if (op_alt[def].earlyclobber)
+	    {
+	      if (op_alt[def].anything_ok)
+		def_cl = ALL_REGS;
+	      else
+		def_cl = op_alt[def].cl;
+	      check_and_make_def_conflict (alt, def, def_cl);
+	    }
+	  if ((def_match = op_alt[def].matches) >= 0
+	      && (op_alt[def_match].earlyclobber
+		  || op_alt[def].earlyclobber))
+	    {
+	      if (op_alt[def_match].anything_ok)
+		def_cl = ALL_REGS;
+	      else
+		def_cl = op_alt[def_match].cl;
+	      check_and_make_def_conflict (alt, def, def_cl);
+	    }
+	}
 }
 
 /* Mark early clobber hard registers of the current INSN as live (if
    LIVE_P) or dead.  Return true if there are such registers.  */
 static bool
-mark_hard_reg_early_clobbers (rtx insn, bool live_p)
+mark_hard_reg_early_clobbers (rtx_insn *insn, bool live_p)
 {
-  df_ref *def_rec;
+  df_ref def;
   bool set_p = false;
 
-  for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
-    if (DF_REF_FLAGS_IS_SET (*def_rec, DF_REF_MUST_CLOBBER))
+  FOR_EACH_INSN_DEF (def, insn)
+    if (DF_REF_FLAGS_IS_SET (def, DF_REF_MUST_CLOBBER))
       {
-	rtx dreg = DF_REF_REG (*def_rec);
+	rtx dreg = DF_REF_REG (def);
 
 	if (GET_CODE (dreg) == SUBREG)
 	  dreg = SUBREG_REG (dreg);
@@ -695,9 +739,9 @@
 	   because there is no way to say that non-operand hard
 	   register clobbers are not early ones.  */
 	if (live_p)
-	  mark_ref_live (*def_rec);
+	  mark_ref_live (def);
 	else
-	  mark_ref_dead (*def_rec);
+	  mark_ref_dead (def);
 	set_p = true;
       }
 
@@ -710,112 +754,42 @@
 static enum reg_class
 single_reg_class (const char *constraints, rtx op, rtx equiv_const)
 {
-  int ignore_p;
+  int c;
   enum reg_class cl, next_cl;
-  int c;
+  enum constraint_num cn;
 
   cl = NO_REGS;
-  for (ignore_p = false;
-       (c = *constraints);
-       constraints += CONSTRAINT_LEN (c, constraints))
+  alternative_mask preferred = preferred_alternatives;
+  for (; (c = *constraints); constraints += CONSTRAINT_LEN (c, constraints))
     if (c == '#')
-      ignore_p = true;
+      preferred &= ~ALTERNATIVE_BIT (0);
     else if (c == ',')
-      ignore_p = false;
-    else if (! ignore_p)
+      preferred >>= 1;
+    else if (preferred & 1)
       switch (c)
 	{
-	case ' ':
-	case '\t':
-	case '=':
-	case '+':
-	case '*':
-	case '&':
-	case '%':
-	case '!':
-	case '?':
-	  break;
-	case 'i':
-	  if (CONSTANT_P (op)
-	      || (equiv_const != NULL_RTX && CONSTANT_P (equiv_const)))
+	case 'g':
+	  return NO_REGS;
+
+	default:
+	  /* ??? Is this the best way to handle memory constraints?  */
+	  cn = lookup_constraint (constraints);
+	  if (insn_extra_memory_constraint (cn)
+	      || insn_extra_special_memory_constraint (cn)
+	      || insn_extra_address_constraint (cn))
 	    return NO_REGS;
-	  break;
-
-	case 'n':
-	  if (CONST_INT_P (op)
-	      || (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)
-	      || (equiv_const != NULL_RTX
-		  && (CONST_INT_P (equiv_const)
-		      || (GET_CODE (equiv_const) == CONST_DOUBLE
-			  && GET_MODE (equiv_const) == VOIDmode))))
-	    return NO_REGS;
-	  break;
-
-	case 's':
-	  if ((CONSTANT_P (op) && !CONST_INT_P (op)
-	       && (GET_CODE (op) != CONST_DOUBLE || GET_MODE (op) != VOIDmode))
+	  if (constraint_satisfied_p (op, cn)
 	      || (equiv_const != NULL_RTX
 		  && CONSTANT_P (equiv_const)
-		  && !CONST_INT_P (equiv_const)
-		  && (GET_CODE (equiv_const) != CONST_DOUBLE
-		      || GET_MODE (equiv_const) != VOIDmode)))
-	    return NO_REGS;
-	  break;
-
-	case 'I':
-	case 'J':
-	case 'K':
-	case 'L':
-	case 'M':
-	case 'N':
-	case 'O':
-	case 'P':
-	  if ((CONST_INT_P (op)
-	       && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), c, constraints))
-	      || (equiv_const != NULL_RTX
-		  && CONST_INT_P (equiv_const)
-		  && CONST_OK_FOR_CONSTRAINT_P (INTVAL (equiv_const),
-						c, constraints)))
+		  && constraint_satisfied_p (equiv_const, cn)))
 	    return NO_REGS;
-	  break;
-
-	case 'E':
-	case 'F':
-	  if (GET_CODE (op) == CONST_DOUBLE
-	      || (GET_CODE (op) == CONST_VECTOR
-		  && GET_MODE_CLASS (GET_MODE (op)) == MODE_VECTOR_FLOAT)
-	      || (equiv_const != NULL_RTX
-		  && (GET_CODE (equiv_const) == CONST_DOUBLE
-		      || (GET_CODE (equiv_const) == CONST_VECTOR
-			  && (GET_MODE_CLASS (GET_MODE (equiv_const))
-			      == MODE_VECTOR_FLOAT)))))
-	    return NO_REGS;
-	  break;
-
-	case 'G':
-	case 'H':
-	  if ((GET_CODE (op) == CONST_DOUBLE
-	       && CONST_DOUBLE_OK_FOR_CONSTRAINT_P (op, c, constraints))
-	      || (equiv_const != NULL_RTX
-		  && GET_CODE (equiv_const) == CONST_DOUBLE
-		  && CONST_DOUBLE_OK_FOR_CONSTRAINT_P (equiv_const,
-						       c, constraints)))
-	    return NO_REGS;
-	  /* ??? what about memory */
-	case 'r':
-	case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
-	case 'h': case 'j': case 'k': case 'l':
-	case 'q': case 't': case 'u':
-	case 'v': case 'w': case 'x': case 'y': case 'z':
-	case 'A': case 'B': case 'C': case 'D':
-	case 'Q': case 'R': case 'S': case 'T': case 'U':
-	case 'W': case 'Y': case 'Z':
-	  next_cl = (c == 'r'
-		     ? GENERAL_REGS
-		     : REG_CLASS_FROM_CONSTRAINT (c, constraints));
-	  if ((cl != NO_REGS && next_cl != cl)
-	      || (ira_available_class_regs[next_cl]
-		  > ira_reg_class_nregs[next_cl][GET_MODE (op)]))
+	  next_cl = reg_class_for_constraint (cn);
+	  if (next_cl == NO_REGS)
+	    break;
+	  if (cl == NO_REGS
+	      ? ira_class_singleton[next_cl][GET_MODE (op)] < 0
+	      : (ira_class_singleton[cl][GET_MODE (op)]
+		 != ira_class_singleton[next_cl][GET_MODE (op)]))
 	    return NO_REGS;
 	  cl = next_cl;
 	  break;
@@ -825,16 +799,13 @@
 	  next_cl
 	    = single_reg_class (recog_data.constraints[c - '0'],
 				recog_data.operand[c - '0'], NULL_RTX);
-	  if ((cl != NO_REGS && next_cl != cl)
-	      || next_cl == NO_REGS
-	      || (ira_available_class_regs[next_cl]
-		  > ira_reg_class_nregs[next_cl][GET_MODE (op)]))
+	  if (cl == NO_REGS
+	      ? ira_class_singleton[next_cl][GET_MODE (op)] < 0
+	      : (ira_class_singleton[cl][GET_MODE (op)]
+		 != ira_class_singleton[next_cl][GET_MODE (op)]))
 	    return NO_REGS;
 	  cl = next_cl;
 	  break;
-
-	default:
-	  return NO_REGS;
 	}
   return cl;
 }
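In the rewritten single_reg_class above, the old ignore_p flag is replaced by carrying the preferred-alternatives mask through the constraint string: '#' knocks out the alternative currently being scanned (always bit 0, because the mask is shifted as alternatives are consumed), ',' shifts the mask right to move to the next alternative, and a constraint character is only acted on while bit 0 is set. A standalone sketch of that walk, reduced to single-character constraints (the real code advances by CONSTRAINT_LEN and consults the constraint tables):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t alternative_mask;

/* Walk a constraint string the way single_reg_class does above: '#' disables
   the alternative being scanned, ',' advances to the next one, and other
   characters are only honoured while the current alternative is preferred.
   Real constraint semantics are omitted.  */
static void
walk_constraints (const char *constraints, alternative_mask preferred)
{
  int c;

  for (; (c = *constraints); constraints++)
    if (c == '#')
      preferred &= ~(alternative_mask) 1;   /* drop the current alternative */
    else if (c == ',')
      preferred >>= 1;                      /* move to the next alternative */
    else if (preferred & 1)
      printf ("honouring '%c'\n", c);
    else
      printf ("skipping '%c' (alternative not preferred)\n", c);
}

int
main (void)
{
  /* Two alternatives, both initially preferred; '#' disables the first.  */
  walk_constraints ("#r,r", 0x3);
  return 0;
}
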
@@ -855,13 +826,13 @@
    might be used by insn reloads because the constraints are too
    strict.  */
 void
-ira_implicitly_set_insn_hard_regs (HARD_REG_SET *set)
+ira_implicitly_set_insn_hard_regs (HARD_REG_SET *set,
+				   alternative_mask preferred)
 {
   int i, c, regno = 0;
-  bool ignore_p;
   enum reg_class cl;
   rtx op;
-  enum machine_mode mode;
+  machine_mode mode;
 
   CLEAR_HARD_REG_SET (*set);
   for (i = 0; i < recog_data.n_operands; i++)
@@ -879,34 +850,23 @@
 	  mode = (GET_CODE (op) == SCRATCH
 		  ? GET_MODE (op) : PSEUDO_REGNO_MODE (regno));
 	  cl = NO_REGS;
-	  for (ignore_p = false; (c = *p); p += CONSTRAINT_LEN (c, p))
+	  for (; (c = *p); p += CONSTRAINT_LEN (c, p))
 	    if (c == '#')
-	      ignore_p = true;
+	      preferred &= ~ALTERNATIVE_BIT (0);
 	    else if (c == ',')
-	      ignore_p = false;
-	    else if (! ignore_p)
-	      switch (c)
-		{
-		case 'r':
-		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
-		case 'h': case 'j': case 'k': case 'l':
-		case 'q': case 't': case 'u':
-		case 'v': case 'w': case 'x': case 'y': case 'z':
-		case 'A': case 'B': case 'C': case 'D':
-		case 'Q': case 'R': case 'S': case 'T': case 'U':
-		case 'W': case 'Y': case 'Z':
-		  cl = (c == 'r'
-			? GENERAL_REGS
-			: REG_CLASS_FROM_CONSTRAINT (c, p));
-		  if (cl != NO_REGS
-		      /* There is no register pressure problem if all of the
-			 regs in this class are fixed.  */
-		      && ira_available_class_regs[cl] != 0
-		      && (ira_available_class_regs[cl]
-			  <= ira_reg_class_nregs[cl][mode]))
-		    IOR_HARD_REG_SET (*set, reg_class_contents[cl]);
-		  break;
-		}
+	      preferred >>= 1;
+	    else if (preferred & 1)
+	      {
+		cl = reg_class_for_constraint (lookup_constraint (p));
+		if (cl != NO_REGS)
+		  {
+		    /* There is no register pressure problem if all of the
+		       regs in this class are fixed.  */
+		    int regno = ira_class_singleton[cl][mode];
+		    if (regno >= 0)
+		      add_to_hard_reg_set (set, mode, regno);
+		  }
+	      }
 	}
     }
 }
@@ -944,12 +904,11 @@
       if (REG_P (operand)
 	  && (regno = REGNO (operand)) >= FIRST_PSEUDO_REGISTER)
 	{
-	  enum reg_class cover_class;
+	  enum reg_class aclass;
 
 	  operand_a = ira_curr_regno_allocno_map[regno];
-	  cover_class = ALLOCNO_COVER_CLASS (operand_a);
-	  if (ira_class_subset_p[cl][cover_class]
-	      && ira_class_hard_regs_num[cl] != 0)
+	  aclass = ALLOCNO_CLASS (operand_a);
+	  if (ira_class_subset_p[cl][aclass])
 	    {
 	      /* View the desired allocation of OPERAND as:
 
@@ -958,31 +917,30 @@
 		 a simplification of:
 
 		    (subreg:YMODE (reg:XMODE XREGNO) OFFSET).  */
-	      enum machine_mode ymode, xmode;
+	      machine_mode ymode, xmode;
 	      int xregno, yregno;
 	      HOST_WIDE_INT offset;
 
 	      xmode = recog_data.operand_mode[i];
-	      xregno = ira_class_hard_regs[cl][0];
+	      xregno = ira_class_singleton[cl][xmode];
+	      gcc_assert (xregno >= 0);
 	      ymode = ALLOCNO_MODE (operand_a);
 	      offset = subreg_lowpart_offset (ymode, xmode);
 	      yregno = simplify_subreg_regno (xregno, xmode, offset, ymode);
 	      if (yregno >= 0
-		  && ira_class_hard_reg_index[cover_class][yregno] >= 0)
+		  && ira_class_hard_reg_index[aclass][yregno] >= 0)
 		{
 		  int cost;
 
 		  ira_allocate_and_set_costs
 		    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (operand_a),
-		     cover_class, 0);
-		  cost
-		    = (freq
-		       * (in_p
-			  ? ira_get_register_move_cost (xmode, cover_class, cl)
-			  : ira_get_register_move_cost (xmode, cl,
-							cover_class)));
+		     aclass, 0);
+		  ira_init_register_move_cost_if_necessary (xmode);
+		  cost = freq * (in_p
+				 ? ira_register_move_cost[xmode][aclass][cl]
+				 : ira_register_move_cost[xmode][cl][aclass]);
 		  ALLOCNO_CONFLICT_HARD_REG_COSTS (operand_a)
-		    [ira_class_hard_reg_index[cover_class][yregno]] -= cost;
+		    [ira_class_hard_reg_index[aclass][yregno]] -= cost;
 		}
 	    }
 	}
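As a worked example of the cost adjustment above (all numbers invented): if the block frequency freq is 100, class cl has a singleton hard register whose counterpart yregno falls inside the allocno's class aclass, and ira_register_move_cost[xmode][aclass][cl] is 2 for an input operand, then cost = 100 * 2 = 200, and the ALLOCNO_CONFLICT_HARD_REG_COSTS entry for yregno is decreased by 200, biasing the allocator toward assigning that hard register and so avoiding the register move.
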
@@ -1005,21 +963,66 @@
     }
 }
 
-/* Return true when one of the predecessor edges of BB is marked with
-   EDGE_ABNORMAL_CALL or EDGE_EH.  */
-static bool
-bb_has_abnormal_call_pred (basic_block bb)
+/* Look through the CALL_INSN_FUNCTION_USAGE of a call insn INSN, and see if
+   we find a SET rtx that we can use to deduce that a register can be cheaply
+   caller-saved.  Return such a register, or NULL_RTX if none is found.  */
+static rtx
+find_call_crossed_cheap_reg (rtx_insn *insn)
 {
-  edge e;
-  edge_iterator ei;
+  rtx cheap_reg = NULL_RTX;
+  rtx exp = CALL_INSN_FUNCTION_USAGE (insn);
 
-  FOR_EACH_EDGE (e, ei, bb->preds)
+  while (exp != NULL)
+    {
+      rtx x = XEXP (exp, 0);
+      if (GET_CODE (x) == SET)
+	{
+	  exp = x;
+	  break;
+	}
+      exp = XEXP (exp, 1);
+    }
+  if (exp != NULL)
     {
-      if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
-	return true;
+      basic_block bb = BLOCK_FOR_INSN (insn);
+      rtx reg = SET_SRC (exp);
+      rtx_insn *prev = PREV_INSN (insn);
+      while (prev && !(INSN_P (prev)
+		       && BLOCK_FOR_INSN (prev) != bb))
+	{
+	  if (NONDEBUG_INSN_P (prev))
+	    {
+	      rtx set = single_set (prev);
+
+	      if (set && rtx_equal_p (SET_DEST (set), reg))
+		{
+		  rtx src = SET_SRC (set);
+		  if (!REG_P (src) || HARD_REGISTER_P (src)
+		      || !pseudo_regno_single_word_and_live_p (REGNO (src)))
+		    break;
+		  if (!modified_between_p (src, prev, insn))
+		    cheap_reg = src;
+		  break;
+		}
+	      if (set && rtx_equal_p (SET_SRC (set), reg))
+		{
+		  rtx dest = SET_DEST (set);
+		  if (!REG_P (dest) || HARD_REGISTER_P (dest)
+		      || !pseudo_regno_single_word_and_live_p (REGNO (dest)))
+		    break;
+		  if (!modified_between_p (dest, prev, insn))
+		    cheap_reg = dest;
+		  break;
+		}
+
+	      if (reg_set_p (reg, prev))
+		break;
+	    }
+	  prev = PREV_INSN (prev);
+	}
     }
-  return false;
-}
+  return cheap_reg;
+}  
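A rough illustration of what the new find_call_crossed_cheap_reg does: the SET found in CALL_INSN_FUNCTION_USAGE names a register whose value the call returns; scanning backwards, a single-set copy between that register and a live single-object pseudo means the pseudo's value can be recovered from the call's return value, so it is cheap to keep live across the call (the REG_RETURNED note added later records this). The toy model below keeps only the backward-scan idea; registers are plain ints, and everything about RTL, basic-block limits, liveness and modified_between_p is omitted:

#include <stdio.h>

#define FIRST_PSEUDO 100   /* hypothetical first pseudo register number */

struct copy { int dest, src; };   /* a simple "dest = src" move */

/* Scan backwards from the call for a pseudo whose value matches what the
   call returns in RET_REG.  Returns the pseudo's number or -1.  */
static int
find_cheap_reg (const struct copy *insns, int call_idx, int ret_reg)
{
  for (int i = call_idx - 1; i >= 0; i--)
    {
      /* "ret_reg = pseudo": the value the call returns came from the pseudo.  */
      if (insns[i].dest == ret_reg && insns[i].src >= FIRST_PSEUDO)
        return insns[i].src;
      /* "pseudo = ret_reg": the pseudo already holds that value.  */
      if (insns[i].src == ret_reg && insns[i].dest >= FIRST_PSEUDO)
        return insns[i].dest;
      /* Any other write to ret_reg ends the search.  */
      if (insns[i].dest == ret_reg)
        return -1;
    }
  return -1;
}

int
main (void)
{
  /* insn 0: r105 = r42;  insn 1: ret_reg = r105;  insn 2: the call.  */
  struct copy insns[] = { { 105, 42 }, { 0, 105 }, { -1, -1 } };
  printf ("cheap pseudo: r%d\n", find_cheap_reg (insns, 2, 0));
  return 0;
}
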
 
 /* Process insns of the basic block given by its LOOP_TREE_NODE to
    update allocno live ranges, allocno hard register conflicts,
@@ -1031,7 +1034,7 @@
   int i, freq;
   unsigned int j;
   basic_block bb;
-  rtx insn;
+  rtx_insn *insn;
   bitmap_iterator bi;
   bitmap reg_live_out;
   unsigned int px;
@@ -1040,13 +1043,13 @@
   bb = loop_tree_node->bb;
   if (bb != NULL)
     {
-      for (i = 0; i < ira_reg_class_cover_size; i++)
+      for (i = 0; i < ira_pressure_classes_num; i++)
 	{
-	  curr_reg_pressure[ira_reg_class_cover[i]] = 0;
-	  high_pressure_start_point[ira_reg_class_cover[i]] = -1;
+	  curr_reg_pressure[ira_pressure_classes[i]] = 0;
+	  high_pressure_start_point[ira_pressure_classes[i]] = -1;
 	}
       curr_bb_node = loop_tree_node;
-      reg_live_out = DF_LR_OUT (bb);
+      reg_live_out = df_get_live_out (bb);
       sparseset_clear (objects_live);
       REG_SET_TO_HARD_REG_SET (hard_regs_live, reg_live_out);
       AND_COMPL_HARD_REG_SET (hard_regs_live, eliminable_regset);
@@ -1054,19 +1057,22 @@
       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
 	if (TEST_HARD_REG_BIT (hard_regs_live, i))
 	  {
-	    enum reg_class cover_class, cl;
+	    enum reg_class aclass, pclass, cl;
 
-	    cover_class = ira_class_translate[REGNO_REG_CLASS (i)];
+	    aclass = ira_allocno_class_translate[REGNO_REG_CLASS (i)];
+	    pclass = ira_pressure_class_translate[aclass];
 	    for (j = 0;
-		 (cl = ira_reg_class_super_classes[cover_class][j])
+		 (cl = ira_reg_class_super_classes[pclass][j])
 		   != LIM_REG_CLASSES;
 		 j++)
 	      {
+		if (! ira_reg_pressure_class_p[cl])
+		  continue;
 		curr_reg_pressure[cl]++;
 		if (curr_bb_node->reg_pressure[cl] < curr_reg_pressure[cl])
 		  curr_bb_node->reg_pressure[cl] = curr_reg_pressure[cl];
 		ira_assert (curr_reg_pressure[cl]
-			    <= ira_available_class_regs[cl]);
+			    <= ira_class_hard_regs_num[cl]);
 	      }
 	  }
       EXECUTE_IF_SET_IN_BITMAP (reg_live_out, FIRST_PSEUDO_REGISTER, j, bi)
@@ -1090,7 +1096,8 @@
 	 pessimistic, but it probably doesn't matter much in practice.  */
       FOR_BB_INSNS_REVERSE (bb, insn)
 	{
-	  df_ref *def_rec, *use_rec;
+	  ira_allocno_t a;
+	  df_ref def, use;
 	  bool call_p;
 
 	  if (!NONDEBUG_INSN_P (insn))
@@ -1098,9 +1105,27 @@
 
 	  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
 	    fprintf (ira_dump_file, "   Insn %u(l%d): point = %d\n",
-		     INSN_UID (insn), loop_tree_node->parent->loop->num,
+		     INSN_UID (insn), loop_tree_node->parent->loop_num,
 		     curr_point);
 
+	  call_p = CALL_P (insn);
+#ifdef REAL_PIC_OFFSET_TABLE_REGNUM
+	  int regno;
+	  bool clear_pic_use_conflict_p = false;
+	  /* Processing insn usage in call insn can create conflict
+	     with pic pseudo and pic hard reg and that is wrong.
+	     Check this situation and fix it at the end of the insn
+	     processing.  */
+	  if (call_p && pic_offset_table_rtx != NULL_RTX
+	      && (regno = REGNO (pic_offset_table_rtx)) >= FIRST_PSEUDO_REGISTER
+	      && (a = ira_curr_regno_allocno_map[regno]) != NULL)
+	    clear_pic_use_conflict_p
+		= (find_regno_fusage (insn, USE, REAL_PIC_OFFSET_TABLE_REGNUM)
+		   && ! TEST_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS
+					   (ALLOCNO_OBJECT (a, 0)),
+					   REAL_PIC_OFFSET_TABLE_REGNUM));
+#endif
+
 	  /* Mark each defined value as live.  We need to do this for
 	     unused values because they still conflict with quantities
 	     that are live at the time of the definition.
@@ -1110,10 +1135,9 @@
 	     on a call-clobbered register.  Marking the register as
 	     live would stop us from allocating it to a call-crossing
 	     allocno.  */
-	  call_p = CALL_P (insn);
-	  for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
-	    if (!call_p || !DF_REF_FLAGS_IS_SET (*def_rec, DF_REF_MAY_CLOBBER))
-	      mark_ref_live (*def_rec);
+	  FOR_EACH_INSN_DEF (def, insn)
+	    if (!call_p || !DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
+	      mark_ref_live (def);
 
 	  /* If INSN has multiple outputs, then any value used in one
 	     of the outputs conflicts with the other outputs.  Model this
@@ -1127,12 +1151,12 @@
 	     to the same hard register as an unused output we could
 	     set the hard register before the output reload insn.  */
 	  if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
-	    for (use_rec = DF_INSN_USES (insn); *use_rec; use_rec++)
+	    FOR_EACH_INSN_USE (use, insn)
 	      {
 		int i;
 		rtx reg;
 
-		reg = DF_REF_REG (*use_rec);
+		reg = DF_REF_REG (use);
 		for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
 		  {
 		    rtx set;
@@ -1143,31 +1167,43 @@
 		      {
 			/* After the previous loop, this is a no-op if
 			   REG is contained within SET_DEST (SET).  */
-			mark_ref_live (*use_rec);
+			mark_ref_live (use);
 			break;
 		      }
 		  }
 	      }
 
 	  extract_insn (insn);
-	  preprocess_constraints ();
+	  preferred_alternatives = get_preferred_alternatives (insn);
+	  preprocess_constraints (insn);
 	  process_single_reg_class_operands (false, freq);
 
 	  /* See which defined values die here.  */
-	  for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
-	    if (!call_p || !DF_REF_FLAGS_IS_SET (*def_rec, DF_REF_MAY_CLOBBER))
-	      mark_ref_dead (*def_rec);
+	  FOR_EACH_INSN_DEF (def, insn)
+	    if (!call_p || !DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
+	      mark_ref_dead (def);
 
 	  if (call_p)
 	    {
+	      /* Try to find a SET in the CALL_INSN_FUNCTION_USAGE, and from
+		 there, try to find a pseudo that is live across the call but
+		 can be cheaply reconstructed from the return value.  */
+	      rtx cheap_reg = find_call_crossed_cheap_reg (insn);
+	      if (cheap_reg != NULL_RTX)
+		add_reg_note (insn, REG_RETURNED, cheap_reg);
+
 	      last_call_num++;
 	      sparseset_clear (allocnos_processed);
 	      /* The current set of live allocnos are live across the call.  */
 	      EXECUTE_IF_SET_IN_SPARSESET (objects_live, i)
 	        {
 		  ira_object_t obj = ira_object_id_map[i];
-		  ira_allocno_t a = OBJECT_ALLOCNO (obj);
+		  a = OBJECT_ALLOCNO (obj);
 		  int num = ALLOCNO_NUM (a);
+		  HARD_REG_SET this_call_used_reg_set;
+
+		  get_call_reg_set_usage (insn, &this_call_used_reg_set,
+					  call_used_reg_set);
 
 		  /* Don't allocate allocnos that cross setjmps or any
 		     call, if this function receives a nonlocal
@@ -1182,9 +1218,9 @@
 		  if (can_throw_internal (insn))
 		    {
 		      IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
-					call_used_reg_set);
+					this_call_used_reg_set);
 		      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
-					call_used_reg_set);
+					this_call_used_reg_set);
 		    }
 
 		  if (sparseset_bit_p (allocnos_processed, num))
@@ -1192,7 +1228,7 @@
 		  sparseset_set_bit (allocnos_processed, num);
 
 		  if (allocno_saved_at_call[num] != last_call_num)
-		    /* Here we are mimicking caller-save.c behaviour
+		    /* Here we are mimicking caller-save.c behavior
 		       which does not save hard register at a call if
 		       it was saved on previous call in the same basic
 		       block and the hard register was not mentioned
@@ -1201,16 +1237,21 @@
 		  /* Mark it as saved at the next call.  */
 		  allocno_saved_at_call[num] = last_call_num + 1;
 		  ALLOCNO_CALLS_CROSSED_NUM (a)++;
+		  IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a),
+				    this_call_used_reg_set);
+		  if (cheap_reg != NULL_RTX
+		      && ALLOCNO_REGNO (a) == (int) REGNO (cheap_reg))
+		    ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)++;
 		}
 	    }
 
 	  make_early_clobber_and_input_conflicts ();
 
 	  curr_point++;
-
+	  
 	  /* Mark each used value as live.  */
-	  for (use_rec = DF_INSN_USES (insn); *use_rec; use_rec++)
-	    mark_ref_live (*use_rec);
+	  FOR_EACH_INSN_USE (use, insn)
+	    mark_ref_live (use);
 
 	  process_single_reg_class_operands (true, freq);
 
@@ -1223,23 +1264,34 @@
 	      /* Mark each hard reg as live again.  For example, a
 		 hard register can be in clobber and in an insn
 		 input.  */
-	      for (use_rec = DF_INSN_USES (insn); *use_rec; use_rec++)
+	      FOR_EACH_INSN_USE (use, insn)
 		{
-		  rtx ureg = DF_REF_REG (*use_rec);
+		  rtx ureg = DF_REF_REG (use);
 
 		  if (GET_CODE (ureg) == SUBREG)
 		    ureg = SUBREG_REG (ureg);
 		  if (! REG_P (ureg) || REGNO (ureg) >= FIRST_PSEUDO_REGISTER)
 		    continue;
 
-		  mark_ref_live (*use_rec);
+		  mark_ref_live (use);
 		}
 	    }
 
+#ifdef REAL_PIC_OFFSET_TABLE_REGNUM
+	  if (clear_pic_use_conflict_p)
+	    {
+	      regno = REGNO (pic_offset_table_rtx);
+	      a = ira_curr_regno_allocno_map[regno];
+	      CLEAR_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (ALLOCNO_OBJECT (a, 0)),
+				  REAL_PIC_OFFSET_TABLE_REGNUM);
+	      CLEAR_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS
+				  (ALLOCNO_OBJECT (a, 0)),
+				  REAL_PIC_OFFSET_TABLE_REGNUM);
+	    }
+#endif
 	  curr_point++;
 	}
 
-#ifdef EH_RETURN_DATA_REGNO
       if (bb_has_eh_pred (bb))
 	for (j = 0; ; ++j)
 	  {
@@ -1248,7 +1300,6 @@
 	      break;
 	    make_hard_regno_born (regno);
 	  }
-#endif
 
       /* Allocnos can't go in stack regs at the start of a basic block
 	 that is reached by an abnormal edge. Likewise for call
@@ -1261,6 +1312,7 @@
 	  EXECUTE_IF_SET_IN_SPARSESET (objects_live, px)
 	    {
 	      ira_allocno_t a = OBJECT_ALLOCNO (ira_object_id_map[px]);
+
 	      ALLOCNO_NO_STACK_REG_P (a) = true;
 	      ALLOCNO_TOTAL_NO_STACK_REG_P (a) = true;
 	    }
@@ -1270,9 +1322,24 @@
 	  /* No need to record conflicts for call clobbered regs if we
 	     have nonlocal labels around, as we don't ever try to
 	     allocate such regs in this case.  */
-	  if (!cfun->has_nonlocal_label && bb_has_abnormal_call_pred (bb))
+	  if (!cfun->has_nonlocal_label
+	      && has_abnormal_call_or_eh_pred_edge_p (bb))
 	    for (px = 0; px < FIRST_PSEUDO_REGISTER; px++)
-	      if (call_used_regs[px])
+	      if (call_used_regs[px]
+#ifdef REAL_PIC_OFFSET_TABLE_REGNUM
+		  /* We should create a conflict of PIC pseudo with
+		     PIC hard reg as PIC hard reg can have a wrong
+		     value after jump described by the abnormal edge.
+		     In this case we can not allocate PIC hard reg to
+		     PIC pseudo as PIC pseudo will also have a wrong
+		     value.  This code is not critical as LRA can fix
+		     it but it is better to have the right allocation
+		     earlier.  */
+		  || (px == REAL_PIC_OFFSET_TABLE_REGNUM
+		      && pic_offset_table_rtx != NULL_RTX
+		      && REGNO (pic_offset_table_rtx) >= FIRST_PSEUDO_REGISTER)
+#endif
+		  )
 		make_hard_regno_born (px);
 	}
 
@@ -1282,17 +1349,17 @@
       curr_point++;
 
     }
-  /* Propagate register pressure to upper loop tree nodes: */
+  /* Propagate register pressure to upper loop tree nodes.  */
   if (loop_tree_node != ira_loop_tree_root)
-    for (i = 0; i < ira_reg_class_cover_size; i++)
+    for (i = 0; i < ira_pressure_classes_num; i++)
       {
-	enum reg_class cover_class;
+	enum reg_class pclass;
 
-	cover_class = ira_reg_class_cover[i];
-	if (loop_tree_node->reg_pressure[cover_class]
-	    > loop_tree_node->parent->reg_pressure[cover_class])
-	  loop_tree_node->parent->reg_pressure[cover_class]
-	    = loop_tree_node->reg_pressure[cover_class];
+	pclass = ira_pressure_classes[i];
+	if (loop_tree_node->reg_pressure[pclass]
+	    > loop_tree_node->parent->reg_pressure[pclass])
+	  loop_tree_node->parent->reg_pressure[pclass]
+	    = loop_tree_node->reg_pressure[pclass];
       }
 }
 
@@ -1342,32 +1409,31 @@
   int *map;
   ira_object_t obj;
   ira_object_iterator oi;
-  live_range_t r;
-  sbitmap born_or_dead, born, dead;
+  live_range_t r, prev_r, next_r;
   sbitmap_iterator sbi;
   bool born_p, dead_p, prev_born_p, prev_dead_p;
   
-  born = sbitmap_alloc (ira_max_point);
-  dead = sbitmap_alloc (ira_max_point);
-  sbitmap_zero (born);
-  sbitmap_zero (dead);
+  auto_sbitmap born (ira_max_point);
+  auto_sbitmap dead (ira_max_point);
+  bitmap_clear (born);
+  bitmap_clear (dead);
   FOR_EACH_OBJECT (obj, oi)
     for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
       {
 	ira_assert (r->start <= r->finish);
-	SET_BIT (born, r->start);
-	SET_BIT (dead, r->finish);
+	bitmap_set_bit (born, r->start);
+	bitmap_set_bit (dead, r->finish);
       }
 
-  born_or_dead = sbitmap_alloc (ira_max_point);
-  sbitmap_a_or_b (born_or_dead, born, dead);
+  auto_sbitmap born_or_dead (ira_max_point);
+  bitmap_ior (born_or_dead, born, dead);
   map = (int *) ira_allocate (sizeof (int) * ira_max_point);
   n = -1;
   prev_born_p = prev_dead_p = false;
-  EXECUTE_IF_SET_IN_SBITMAP (born_or_dead, 0, i, sbi)
+  EXECUTE_IF_SET_IN_BITMAP (born_or_dead, 0, i, sbi)
     {
-      born_p = TEST_BIT (born, i);
-      dead_p = TEST_BIT (dead, i);
+      born_p = bitmap_bit_p (born, i);
+      dead_p = bitmap_bit_p (dead, i);
       if ((prev_born_p && ! prev_dead_p && born_p && ! dead_p)
 	  || (prev_dead_p && ! prev_born_p && dead_p && ! born_p))
 	map[i] = n;
@@ -1376,9 +1442,7 @@
       prev_born_p = born_p;
       prev_dead_p = dead_p;
     }
-  sbitmap_free (born_or_dead);
-  sbitmap_free (born);
-  sbitmap_free (dead);
+
   n++;
   if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
     fprintf (ira_dump_file, "Compressing live ranges: from %d to %d - %d%%\n",
@@ -1386,10 +1450,19 @@
   ira_max_point = n;
 
   FOR_EACH_OBJECT (obj, oi)
-    for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
+    for (r = OBJECT_LIVE_RANGES (obj), prev_r = NULL; r != NULL; r = next_r)
       {
+	next_r = r->next;
 	r->start = map[r->start];
 	r->finish = map[r->finish];
+	if (prev_r == NULL || prev_r->start > r->finish + 1)
+	  {
+	    prev_r = r;
+	    continue;
+	  }
+	prev_r->start = r->start;
+	prev_r->next = next_r;
+	ira_finish_live_range (r);
       }
 
   ira_free (map);
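For reference, the renumbering map built above compresses program points so that a run of points that are all births (or all deaths) collapses onto a single compressed point, and the remap loop then merges abutting ranges of the same object. A tiny standalone version of just the map construction (sbitmaps replaced by plain bool arrays, range merging omitted):

#include <stdbool.h>
#include <stdio.h>

#define MAX_POINT 8

/* Toy version of the point-compression map: consecutive used points that are
   all "births only" or all "deaths only" share one compressed point.  */
int
main (void)
{
  bool born[MAX_POINT] = { false }, dead[MAX_POINT] = { false };
  int map[MAX_POINT];

  /* Two live ranges: [1, 5] and [2, 5].  */
  born[1] = true; dead[5] = true;
  born[2] = true; dead[5] = true;

  int n = -1;
  bool prev_born_p = false, prev_dead_p = false;
  for (int i = 0; i < MAX_POINT; i++)
    {
      if (!born[i] && !dead[i])
        continue;        /* point never used: nothing to map */
      if ((prev_born_p && !prev_dead_p && born[i] && !dead[i])
          || (prev_dead_p && !prev_born_p && dead[i] && !born[i]))
        map[i] = n;      /* same kind of point as the previous one: share it */
      else
        map[i] = ++n;
      prev_born_p = born[i];
      prev_dead_p = dead[i];
    }

  /* [1, 5] becomes [0, 1] and [2, 5] becomes [0, 1] as well.  */
  printf ("1 -> %d, 2 -> %d, 5 -> %d; compressed max point = %d\n",
          map[1], map[2], map[5], n + 1);
  return 0;
}
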
@@ -1404,6 +1477,21 @@
   fprintf (f, "\n");
 }
 
+DEBUG_FUNCTION void
+debug (live_range &ref)
+{
+  ira_print_live_range_list (stderr, &ref);
+}
+
+DEBUG_FUNCTION void
+debug (live_range *ptr)
+{
+  if (ptr)
+    debug (*ptr);
+  else
+    fprintf (stderr, "<nil>\n");
+}
+
 /* Print live ranges R to stderr.  */
 void
 ira_debug_live_range_list (live_range_t r)
@@ -1424,6 +1512,7 @@
 {
   int n = ALLOCNO_NUM_OBJECTS (a);
   int i;
+
   for (i = 0; i < n; i++)
     {
       fprintf (f, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));