diff gcc/emit-rtl.c @ 131:84e7813d76e9

gcc-8.2
author mir3636
date Thu, 25 Oct 2018 07:37:49 +0900
parents 04ced10e8804
children 1830386684a0
--- a/gcc/emit-rtl.c	Fri Oct 27 22:46:09 2017 +0900
+++ b/gcc/emit-rtl.c	Thu Oct 25 07:37:49 2018 +0900
@@ -1,5 +1,5 @@
 /* Emit RTL for the GCC expander.
-   Copyright (C) 1987-2017 Free Software Foundation, Inc.
+   Copyright (C) 1987-2018 Free Software Foundation, Inc.
 
 This file is part of GCC.
 
@@ -60,6 +60,7 @@
 #include "stor-layout.h"
 #include "opts.h"
 #include "predict.h"
+#include "rtx-vector-builder.h"
 
 struct target_rtl default_target_rtl;
 #if SWITCHABLE_TARGET
@@ -148,6 +149,16 @@
 
 static GTY ((cache)) hash_table<const_wide_int_hasher> *const_wide_int_htab;
 
+struct const_poly_int_hasher : ggc_cache_ptr_hash<rtx_def>
+{
+  typedef std::pair<machine_mode, poly_wide_int_ref> compare_type;
+
+  static hashval_t hash (rtx x);
+  static bool equal (rtx x, const compare_type &y);
+};
+
+static GTY ((cache)) hash_table<const_poly_int_hasher> *const_poly_int_htab;
+
 /* A hash table storing register attribute structures.  */
 struct reg_attr_hasher : ggc_cache_ptr_hash<reg_attrs>
 {
@@ -186,7 +197,6 @@
 #endif
 static rtx lookup_const_double (rtx);
 static rtx lookup_const_fixed (rtx);
-static reg_attrs *get_reg_attrs (tree, int);
 static rtx gen_const_vector (machine_mode, int);
 static void copy_rtx_if_shared_1 (rtx *orig);
 
@@ -248,6 +258,31 @@
 }
 #endif
 
+/* Returns a hash code for CONST_POLY_INT X.  */
+
+hashval_t
+const_poly_int_hasher::hash (rtx x)
+{
+  inchash::hash h;
+  h.add_int (GET_MODE (x));
+  for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+    h.add_wide_int (CONST_POLY_INT_COEFFS (x)[i]);
+  return h.end ();
+}
+
+/* Returns nonzero if CONST_POLY_INT X is an rtx representation of Y.  */
+
+bool
+const_poly_int_hasher::equal (rtx x, const compare_type &y)
+{
+  if (GET_MODE (x) != y.first)
+    return false;
+  for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+    if (CONST_POLY_INT_COEFFS (x)[i] != y.second.coeffs[i])
+      return false;
+  return true;
+}
+
 /* Returns a hash code for X (which is really a CONST_DOUBLE).  */
 hashval_t
 const_double_hasher::hash (rtx x)
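The two hasher callbacks above let CONST_POLY_INT rtxes be interned in a GC-managed hash table, keyed on the mode together with every polynomial coefficient, so later requests for the same constant return the same rtx. A minimal stand-alone sketch of that interning pattern (the two-coefficient key, the intern() helper and the mixing constant are illustrative assumptions, not GCC's API):

#include <array>
#include <cstddef>
#include <cstdint>
#include <unordered_map>

// Stand-in for a CONST_POLY_INT key: a mode plus NUM_POLY_INT_COEFFS
// coefficients (two assumed here, as on AArch64 SVE).
struct poly_const_key
{
  int mode;
  std::array<int64_t, 2> coeffs;
  bool operator== (const poly_const_key &other) const
  { return mode == other.mode && coeffs == other.coeffs; }
};

struct poly_const_hasher
{
  std::size_t operator() (const poly_const_key &k) const
  {
    // Mix the mode with every coefficient, mirroring hash ()/equal () above.
    std::size_t h = std::hash<int> () (k.mode);
    for (int64_t c : k.coeffs)
      h = (h * 1000003u) ^ std::hash<int64_t> () (c);
    return h;
  }
};

// The first lookup of a key creates the entry; later lookups reuse it,
// much as *slot is reused in immed_wide_int_const further down.
static std::unordered_map<poly_const_key, int, poly_const_hasher> intern_table;

int intern (const poly_const_key &key)
{
  auto slot = intern_table.emplace (key, (int) intern_table.size ());
  return slot.first->second;
}
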
@@ -321,9 +356,9 @@
     return false;
   return (p->alias == q->alias
 	  && p->offset_known_p == q->offset_known_p
-	  && (!p->offset_known_p || p->offset == q->offset)
+	  && (!p->offset_known_p || known_eq (p->offset, q->offset))
 	  && p->size_known_p == q->size_known_p
-	  && (!p->size_known_p || p->size == q->size)
+	  && (!p->size_known_p || known_eq (p->size, q->size))
 	  && p->align == q->align
 	  && p->addrspace == q->addrspace
 	  && (p->expr == q->expr
@@ -358,7 +393,10 @@
 {
   const reg_attrs *const p = x;
 
-  return ((p->offset * 1000) ^ (intptr_t) p->decl);
+  inchash::hash h;
+  h.add_ptr (p->decl);
+  h.add_poly_hwi (p->offset);
+  return h.end ();
 }
 
 /* Returns nonzero if the value represented by X  is the same as that given by
@@ -370,19 +408,19 @@
   const reg_attrs *const p = x;
   const reg_attrs *const q = y;
 
-  return (p->decl == q->decl && p->offset == q->offset);
+  return (p->decl == q->decl && known_eq (p->offset, q->offset));
 }
 /* Allocate a new reg_attrs structure and insert it into the hash table if
    one identical to it is not already in the table.  We are doing this for
    MEM of mode MODE.  */
 
 static reg_attrs *
-get_reg_attrs (tree decl, int offset)
+get_reg_attrs (tree decl, poly_int64 offset)
 {
   reg_attrs attrs;
 
   /* If everything is the default, we can just return zero.  */
-  if (decl == 0 && offset == 0)
+  if (decl == 0 && known_eq (offset, 0))
     return 0;
 
   attrs.decl = decl;
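Plain ==, < and > comparisons on offsets and sizes are replaced throughout by poly_int predicates: known_eq/known_ge hold only when the relation is true for every runtime value of the length parameter, while maybe_ne/maybe_gt ask whether it could hold for some value. A rough stand-alone analogue for degree-1 values c0 + c1*N with N >= 0 (the helpers below are illustrative, not GCC's poly-int.h implementation):

#include <cstdint>

// A degree-1 polynomial value: c0 + c1 * N for an unknown runtime N >= 0.
struct poly1 { int64_t c0, c1; };

// Equal for every possible N only if both coefficients agree.
bool known_eq (poly1 a, poly1 b)
{ return a.c0 == b.c0 && a.c1 == b.c1; }

// a >= b for every N >= 0: both the constant part and the growth rate
// must dominate.
bool known_ge (poly1 a, poly1 b)
{ return a.c0 >= b.c0 && a.c1 >= b.c1; }

// a > b for at least one N >= 0: either a is already larger at N = 0,
// or it grows faster, so a sufficiently large N makes it overtake b.
bool maybe_gt (poly1 a, poly1 b)
{ return a.c0 > b.c0 || a.c1 > b.c1; }

// Example: {16, 16} (16 + 16N bytes) is known_ge {16, 0}, but only
// maybe_gt {32, 0}: it is larger for N >= 2 and smaller for N = 0.
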
@@ -489,9 +527,13 @@
 }
 
 rtx
-gen_int_mode (HOST_WIDE_INT c, machine_mode mode)
-{
-  return GEN_INT (trunc_int_for_mode (c, mode));
+gen_int_mode (poly_int64 c, machine_mode mode)
+{
+  c = trunc_int_for_mode (c, mode);
+  if (c.is_constant ())
+    return GEN_INT (c.coeffs[0]);
+  unsigned int prec = GET_MODE_PRECISION (as_a <scalar_mode> (mode));
+  return immed_wide_int_const (poly_wide_int::from (c, prec, SIGNED), mode);
 }
 
 /* CONST_DOUBLEs might be created from pairs of integers, or from
@@ -595,8 +637,8 @@
    a CONST_DOUBLE (if !TARGET_SUPPORTS_WIDE_INT) or a CONST_WIDE_INT
    (if TARGET_SUPPORTS_WIDE_INT).  */
 
-rtx
-immed_wide_int_const (const wide_int_ref &v, machine_mode mode)
+static rtx
+immed_wide_int_const_1 (const wide_int_ref &v, machine_mode mode)
 {
   unsigned int len = v.get_len ();
   /* Not scalar_int_mode because we also allow pointer bound modes.  */
@@ -683,6 +725,53 @@
 }
 #endif
 
+/* Return an rtx representation of C in mode MODE.  */
+
+rtx
+immed_wide_int_const (const poly_wide_int_ref &c, machine_mode mode)
+{
+  if (c.is_constant ())
+    return immed_wide_int_const_1 (c.coeffs[0], mode);
+
+  /* Not scalar_int_mode because we also allow pointer bound modes.  */
+  unsigned int prec = GET_MODE_PRECISION (as_a <scalar_mode> (mode));
+
+  /* Allow truncation but not extension since we do not know if the
+     number is signed or unsigned.  */
+  gcc_assert (prec <= c.coeffs[0].get_precision ());
+  poly_wide_int newc = poly_wide_int::from (c, prec, SIGNED);
+
+  /* See whether we already have an rtx for this constant.  */
+  inchash::hash h;
+  h.add_int (mode);
+  for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+    h.add_wide_int (newc.coeffs[i]);
+  const_poly_int_hasher::compare_type typed_value (mode, newc);
+  rtx *slot = const_poly_int_htab->find_slot_with_hash (typed_value,
+							h.end (), INSERT);
+  rtx x = *slot;
+  if (x)
+    return x;
+
+  /* Create a new rtx.  There's a choice to be made here between installing
+     the actual mode of the rtx or leaving it as VOIDmode (for consistency
+     with CONST_INT).  In practice the handling of the codes is different
+     enough that we get no benefit from using VOIDmode, and various places
+     assume that VOIDmode implies CONST_INT.  Using the real mode seems like
+     the right long-term direction anyway.  */
+  typedef trailing_wide_ints<NUM_POLY_INT_COEFFS> twi;
+  size_t extra_size = twi::extra_size (prec);
+  x = rtx_alloc_v (CONST_POLY_INT,
+		   sizeof (struct const_poly_int_def) + extra_size);
+  PUT_MODE (x, mode);
+  CONST_POLY_INT_COEFFS (x).set_precision (prec);
+  for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
+    CONST_POLY_INT_COEFFS (x)[i] = newc.coeffs[i];
+
+  *slot = x;
+  return x;
+}
+
 rtx
 gen_rtx_REG (machine_mode mode, unsigned int regno)
 {
@@ -803,19 +892,26 @@
 
 bool
 validate_subreg (machine_mode omode, machine_mode imode,
-		 const_rtx reg, unsigned int offset)
-{
-  unsigned int isize = GET_MODE_SIZE (imode);
-  unsigned int osize = GET_MODE_SIZE (omode);
+		 const_rtx reg, poly_uint64 offset)
+{
+  poly_uint64 isize = GET_MODE_SIZE (imode);
+  poly_uint64 osize = GET_MODE_SIZE (omode);
+
+  /* The sizes must be ordered, so that we know whether the subreg
+     is partial, paradoxical or complete.  */
+  if (!ordered_p (isize, osize))
+    return false;
 
   /* All subregs must be aligned.  */
-  if (offset % osize != 0)
+  if (!multiple_p (offset, osize))
     return false;
 
   /* The subreg offset cannot be outside the inner object.  */
-  if (offset >= isize)
+  if (maybe_ge (offset, isize))
     return false;
 
+  poly_uint64 regsize = REGMODE_NATURAL_SIZE (imode);
+
   /* ??? This should not be here.  Temporarily continue to allow word_mode
      subregs of anything.  The most common offender is (subreg:SI (reg:DF)).
      Generally, backends are doing something sketchy but it'll take time to
@@ -824,7 +920,7 @@
     ;
   /* ??? Similarly, e.g. with (subreg:DF (reg:TI)).  Though store_bit_field
      is the culprit here, and not the backends.  */
-  else if (osize >= UNITS_PER_WORD && isize >= osize)
+  else if (known_ge (osize, regsize) && known_ge (isize, osize))
     ;
   /* Allow component subregs of complex and vector.  Though given the below
      extraction rules, it's not always clear what that means.  */
@@ -843,7 +939,7 @@
      (subreg:SI (reg:DF) 0) isn't.  */
   else if (FLOAT_MODE_P (imode) || FLOAT_MODE_P (omode))
     {
-      if (! (isize == osize
+      if (! (known_eq (isize, osize)
 	     /* LRA can use subreg to store a floating point value in
 		an integer mode.  Although the floating point and the
 		integer modes need the same number of hard registers,
@@ -855,8 +951,8 @@
     }
 
   /* Paradoxical subregs must have offset zero.  */
-  if (osize > isize)
-    return offset == 0;
+  if (maybe_gt (osize, isize))
+    return known_eq (offset, 0U);
 
   /* This is a normal subreg.  Verify that the offset is representable.  */
 
@@ -875,25 +971,41 @@
       return subreg_offset_representable_p (regno, imode, offset, omode);
     }
 
+  /* The outer size must be ordered wrt the register size, otherwise
+     we wouldn't know at compile time how many registers the outer
+     mode occupies.  */
+  if (!ordered_p (osize, regsize))
+    return false;
+
   /* For pseudo registers, we want most of the same checks.  Namely:
-     If the register no larger than a word, the subreg must be lowpart.
-     If the register is larger than a word, the subreg must be the lowpart
-     of a subword.  A subreg does *not* perform arbitrary bit extraction.
-     Given that we've already checked mode/offset alignment, we only have
-     to check subword subregs here.  */
-  if (osize < UNITS_PER_WORD
+
+     Assume that the pseudo register will be allocated to hard registers
+     that can hold REGSIZE bytes each.  If OSIZE is not a multiple of REGSIZE,
+     the remainder must correspond to the lowpart of the containing hard
+     register.  If BYTES_BIG_ENDIAN, the lowpart is at the highest offset,
+     otherwise it is at the lowest offset.
+
+     Given that we've already checked the mode and offset alignment,
+     we only have to check subblock subregs here.  */
+  if (maybe_lt (osize, regsize)
       && ! (lra_in_progress && (FLOAT_MODE_P (imode) || FLOAT_MODE_P (omode))))
     {
-      machine_mode wmode = isize > UNITS_PER_WORD ? word_mode : imode;
-      unsigned int low_off = subreg_lowpart_offset (omode, wmode);
-      if (offset % UNITS_PER_WORD != low_off)
+      /* It is invalid for the target to pick a register size for a mode
+	 that isn't ordered wrt the size of that mode.  */
+      poly_uint64 block_size = ordered_min (isize, regsize);
+      unsigned int start_reg;
+      poly_uint64 offset_within_reg;
+      if (!can_div_trunc_p (offset, block_size, &start_reg, &offset_within_reg)
+	  || (BYTES_BIG_ENDIAN
+	      ? maybe_ne (offset_within_reg, block_size - osize)
+	      : maybe_ne (offset_within_reg, 0U)))
 	return false;
     }
   return true;
 }
 
 rtx
-gen_rtx_SUBREG (machine_mode mode, rtx reg, int offset)
+gen_rtx_SUBREG (machine_mode mode, rtx reg, poly_uint64 offset)
 {
   gcc_assert (validate_subreg (mode, GET_MODE (reg), reg, offset));
   return gen_rtx_raw_SUBREG (mode, reg, offset);
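For variable-sized (poly_uint64) modes, the subblock rule above can no longer use a plain modulo on UNITS_PER_WORD; can_div_trunc_p splits the offset into a register index and an offset within that register, which must select the lowpart. With constant sizes the check reduces to the sketch below (hypothetical helper name, a constant-size simplification of the code above):

#include <cstdint>

// A subreg that reads OSIZE bytes at OFFSET from a value built out of
// BLOCK_SIZE-byte registers must start at the lowpart of whichever
// register the offset lands in: byte 0 of the block on little-endian,
// the last OSIZE bytes of the block on big-endian.
bool subreg_offset_is_lowpart (uint64_t offset, uint64_t osize,
                               uint64_t block_size, bool bytes_big_endian)
{
  uint64_t offset_within_block = offset % block_size;
  return bytes_big_endian
         ? offset_within_block == block_size - osize
         : offset_within_block == 0;
}

// Example with 8-byte registers and a 4-byte outer mode:
//   little-endian: offsets 0 and 8 are valid, offset 4 is rejected;
//   big-endian:    offsets 4 and 12 are valid, offset 0 is rejected.
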
@@ -994,7 +1106,7 @@
    paradoxical lowpart, in which case the offset will be negative
    on big-endian targets.  */
 
-int
+poly_int64
 byte_lowpart_offset (machine_mode outer_mode,
 		     machine_mode inner_mode)
 {
@@ -1008,13 +1120,13 @@
    from address X.  For paradoxical big-endian subregs this is a
    negative value, otherwise it's the same as OFFSET.  */
 
-int
+poly_int64
 subreg_memory_offset (machine_mode outer_mode, machine_mode inner_mode,
-		      unsigned int offset)
+		      poly_uint64 offset)
 {
   if (paradoxical_subreg_p (outer_mode, inner_mode))
     {
-      gcc_assert (offset == 0);
+      gcc_assert (known_eq (offset, 0U));
       return -subreg_lowpart_offset (inner_mode, outer_mode);
     }
   return offset;
@@ -1024,7 +1136,7 @@
    if SUBREG_REG (X) were stored in memory.  The only significant thing
    about the current SUBREG_REG is its mode.  */
 
-int
+poly_int64
 subreg_memory_offset (const_rtx x)
 {
   return subreg_memory_offset (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
@@ -1124,10 +1236,10 @@
    to the REG_OFFSET.  */
 
 static void
-update_reg_offset (rtx new_rtx, rtx reg, int offset)
+update_reg_offset (rtx new_rtx, rtx reg, poly_int64 offset)
 {
   REG_ATTRS (new_rtx) = get_reg_attrs (REG_EXPR (reg),
-				   REG_OFFSET (reg) + offset);
+				       REG_OFFSET (reg) + offset);
 }
 
 /* Generate a register with same attributes as REG, but with OFFSET
@@ -1135,7 +1247,7 @@
 
 rtx
 gen_rtx_REG_offset (rtx reg, machine_mode mode, unsigned int regno,
-		    int offset)
+		    poly_int64 offset)
 {
   rtx new_rtx = gen_rtx_REG (mode, regno);
 
@@ -1171,7 +1283,7 @@
 void
 set_reg_attrs_from_value (rtx reg, rtx x)
 {
-  int offset;
+  poly_int64 offset;
   bool can_be_reg_pointer = true;
 
   /* Don't call mark_reg_pointer for incompatible pointer sign
@@ -1419,34 +1531,45 @@
 rtx
 gen_lowpart_common (machine_mode mode, rtx x)
 {
-  int msize = GET_MODE_SIZE (mode);
-  int xsize;
+  poly_uint64 msize = GET_MODE_SIZE (mode);
   machine_mode innermode;
 
   /* Unfortunately, this routine doesn't take a parameter for the mode of X,
      so we have to make one up.  Yuk.  */
   innermode = GET_MODE (x);
   if (CONST_INT_P (x)
-      && msize * BITS_PER_UNIT <= HOST_BITS_PER_WIDE_INT)
+      && known_le (msize * BITS_PER_UNIT,
+		   (unsigned HOST_WIDE_INT) HOST_BITS_PER_WIDE_INT))
     innermode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
   else if (innermode == VOIDmode)
     innermode = int_mode_for_size (HOST_BITS_PER_DOUBLE_INT, 0).require ();
 
-  xsize = GET_MODE_SIZE (innermode);
-
   gcc_assert (innermode != VOIDmode && innermode != BLKmode);
 
   if (innermode == mode)
     return x;
 
-  /* MODE must occupy no more words than the mode of X.  */
-  if ((msize + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD
-      > ((xsize + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
+  /* The size of the outer and inner modes must be ordered.  */
+  poly_uint64 xsize = GET_MODE_SIZE (innermode);
+  if (!ordered_p (msize, xsize))
     return 0;
 
-  /* Don't allow generating paradoxical FLOAT_MODE subregs.  */
-  if (SCALAR_FLOAT_MODE_P (mode) && msize > xsize)
-    return 0;
+  if (SCALAR_FLOAT_MODE_P (mode))
+    {
+      /* Don't allow paradoxical FLOAT_MODE subregs.  */
+      if (maybe_gt (msize, xsize))
+	return 0;
+    }
+  else
+    {
+      /* MODE must occupy no more of the underlying registers than X.  */
+      poly_uint64 regsize = REGMODE_NATURAL_SIZE (innermode);
+      unsigned int mregs, xregs;
+      if (!can_div_away_from_zero_p (msize, regsize, &mregs)
+	  || !can_div_away_from_zero_p (xsize, regsize, &xregs)
+	  || mregs > xregs)
+	return 0;
+    }
 
   scalar_int_mode int_mode, int_innermode, from_mode;
   if ((GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
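The word-count test that gen_lowpart_common used to do is recast as a register-count test: the outer mode may not need more of the underlying registers than X occupies. can_div_away_from_zero_p performs, roughly, a ceiling division that can fail when the poly_uint64 result is not a compile-time constant; with constant sizes the check is just the comparison below (illustrative only):

#include <cstdint>

// Number of REGSIZE-byte registers needed by each mode, rounded up;
// the lowpart is representable only if MODE needs no more than X.
bool lowpart_fits_in_registers (uint64_t msize, uint64_t xsize,
                                uint64_t regsize)
{
  uint64_t mregs = (msize + regsize - 1) / regsize;
  uint64_t xregs = (xsize + regsize - 1) / regsize;
  return mregs <= xregs;
}

// Example with regsize = 8: a 4-byte lowpart of a 16-byte value needs
// 1 of 2 registers (allowed); asking for 24 bytes would need 3 (rejected).
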
@@ -1471,7 +1594,8 @@
     }
   else if (GET_CODE (x) == SUBREG || REG_P (x)
 	   || GET_CODE (x) == CONCAT || GET_CODE (x) == CONST_VECTOR
-	   || CONST_DOUBLE_AS_FLOAT_P (x) || CONST_SCALAR_INT_P (x))
+	   || CONST_DOUBLE_AS_FLOAT_P (x) || CONST_SCALAR_INT_P (x)
+	   || CONST_POLY_INT_P (x))
     return lowpart_subreg (mode, x, innermode);
 
   /* Otherwise, we can't do this.  */
@@ -1481,13 +1605,13 @@
 rtx
 gen_highpart (machine_mode mode, rtx x)
 {
-  unsigned int msize = GET_MODE_SIZE (mode);
+  poly_uint64 msize = GET_MODE_SIZE (mode);
   rtx result;
 
   /* This case loses if X is a subreg.  To catch bugs early,
      complain if an invalid MODE is used even in other cases.  */
-  gcc_assert (msize <= UNITS_PER_WORD
-	      || msize == (unsigned int) GET_MODE_UNIT_SIZE (GET_MODE (x)));
+  gcc_assert (known_le (msize, (unsigned int) UNITS_PER_WORD)
+	      || known_eq (msize, GET_MODE_UNIT_SIZE (GET_MODE (x))));
 
   result = simplify_gen_subreg (mode, x, GET_MODE (x),
 				subreg_highpart_offset (mode, GET_MODE (x)));
@@ -1522,10 +1646,11 @@
 /* Return the SUBREG_BYTE for a lowpart subreg whose outer mode has
    OUTER_BYTES bytes and whose inner mode has INNER_BYTES bytes.  */
 
-unsigned int
-subreg_size_lowpart_offset (unsigned int outer_bytes, unsigned int inner_bytes)
-{
-  if (outer_bytes > inner_bytes)
+poly_uint64
+subreg_size_lowpart_offset (poly_uint64 outer_bytes, poly_uint64 inner_bytes)
+{
+  gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
+  if (maybe_gt (outer_bytes, inner_bytes))
     /* Paradoxical subregs always have a SUBREG_BYTE of 0.  */
     return 0;
 
@@ -1540,11 +1665,10 @@
 /* Return the SUBREG_BYTE for a highpart subreg whose outer mode has
    OUTER_BYTES bytes and whose inner mode has INNER_BYTES bytes.  */
 
-unsigned int
-subreg_size_highpart_offset (unsigned int outer_bytes,
-			     unsigned int inner_bytes)
-{
-  gcc_assert (inner_bytes >= outer_bytes);
+poly_uint64
+subreg_size_highpart_offset (poly_uint64 outer_bytes, poly_uint64 inner_bytes)
+{
+  gcc_assert (known_ge (inner_bytes, outer_bytes));
 
   if (BYTES_BIG_ENDIAN && WORDS_BIG_ENDIAN)
     return 0;
@@ -1568,8 +1692,9 @@
   else if (GET_MODE (SUBREG_REG (x)) == VOIDmode)
     return 0;
 
-  return (subreg_lowpart_offset (GET_MODE (x), GET_MODE (SUBREG_REG (x)))
-	  == SUBREG_BYTE (x));
+  return known_eq (subreg_lowpart_offset (GET_MODE (x),
+					  GET_MODE (SUBREG_REG (x))),
+		   SUBREG_BYTE (x));
 }
 
 /* Return subword OFFSET of operand OP.
@@ -1598,7 +1723,8 @@
  */
 
 rtx
-operand_subword (rtx op, unsigned int offset, int validate_address, machine_mode mode)
+operand_subword (rtx op, poly_uint64 offset, int validate_address,
+		 machine_mode mode)
 {
   if (mode == VOIDmode)
     mode = GET_MODE (op);
@@ -1607,12 +1733,12 @@
 
   /* If OP is narrower than a word, fail.  */
   if (mode != BLKmode
-      && (GET_MODE_SIZE (mode) < UNITS_PER_WORD))
+      && maybe_lt (GET_MODE_SIZE (mode), UNITS_PER_WORD))
     return 0;
 
   /* If we want a word outside OP, return zero.  */
   if (mode != BLKmode
-      && (offset + 1) * UNITS_PER_WORD > GET_MODE_SIZE (mode))
+      && maybe_gt ((offset + 1) * UNITS_PER_WORD, GET_MODE_SIZE (mode)))
     return const0_rtx;
 
   /* Form a new MEM at the requested address.  */
@@ -1646,7 +1772,7 @@
    MODE is the mode of OP, in case it is CONST_INT.  */
 
 rtx
-operand_subword_force (rtx op, unsigned int offset, machine_mode mode)
+operand_subword_force (rtx op, poly_uint64 offset, machine_mode mode)
 {
   rtx result = operand_subword (op, offset, 1, mode);
 
@@ -1669,6 +1795,17 @@
   return result;
 }
 
+mem_attrs::mem_attrs ()
+  : expr (NULL_TREE),
+    offset (0),
+    size (0),
+    alias (0),
+    align (0),
+    addrspace (ADDR_SPACE_GENERIC),
+    offset_known_p (false),
+    size_known_p (false)
+{}
+
 /* Returns 1 if both MEM_EXPRs can be considered equal
    and 0 otherwise.  */
 
@@ -1695,7 +1832,7 @@
 get_mem_align_offset (rtx mem, unsigned int align)
 {
   tree expr;
-  unsigned HOST_WIDE_INT offset;
+  poly_uint64 offset;
 
   /* This function can't use
      if (!MEM_EXPR (mem) || !MEM_OFFSET_KNOWN_P (mem)
@@ -1737,12 +1874,13 @@
 	  tree byte_offset = component_ref_field_offset (expr);
 	  tree bit_offset = DECL_FIELD_BIT_OFFSET (field);
 
+	  poly_uint64 suboffset;
 	  if (!byte_offset
-	      || !tree_fits_uhwi_p (byte_offset)
+	      || !poly_int_tree_p (byte_offset, &suboffset)
 	      || !tree_fits_uhwi_p (bit_offset))
 	    return -1;
 
-	  offset += tree_to_uhwi (byte_offset);
+	  offset += suboffset;
 	  offset += tree_to_uhwi (bit_offset) / BITS_PER_UNIT;
 
 	  if (inner == NULL_TREE)
@@ -1766,7 +1904,10 @@
   else
     return -1;
 
-  return offset & ((align / BITS_PER_UNIT) - 1);
+  HOST_WIDE_INT misalign;
+  if (!known_misalignment (offset, align / BITS_PER_UNIT, &misalign))
+    return -1;
+  return misalign;
 }
 
 /* Given REF (a MEM) and T, either the type of X or the expression
@@ -1776,9 +1917,9 @@
 
 void
 set_mem_attributes_minus_bitpos (rtx ref, tree t, int objectp,
-				 HOST_WIDE_INT bitpos)
-{
-  HOST_WIDE_INT apply_bitpos = 0;
+				 poly_int64 bitpos)
+{
+  poly_int64 apply_bitpos = 0;
   tree type;
   struct mem_attrs attrs, *defattrs, *refattrs;
   addr_space_t as;
@@ -1799,8 +1940,6 @@
      set_mem_attributes.  */
   gcc_assert (!DECL_P (t) || ref != DECL_RTL_IF_SET (t));
 
-  memset (&attrs, 0, sizeof (attrs));
-
   /* Get the alias set from the expression or type (perhaps using a
      front-end routine) and use it.  */
   attrs.alias = get_alias_set (t);
@@ -1970,10 +2109,9 @@
 	    {
 	      attrs.expr = t2;
 	      attrs.offset_known_p = false;
-	      if (tree_fits_uhwi_p (off_tree))
+	      if (poly_int_tree_p (off_tree, &attrs.offset))
 		{
 		  attrs.offset_known_p = true;
-		  attrs.offset = tree_to_uhwi (off_tree);
 		  apply_bitpos = bitpos;
 		}
 	    }
@@ -1994,27 +2132,29 @@
       unsigned int obj_align;
       unsigned HOST_WIDE_INT obj_bitpos;
       get_object_alignment_1 (t, &obj_align, &obj_bitpos);
-      obj_bitpos = (obj_bitpos - bitpos) & (obj_align - 1);
-      if (obj_bitpos != 0)
-	obj_align = least_bit_hwi (obj_bitpos);
+      unsigned int diff_align = known_alignment (obj_bitpos - bitpos);
+      if (diff_align != 0)
+	obj_align = MIN (obj_align, diff_align);
       attrs.align = MAX (attrs.align, obj_align);
     }
 
-  if (tree_fits_uhwi_p (new_size))
+  poly_uint64 const_size;
+  if (poly_int_tree_p (new_size, &const_size))
     {
       attrs.size_known_p = true;
-      attrs.size = tree_to_uhwi (new_size);
+      attrs.size = const_size;
     }
 
   /* If we modified OFFSET based on T, then subtract the outstanding
      bit position offset.  Similarly, increase the size of the accessed
      object to contain the negative offset.  */
-  if (apply_bitpos)
+  if (maybe_ne (apply_bitpos, 0))
     {
       gcc_assert (attrs.offset_known_p);
-      attrs.offset -= apply_bitpos / BITS_PER_UNIT;
+      poly_int64 bytepos = bits_to_bytes_round_down (apply_bitpos);
+      attrs.offset -= bytepos;
       if (attrs.size_known_p)
-	attrs.size += apply_bitpos / BITS_PER_UNIT;
+	attrs.size += bytepos;
     }
 
   /* Now set the attributes we computed above.  */
@@ -2033,11 +2173,9 @@
 void
 set_mem_alias_set (rtx mem, alias_set_type set)
 {
-  struct mem_attrs attrs;
-
   /* If the new and old alias sets don't conflict, something is wrong.  */
   gcc_checking_assert (alias_sets_conflict_p (set, MEM_ALIAS_SET (mem)));
-  attrs = *get_mem_attrs (mem);
+  mem_attrs attrs (*get_mem_attrs (mem));
   attrs.alias = set;
   set_mem_attrs (mem, &attrs);
 }
@@ -2047,9 +2185,7 @@
 void
 set_mem_addr_space (rtx mem, addr_space_t addrspace)
 {
-  struct mem_attrs attrs;
-
-  attrs = *get_mem_attrs (mem);
+  mem_attrs attrs (*get_mem_attrs (mem));
   attrs.addrspace = addrspace;
   set_mem_attrs (mem, &attrs);
 }
@@ -2059,9 +2195,7 @@
 void
 set_mem_align (rtx mem, unsigned int align)
 {
-  struct mem_attrs attrs;
-
-  attrs = *get_mem_attrs (mem);
+  mem_attrs attrs (*get_mem_attrs (mem));
   attrs.align = align;
   set_mem_attrs (mem, &attrs);
 }
@@ -2071,9 +2205,7 @@
 void
 set_mem_expr (rtx mem, tree expr)
 {
-  struct mem_attrs attrs;
-
-  attrs = *get_mem_attrs (mem);
+  mem_attrs attrs (*get_mem_attrs (mem));
   attrs.expr = expr;
   set_mem_attrs (mem, &attrs);
 }
@@ -2081,11 +2213,9 @@
 /* Set the offset of MEM to OFFSET.  */
 
 void
-set_mem_offset (rtx mem, HOST_WIDE_INT offset)
-{
-  struct mem_attrs attrs;
-
-  attrs = *get_mem_attrs (mem);
+set_mem_offset (rtx mem, poly_int64 offset)
+{
+  mem_attrs attrs (*get_mem_attrs (mem));
   attrs.offset_known_p = true;
   attrs.offset = offset;
   set_mem_attrs (mem, &attrs);
@@ -2096,9 +2226,7 @@
 void
 clear_mem_offset (rtx mem)
 {
-  struct mem_attrs attrs;
-
-  attrs = *get_mem_attrs (mem);
+  mem_attrs attrs (*get_mem_attrs (mem));
   attrs.offset_known_p = false;
   set_mem_attrs (mem, &attrs);
 }
@@ -2106,11 +2234,9 @@
 /* Set the size of MEM to SIZE.  */
 
 void
-set_mem_size (rtx mem, HOST_WIDE_INT size)
-{
-  struct mem_attrs attrs;
-
-  attrs = *get_mem_attrs (mem);
+set_mem_size (rtx mem, poly_int64 size)
+{
+  mem_attrs attrs (*get_mem_attrs (mem));
   attrs.size_known_p = true;
   attrs.size = size;
   set_mem_attrs (mem, &attrs);
@@ -2121,9 +2247,7 @@
 void
 clear_mem_size (rtx mem)
 {
-  struct mem_attrs attrs;
-
-  attrs = *get_mem_attrs (mem);
+  mem_attrs attrs (*get_mem_attrs (mem));
   attrs.size_known_p = false;
   set_mem_attrs (mem, &attrs);
 }
@@ -2186,9 +2310,9 @@
 {
   rtx new_rtx = change_address_1 (memref, mode, addr, 1, false);
   machine_mode mmode = GET_MODE (new_rtx);
-  struct mem_attrs attrs, *defattrs;
-
-  attrs = *get_mem_attrs (memref);
+  struct mem_attrs *defattrs;
+
+  mem_attrs attrs (*get_mem_attrs (memref));
   defattrs = mode_mem_attrs[(int) mmode];
   attrs.expr = NULL_TREE;
   attrs.offset_known_p = false;
@@ -2223,15 +2347,14 @@
    has no inherent size.  */
 
 rtx
-adjust_address_1 (rtx memref, machine_mode mode, HOST_WIDE_INT offset,
+adjust_address_1 (rtx memref, machine_mode mode, poly_int64 offset,
 		  int validate, int adjust_address, int adjust_object,
-		  HOST_WIDE_INT size)
+		  poly_int64 size)
 {
   rtx addr = XEXP (memref, 0);
   rtx new_rtx;
   scalar_int_mode address_mode;
-  int pbits;
-  struct mem_attrs attrs = *get_mem_attrs (memref), *defattrs;
+  struct mem_attrs attrs (*get_mem_attrs (memref)), *defattrs;
   unsigned HOST_WIDE_INT max_align;
 #ifdef POINTERS_EXTEND_UNSIGNED
   scalar_int_mode pointer_mode
@@ -2248,8 +2371,10 @@
     size = defattrs->size;
 
   /* If there are no changes, just return the original memory reference.  */
-  if (mode == GET_MODE (memref) && !offset
-      && (size == 0 || (attrs.size_known_p && attrs.size == size))
+  if (mode == GET_MODE (memref)
+      && known_eq (offset, 0)
+      && (known_eq (size, 0)
+	  || (attrs.size_known_p && known_eq (attrs.size, size)))
       && (!validate || memory_address_addr_space_p (mode, addr,
 						    attrs.addrspace)))
     return memref;
@@ -2262,22 +2387,17 @@
   /* Convert a possibly large offset to a signed value within the
      range of the target address space.  */
   address_mode = get_address_mode (memref);
-  pbits = GET_MODE_BITSIZE (address_mode);
-  if (HOST_BITS_PER_WIDE_INT > pbits)
-    {
-      int shift = HOST_BITS_PER_WIDE_INT - pbits;
-      offset = (((HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) offset << shift))
-		>> shift);
-    }
+  offset = trunc_int_for_mode (offset, address_mode);
 
   if (adjust_address)
     {
       /* If MEMREF is a LO_SUM and the offset is within the alignment of the
 	 object, we can merge it into the LO_SUM.  */
-      if (GET_MODE (memref) != BLKmode && GET_CODE (addr) == LO_SUM
-	  && offset >= 0
-	  && (unsigned HOST_WIDE_INT) offset
-	      < GET_MODE_ALIGNMENT (GET_MODE (memref)) / BITS_PER_UNIT)
+      if (GET_MODE (memref) != BLKmode
+	  && GET_CODE (addr) == LO_SUM
+	  && known_in_range_p (offset,
+			       0, (GET_MODE_ALIGNMENT (GET_MODE (memref))
+				   / BITS_PER_UNIT)))
 	addr = gen_rtx_LO_SUM (address_mode, XEXP (addr, 0),
 			       plus_constant (address_mode,
 					      XEXP (addr, 1), offset));
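The open-coded shift pair that used to narrow OFFSET is replaced by trunc_int_for_mode, which also copes with poly_int64 offsets. For a constant offset both forms compute the same thing: keep the low PBITS bits and sign-extend them, so an address-sized unsigned offset becomes the equivalent signed displacement. A small sketch (assumes two's-complement arithmetic right shifts, as GCC's host code does):

#include <cstdint>

// Sign-extend the low PBITS bits of OFFSET, the effect of the deleted
// shift sequence and of trunc_int_for_mode on a constant offset.
int64_t truncate_offset_to_bits (int64_t offset, int pbits)
{
  if (pbits >= 64)
    return offset;                 // already fits the host width
  int shift = 64 - pbits;
  return (int64_t) ((uint64_t) offset << shift) >> shift;
}

// Example with a 32-bit address space (pbits = 32):
//   0xfffffffc becomes -4, while 4 stays 4.
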
@@ -2288,7 +2408,7 @@
       else if (POINTERS_EXTEND_UNSIGNED > 0
 	       && GET_CODE (addr) == ZERO_EXTEND
 	       && GET_MODE (XEXP (addr, 0)) == pointer_mode
-	       && trunc_int_for_mode (offset, pointer_mode) == offset)
+	       && known_eq (trunc_int_for_mode (offset, pointer_mode), offset))
 	addr = gen_rtx_ZERO_EXTEND (address_mode,
 				    plus_constant (pointer_mode,
 						   XEXP (addr, 0), offset));
@@ -2301,7 +2421,7 @@
 
   /* If the address is a REG, change_address_1 rightfully returns memref,
      but this would destroy memref's MEM_ATTRS.  */
-  if (new_rtx == memref && offset != 0)
+  if (new_rtx == memref && maybe_ne (offset, 0))
     new_rtx = copy_rtx (new_rtx);
 
   /* Conservatively drop the object if we don't know where we start from.  */
@@ -2318,7 +2438,7 @@
       attrs.offset += offset;
 
       /* Drop the object if the new left end is not within its bounds.  */
-      if (adjust_object && attrs.offset < 0)
+      if (adjust_object && maybe_lt (attrs.offset, 0))
 	{
 	  attrs.expr = NULL_TREE;
 	  attrs.alias = 0;
@@ -2328,16 +2448,16 @@
   /* Compute the new alignment by taking the MIN of the alignment and the
      lowest-order set bit in OFFSET, but don't change the alignment if OFFSET
      is zero.  */
-  if (offset != 0)
+  if (maybe_ne (offset, 0))
     {
-      max_align = least_bit_hwi (offset) * BITS_PER_UNIT;
+      max_align = known_alignment (offset) * BITS_PER_UNIT;
       attrs.align = MIN (attrs.align, max_align);
     }
 
-  if (size)
+  if (maybe_ne (size, 0))
     {
       /* Drop the object if the new right end is not within its bounds.  */
-      if (adjust_object && (offset + size) > attrs.size)
+      if (adjust_object && maybe_gt (offset + size, attrs.size))
 	{
 	  attrs.expr = NULL_TREE;
 	  attrs.alias = 0;
@@ -2365,7 +2485,7 @@
 
 rtx
 adjust_automodify_address_1 (rtx memref, machine_mode mode, rtx addr,
-			     HOST_WIDE_INT offset, int validate)
+			     poly_int64 offset, int validate)
 {
   memref = change_address_1 (memref, VOIDmode, addr, validate, false);
   return adjust_address_1 (memref, mode, offset, validate, 0, 0, 0);
@@ -2380,9 +2500,9 @@
 {
   rtx new_rtx, addr = XEXP (memref, 0);
   machine_mode address_mode;
-  struct mem_attrs attrs, *defattrs;
-
-  attrs = *get_mem_attrs (memref);
+  struct mem_attrs *defattrs;
+
+  mem_attrs attrs (*get_mem_attrs (memref));
   address_mode = get_address_mode (memref);
   new_rtx = simplify_gen_binary (PLUS, address_mode, addr, offset);
 
@@ -2450,17 +2570,16 @@
    operations plus masking logic.  */
 
 rtx
-widen_memory_access (rtx memref, machine_mode mode, HOST_WIDE_INT offset)
+widen_memory_access (rtx memref, machine_mode mode, poly_int64 offset)
 {
   rtx new_rtx = adjust_address_1 (memref, mode, offset, 1, 1, 0, 0);
-  struct mem_attrs attrs;
-  unsigned int size = GET_MODE_SIZE (mode);
+  poly_uint64 size = GET_MODE_SIZE (mode);
 
   /* If there are no changes, just return the original memory reference.  */
   if (new_rtx == memref)
     return new_rtx;
 
-  attrs = *get_mem_attrs (new_rtx);
+  mem_attrs attrs (*get_mem_attrs (new_rtx));
 
   /* If we don't know what offset we were at within the expression, then
      we can't know if we've overstepped the bounds.  */
@@ -2482,28 +2601,30 @@
 
 	  /* Is the field at least as large as the access?  If so, ok,
 	     otherwise strip back to the containing structure.  */
-	  if (TREE_CODE (DECL_SIZE_UNIT (field)) == INTEGER_CST
-	      && compare_tree_int (DECL_SIZE_UNIT (field), size) >= 0
-	      && attrs.offset >= 0)
+	  if (poly_int_tree_p (DECL_SIZE_UNIT (field))
+	      && known_ge (wi::to_poly_offset (DECL_SIZE_UNIT (field)), size)
+	      && known_ge (attrs.offset, 0))
 	    break;
 
-	  if (! tree_fits_uhwi_p (offset))
+	  poly_uint64 suboffset;
+	  if (!poly_int_tree_p (offset, &suboffset))
 	    {
 	      attrs.expr = NULL_TREE;
 	      break;
 	    }
 
 	  attrs.expr = TREE_OPERAND (attrs.expr, 0);
-	  attrs.offset += tree_to_uhwi (offset);
+	  attrs.offset += suboffset;
 	  attrs.offset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
 			   / BITS_PER_UNIT);
 	}
       /* Similarly for the decl.  */
       else if (DECL_P (attrs.expr)
 	       && DECL_SIZE_UNIT (attrs.expr)
-	       && TREE_CODE (DECL_SIZE_UNIT (attrs.expr)) == INTEGER_CST
-	       && compare_tree_int (DECL_SIZE_UNIT (attrs.expr), size) >= 0
-	       && (! attrs.offset_known_p || attrs.offset >= 0))
+	       && poly_int_tree_p (DECL_SIZE_UNIT (attrs.expr))
+	       && known_ge (wi::to_poly_offset (DECL_SIZE_UNIT (attrs.expr)),
+			   size)
+	       && known_ge (attrs.offset, 0))
 	break;
       else
 	{
@@ -2534,7 +2655,6 @@
 {
   tree d = spill_slot_decl;
   rtx rd;
-  struct mem_attrs attrs;
 
   if (d || !force_build_p)
     return d;
@@ -2548,7 +2668,7 @@
 
   rd = gen_rtx_MEM (BLKmode, frame_pointer_rtx);
   MEM_NOTRAP_P (rd) = 1;
-  attrs = *mode_mem_attrs[(int) BLKmode];
+  mem_attrs attrs (*mode_mem_attrs[(int) BLKmode]);
   attrs.alias = new_alias_set ();
   attrs.expr = d;
   set_mem_attrs (rd, &attrs);
@@ -2566,10 +2686,9 @@
 void
 set_mem_attrs_for_spill (rtx mem)
 {
-  struct mem_attrs attrs;
   rtx addr;
 
-  attrs = *get_mem_attrs (mem);
+  mem_attrs attrs (*get_mem_attrs (mem));
   attrs.expr = get_spill_slot_decl (true);
   attrs.alias = MEM_ALIAS_SET (DECL_RTL (attrs.expr));
   attrs.addrspace = ADDR_SPACE_GENERIC;
@@ -2579,10 +2698,7 @@
      with perhaps the plus missing for offset = 0.  */
   addr = XEXP (mem, 0);
   attrs.offset_known_p = true;
-  attrs.offset = 0;
-  if (GET_CODE (addr) == PLUS
-      && CONST_INT_P (XEXP (addr, 1)))
-    attrs.offset = INTVAL (XEXP (addr, 1));
+  strip_offset (addr, &attrs.offset);
 
   set_mem_attrs (mem, &attrs);
   MEM_NOTRAP_P (mem) = 1;
@@ -2749,6 +2865,7 @@
       /* SCRATCH must be shared because they represent distinct values.  */
       return;
     case CLOBBER:
+    case CLOBBER_HIGH:
       /* Share clobbers of hard registers (like cc0), but do not share pseudo reg
          clobbers or clobbers of hard registers that originated as pseudos.
          This is needed to allow safe register renaming.  */
@@ -3002,6 +3119,7 @@
       /* SCRATCH must be shared because they represent distinct values.  */
       return;
     case CLOBBER:
+    case CLOBBER_HIGH:
       /* Share clobbers of hard registers (like cc0), but do not share pseudo reg
          clobbers or clobbers of hard registers that originated as pseudos.
          This is needed to allow safe register renaming.  */
@@ -3355,20 +3473,17 @@
   return insn;
 }
 
-/* Return the next insn after INSN that is not a NOTE, but stop the
-   search before we enter another basic block.  This routine does not
-   look inside SEQUENCEs.  */
+/* Return the next insn after INSN that is not a DEBUG_INSN.  This
+   routine does not look inside SEQUENCEs.  */
 
 rtx_insn *
-next_nonnote_insn_bb (rtx_insn *insn)
+next_nondebug_insn (rtx_insn *insn)
 {
   while (insn)
     {
       insn = NEXT_INSN (insn);
-      if (insn == 0 || !NOTE_P (insn))
+      if (insn == 0 || !DEBUG_INSN_P (insn))
 	break;
-      if (NOTE_INSN_BASIC_BLOCK_P (insn))
-	return NULL;
     }
 
   return insn;
@@ -3390,42 +3505,6 @@
   return insn;
 }
 
-/* Return the previous insn before INSN that is not a NOTE, but stop
-   the search before we enter another basic block.  This routine does
-   not look inside SEQUENCEs.  */
-
-rtx_insn *
-prev_nonnote_insn_bb (rtx_insn *insn)
-{
-
-  while (insn)
-    {
-      insn = PREV_INSN (insn);
-      if (insn == 0 || !NOTE_P (insn))
-	break;
-      if (NOTE_INSN_BASIC_BLOCK_P (insn))
-	return NULL;
-    }
-
-  return insn;
-}
-
-/* Return the next insn after INSN that is not a DEBUG_INSN.  This
-   routine does not look inside SEQUENCEs.  */
-
-rtx_insn *
-next_nondebug_insn (rtx_insn *insn)
-{
-  while (insn)
-    {
-      insn = NEXT_INSN (insn);
-      if (insn == 0 || !DEBUG_INSN_P (insn))
-	break;
-    }
-
-  return insn;
-}
-
 /* Return the previous insn before INSN that is not a DEBUG_INSN.
    This routine does not look inside SEQUENCEs.  */
 
@@ -3458,6 +3537,29 @@
   return insn;
 }
 
+/* Return the next insn after INSN that is not a NOTE nor DEBUG_INSN,
+   but stop the search before we enter another basic block.  This
+   routine does not look inside SEQUENCEs.  */
+
+rtx_insn *
+next_nonnote_nondebug_insn_bb (rtx_insn *insn)
+{
+  while (insn)
+    {
+      insn = NEXT_INSN (insn);
+      if (insn == 0)
+	break;
+      if (DEBUG_INSN_P (insn))
+	continue;
+      if (!NOTE_P (insn))
+	break;
+      if (NOTE_INSN_BASIC_BLOCK_P (insn))
+	return NULL;
+    }
+
+  return insn;
+}
+
 /* Return the previous insn before INSN that is not a NOTE nor DEBUG_INSN.
    This routine does not look inside SEQUENCEs.  */
 
@@ -3474,19 +3576,76 @@
   return insn;
 }
 
+/* Return the previous insn before INSN that is not a NOTE nor
+   DEBUG_INSN, but stop the search before we enter another basic
+   block.  This routine does not look inside SEQUENCEs.  */
+
+rtx_insn *
+prev_nonnote_nondebug_insn_bb (rtx_insn *insn)
+{
+  while (insn)
+    {
+      insn = PREV_INSN (insn);
+      if (insn == 0)
+	break;
+      if (DEBUG_INSN_P (insn))
+	continue;
+      if (!NOTE_P (insn))
+	break;
+      if (NOTE_INSN_BASIC_BLOCK_P (insn))
+	return NULL;
+    }
+
+  return insn;
+}
+
+/* Return the next INSN, CALL_INSN, JUMP_INSN or DEBUG_INSN after INSN;
+   or 0, if there is none.  This routine does not look inside
+   SEQUENCEs.  */
+
+rtx_insn *
+next_real_insn (rtx_insn *insn)
+{
+  while (insn)
+    {
+      insn = NEXT_INSN (insn);
+      if (insn == 0 || INSN_P (insn))
+	break;
+    }
+
+  return insn;
+}
+
+/* Return the last INSN, CALL_INSN, JUMP_INSN or DEBUG_INSN before INSN;
+   or 0, if there is none.  This routine does not look inside
+   SEQUENCEs.  */
+
+rtx_insn *
+prev_real_insn (rtx_insn *insn)
+{
+  while (insn)
+    {
+      insn = PREV_INSN (insn);
+      if (insn == 0 || INSN_P (insn))
+	break;
+    }
+
+  return insn;
+}
+
 /* Return the next INSN, CALL_INSN or JUMP_INSN after INSN;
    or 0, if there is none.  This routine does not look inside
    SEQUENCEs.  */
 
 rtx_insn *
-next_real_insn (rtx uncast_insn)
+next_real_nondebug_insn (rtx uncast_insn)
 {
   rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
 
   while (insn)
     {
       insn = NEXT_INSN (insn);
-      if (insn == 0 || INSN_P (insn))
+      if (insn == 0 || NONDEBUG_INSN_P (insn))
 	break;
     }
 
@@ -3498,12 +3657,12 @@
    SEQUENCEs.  */
 
 rtx_insn *
-prev_real_insn (rtx_insn *insn)
+prev_real_nondebug_insn (rtx_insn *insn)
 {
   while (insn)
     {
       insn = PREV_INSN (insn);
-      if (insn == 0 || INSN_P (insn))
+      if (insn == 0 || NONDEBUG_INSN_P (insn))
 	break;
     }
 
@@ -3743,15 +3902,12 @@
       for (insn = insn_last; insn ; insn = PREV_INSN (insn))
 	if (CALL_P (insn))
 	  {
-	    rtx_insn *next;
-	    rtx *p;
-
 	    gcc_assert (call_insn == NULL_RTX);
 	    call_insn = insn;
 
 	    /* Add the old CALL_INSN_FUNCTION_USAGE to whatever the
 	       target may have explicitly specified.  */
-	    p = &CALL_INSN_FUNCTION_USAGE (insn);
+	    rtx *p = &CALL_INSN_FUNCTION_USAGE (insn);
 	    while (*p)
 	      p = &XEXP (*p, 1);
 	    *p = CALL_INSN_FUNCTION_USAGE (trial);
@@ -3759,21 +3915,6 @@
 	    /* If the old call was a sibling call, the new one must
 	       be too.  */
 	    SIBLING_CALL_P (insn) = SIBLING_CALL_P (trial);
-
-	    /* If the new call is the last instruction in the sequence,
-	       it will effectively replace the old call in-situ.  Otherwise
-	       we must move any following NOTE_INSN_CALL_ARG_LOCATION note
-	       so that it comes immediately after the new call.  */
-	    if (NEXT_INSN (insn))
-	      for (next = NEXT_INSN (trial);
-		   next && NOTE_P (next);
-		   next = NEXT_INSN (next))
-		if (NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
-		  {
-		    remove_insn (next);
-		    add_insn_after (next, insn, NULL);
-		    break;
-		  }
 	  }
     }
 
@@ -3790,6 +3931,7 @@
 	case REG_SETJMP:
 	case REG_TM:
 	case REG_CALL_NOCF_CHECK:
+	case REG_CALL_ARG_LOCATION:
 	  for (insn = insn_last; insn != NULL_RTX; insn = PREV_INSN (insn))
 	    {
 	      if (CALL_P (insn))
@@ -3819,7 +3961,7 @@
 	  break;
 
 	case REG_ARGS_SIZE:
-	  fixup_args_size_notes (NULL, insn_last, INTVAL (XEXP (note, 0)));
+	  fixup_args_size_notes (NULL, insn_last, get_args_size (note));
 	  break;
 
 	case REG_CALL_DECL:
@@ -4025,7 +4167,7 @@
 {
   rtx_insn *prev = get_last_insn ();
   link_insn_into_chain (insn, prev, NULL);
-  if (NULL == get_insns ())
+  if (get_insns () == NULL)
     set_first_insn (insn);
   set_last_insn (insn);
 }
@@ -4088,10 +4230,8 @@
    they know how to update a SEQUENCE. */
 
 void
-add_insn_after (rtx uncast_insn, rtx uncast_after, basic_block bb)
-{
-  rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
-  rtx_insn *after = as_a <rtx_insn *> (uncast_after);
+add_insn_after (rtx_insn *insn, rtx_insn *after, basic_block bb)
+{
   add_insn_after_nobb (insn, after);
   if (!BARRIER_P (after)
       && !BARRIER_P (insn)
@@ -4118,10 +4258,8 @@
    they know how to update a SEQUENCE. */
 
 void
-add_insn_before (rtx uncast_insn, rtx uncast_before, basic_block bb)
-{
-  rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
-  rtx_insn *before = as_a <rtx_insn *> (uncast_before);
+add_insn_before (rtx_insn *insn, rtx_insn *before, basic_block bb)
+{
   add_insn_before_nobb (insn, before);
 
   if (!bb
@@ -4146,10 +4284,10 @@
 /* Replace insn with a deleted instruction note.  */
 
 void
-set_insn_deleted (rtx insn)
+set_insn_deleted (rtx_insn *insn)
 {
   if (INSN_P (insn))
-    df_insn_delete (as_a <rtx_insn *> (insn));
+    df_insn_delete (insn);
   PUT_CODE (insn, NOTE);
   NOTE_KIND (insn) = NOTE_INSN_DELETED;
 }
@@ -4171,9 +4309,8 @@
    To really delete an insn and related DF information, use delete_insn.  */
 
 void
-remove_insn (rtx uncast_insn)
-{
-  rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
+remove_insn (rtx_insn *insn)
+{
   rtx_insn *next = NEXT_INSN (insn);
   rtx_insn *prev = PREV_INSN (insn);
   basic_block bb;
@@ -4376,7 +4513,8 @@
    generated would almost certainly die right after it was created.  */
 
 static rtx_insn *
-emit_pattern_before_noloc (rtx x, rtx before, rtx last, basic_block bb,
+emit_pattern_before_noloc (rtx x, rtx_insn *before, rtx_insn *last,
+			   basic_block bb,
                            rtx_insn *(*make_raw) (rtx))
 {
   rtx_insn *insn;
@@ -4384,7 +4522,7 @@
   gcc_assert (before);
 
   if (x == NULL_RTX)
-    return safe_as_a <rtx_insn *> (last);
+    return last;
 
   switch (GET_CODE (x))
     {
@@ -4417,7 +4555,7 @@
       break;
     }
 
-  return safe_as_a <rtx_insn *> (last);
+  return last;
 }
 
 /* Make X be output before the instruction BEFORE.  */
@@ -4435,7 +4573,7 @@
 emit_jump_insn_before_noloc (rtx x, rtx_insn *before)
 {
   return as_a <rtx_jump_insn *> (
-		emit_pattern_before_noloc (x, before, NULL_RTX, NULL,
+		emit_pattern_before_noloc (x, before, NULL, NULL,
 					   make_jump_insn_raw));
 }
 
@@ -4445,7 +4583,7 @@
 rtx_insn *
 emit_call_insn_before_noloc (rtx x, rtx_insn *before)
 {
-  return emit_pattern_before_noloc (x, before, NULL_RTX, NULL,
+  return emit_pattern_before_noloc (x, before, NULL, NULL,
 				    make_call_insn_raw);
 }
 
@@ -4453,9 +4591,9 @@
    and output it before the instruction BEFORE.  */
 
 rtx_insn *
-emit_debug_insn_before_noloc (rtx x, rtx before)
-{
-  return emit_pattern_before_noloc (x, before, NULL_RTX, NULL,
+emit_debug_insn_before_noloc (rtx x, rtx_insn *before)
+{
+  return emit_pattern_before_noloc (x, before, NULL, NULL,
 				    make_debug_insn_raw);
 }
 
@@ -4463,7 +4601,7 @@
    and output it before the insn BEFORE.  */
 
 rtx_barrier *
-emit_barrier_before (rtx before)
+emit_barrier_before (rtx_insn *before)
 {
   rtx_barrier *insn = as_a <rtx_barrier *> (rtx_alloc (BARRIER));
 
@@ -4476,21 +4614,20 @@
 /* Emit the label LABEL before the insn BEFORE.  */
 
 rtx_code_label *
-emit_label_before (rtx label, rtx_insn *before)
+emit_label_before (rtx_code_label *label, rtx_insn *before)
 {
   gcc_checking_assert (INSN_UID (label) == 0);
   INSN_UID (label) = cur_insn_uid++;
   add_insn_before (label, before, NULL);
-  return as_a <rtx_code_label *> (label);
+  return label;
 }
 
 /* Helper for emit_insn_after, handles lists of instructions
    efficiently.  */
 
 static rtx_insn *
-emit_insn_after_1 (rtx_insn *first, rtx uncast_after, basic_block bb)
-{
-  rtx_insn *after = safe_as_a <rtx_insn *> (uncast_after);
+emit_insn_after_1 (rtx_insn *first, rtx_insn *after, basic_block bb)
+{
   rtx_insn *last;
   rtx_insn *after_after;
   if (!bb && !BARRIER_P (after))
@@ -4532,10 +4669,9 @@
 }
 
 static rtx_insn *
-emit_pattern_after_noloc (rtx x, rtx uncast_after, basic_block bb,
+emit_pattern_after_noloc (rtx x, rtx_insn *after, basic_block bb,
 			  rtx_insn *(*make_raw)(rtx))
 {
-  rtx_insn *after = safe_as_a <rtx_insn *> (uncast_after);
   rtx_insn *last = after;
 
   gcc_assert (after);
@@ -4574,7 +4710,7 @@
    BB is NULL, an attempt is made to infer the BB from AFTER.  */
 
 rtx_insn *
-emit_insn_after_noloc (rtx x, rtx after, basic_block bb)
+emit_insn_after_noloc (rtx x, rtx_insn *after, basic_block bb)
 {
   return emit_pattern_after_noloc (x, after, bb, make_insn_raw);
 }
@@ -4584,7 +4720,7 @@
    and output it after the insn AFTER.  */
 
 rtx_jump_insn *
-emit_jump_insn_after_noloc (rtx x, rtx after)
+emit_jump_insn_after_noloc (rtx x, rtx_insn *after)
 {
   return as_a <rtx_jump_insn *> (
 		emit_pattern_after_noloc (x, after, NULL, make_jump_insn_raw));
@@ -4594,7 +4730,7 @@
    and output it after the instruction AFTER.  */
 
 rtx_insn *
-emit_call_insn_after_noloc (rtx x, rtx after)
+emit_call_insn_after_noloc (rtx x, rtx_insn *after)
 {
   return emit_pattern_after_noloc (x, after, NULL, make_call_insn_raw);
 }
@@ -4603,7 +4739,7 @@
    and output it after the instruction AFTER.  */
 
 rtx_insn *
-emit_debug_insn_after_noloc (rtx x, rtx after)
+emit_debug_insn_after_noloc (rtx x, rtx_insn *after)
 {
   return emit_pattern_after_noloc (x, after, NULL, make_debug_insn_raw);
 }
@@ -4612,7 +4748,7 @@
    and output it after the insn AFTER.  */
 
 rtx_barrier *
-emit_barrier_after (rtx after)
+emit_barrier_after (rtx_insn *after)
 {
   rtx_barrier *insn = as_a <rtx_barrier *> (rtx_alloc (BARRIER));
 
@@ -4625,12 +4761,12 @@
 /* Emit the label LABEL after the insn AFTER.  */
 
 rtx_insn *
-emit_label_after (rtx label, rtx_insn *after)
+emit_label_after (rtx_insn *label, rtx_insn *after)
 {
   gcc_checking_assert (INSN_UID (label) == 0);
   INSN_UID (label) = cur_insn_uid++;
   add_insn_after (label, after, NULL);
-  return as_a <rtx_insn *> (label);
+  return label;
 }
 
 /* Notes require a bit of special handling: Some notes need to have their
@@ -4654,7 +4790,6 @@
 	 inside basic blocks.  If the caller is emitting on the basic block
 	 boundary, do not set BLOCK_FOR_INSN on the new note.  */
       case NOTE_INSN_VAR_LOCATION:
-      case NOTE_INSN_CALL_ARG_LOCATION:
       case NOTE_INSN_EH_REGION_BEG:
       case NOTE_INSN_EH_REGION_END:
 	return on_bb_boundary_p;
@@ -4701,10 +4836,9 @@
    MAKE_RAW indicates how to turn PATTERN into a real insn.  */
 
 static rtx_insn *
-emit_pattern_after_setloc (rtx pattern, rtx uncast_after, int loc,
+emit_pattern_after_setloc (rtx pattern, rtx_insn *after, location_t loc,
 			   rtx_insn *(*make_raw) (rtx))
 {
-  rtx_insn *after = safe_as_a <rtx_insn *> (uncast_after);
   rtx_insn *last = emit_pattern_after_noloc (pattern, after, NULL, make_raw);
 
   if (pattern == NULL_RTX || !loc)
@@ -4729,10 +4863,9 @@
    any DEBUG_INSNs.  */
 
 static rtx_insn *
-emit_pattern_after (rtx pattern, rtx uncast_after, bool skip_debug_insns,
+emit_pattern_after (rtx pattern, rtx_insn *after, bool skip_debug_insns,
 		    rtx_insn *(*make_raw) (rtx))
 {
-  rtx_insn *after = safe_as_a <rtx_insn *> (uncast_after);
   rtx_insn *prev = after;
 
   if (skip_debug_insns)
@@ -4748,21 +4881,21 @@
 
 /* Like emit_insn_after_noloc, but set INSN_LOCATION according to LOC.  */
 rtx_insn *
-emit_insn_after_setloc (rtx pattern, rtx after, int loc)
+emit_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
 {
   return emit_pattern_after_setloc (pattern, after, loc, make_insn_raw);
 }
 
 /* Like emit_insn_after_noloc, but set INSN_LOCATION according to AFTER.  */
 rtx_insn *
-emit_insn_after (rtx pattern, rtx after)
+emit_insn_after (rtx pattern, rtx_insn *after)
 {
   return emit_pattern_after (pattern, after, true, make_insn_raw);
 }
 
 /* Like emit_jump_insn_after_noloc, but set INSN_LOCATION according to LOC.  */
 rtx_jump_insn *
-emit_jump_insn_after_setloc (rtx pattern, rtx after, int loc)
+emit_jump_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
 {
   return as_a <rtx_jump_insn *> (
 	emit_pattern_after_setloc (pattern, after, loc, make_jump_insn_raw));
@@ -4770,7 +4903,7 @@
 
 /* Like emit_jump_insn_after_noloc, but set INSN_LOCATION according to AFTER.  */
 rtx_jump_insn *
-emit_jump_insn_after (rtx pattern, rtx after)
+emit_jump_insn_after (rtx pattern, rtx_insn *after)
 {
   return as_a <rtx_jump_insn *> (
 	emit_pattern_after (pattern, after, true, make_jump_insn_raw));
@@ -4778,28 +4911,28 @@
 
 /* Like emit_call_insn_after_noloc, but set INSN_LOCATION according to LOC.  */
 rtx_insn *
-emit_call_insn_after_setloc (rtx pattern, rtx after, int loc)
+emit_call_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
 {
   return emit_pattern_after_setloc (pattern, after, loc, make_call_insn_raw);
 }
 
 /* Like emit_call_insn_after_noloc, but set INSN_LOCATION according to AFTER.  */
 rtx_insn *
-emit_call_insn_after (rtx pattern, rtx after)
+emit_call_insn_after (rtx pattern, rtx_insn *after)
 {
   return emit_pattern_after (pattern, after, true, make_call_insn_raw);
 }
 
 /* Like emit_debug_insn_after_noloc, but set INSN_LOCATION according to LOC.  */
 rtx_insn *
-emit_debug_insn_after_setloc (rtx pattern, rtx after, int loc)
+emit_debug_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc)
 {
   return emit_pattern_after_setloc (pattern, after, loc, make_debug_insn_raw);
 }
 
 /* Like emit_debug_insn_after_noloc, but set INSN_LOCATION according to AFTER.  */
 rtx_insn *
-emit_debug_insn_after (rtx pattern, rtx after)
+emit_debug_insn_after (rtx pattern, rtx_insn *after)
 {
   return emit_pattern_after (pattern, after, false, make_debug_insn_raw);
 }
@@ -4810,13 +4943,12 @@
    CALL_INSN, etc.  */
 
 static rtx_insn *
-emit_pattern_before_setloc (rtx pattern, rtx uncast_before, int loc, bool insnp,
-			    rtx_insn *(*make_raw) (rtx))
-{
-  rtx_insn *before = as_a <rtx_insn *> (uncast_before);
+emit_pattern_before_setloc (rtx pattern, rtx_insn *before, location_t loc,
+			    bool insnp, rtx_insn *(*make_raw) (rtx))
+{
   rtx_insn *first = PREV_INSN (before);
   rtx_insn *last = emit_pattern_before_noloc (pattern, before,
-					      insnp ? before : NULL_RTX,
+					      insnp ? before : NULL,
 					      NULL, make_raw);
 
   if (pattern == NULL_RTX || !loc)
@@ -4845,10 +4977,9 @@
    INSN as opposed to a JUMP_INSN, CALL_INSN, etc.  */
 
 static rtx_insn *
-emit_pattern_before (rtx pattern, rtx uncast_before, bool skip_debug_insns,
+emit_pattern_before (rtx pattern, rtx_insn *before, bool skip_debug_insns,
 		     bool insnp, rtx_insn *(*make_raw) (rtx))
 {
-  rtx_insn *before = safe_as_a <rtx_insn *> (uncast_before);
   rtx_insn *next = before;
 
   if (skip_debug_insns)
@@ -4860,13 +4991,13 @@
 				       insnp, make_raw);
   else
     return emit_pattern_before_noloc (pattern, before,
-				      insnp ? before : NULL_RTX,
+				      insnp ? before : NULL,
                                       NULL, make_raw);
 }
 
 /* Like emit_insn_before_noloc, but set INSN_LOCATION according to LOC.  */
 rtx_insn *
-emit_insn_before_setloc (rtx pattern, rtx_insn *before, int loc)
+emit_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
 {
   return emit_pattern_before_setloc (pattern, before, loc, true,
 				     make_insn_raw);
@@ -4874,14 +5005,14 @@
 
 /* Like emit_insn_before_noloc, but set INSN_LOCATION according to BEFORE.  */
 rtx_insn *
-emit_insn_before (rtx pattern, rtx before)
+emit_insn_before (rtx pattern, rtx_insn *before)
 {
   return emit_pattern_before (pattern, before, true, true, make_insn_raw);
 }
 
 /* like emit_insn_before_noloc, but set INSN_LOCATION according to LOC.  */
 rtx_jump_insn *
-emit_jump_insn_before_setloc (rtx pattern, rtx_insn *before, int loc)
+emit_jump_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
 {
   return as_a <rtx_jump_insn *> (
 	emit_pattern_before_setloc (pattern, before, loc, false,
@@ -4890,7 +5021,7 @@
 
 /* Like emit_jump_insn_before_noloc, but set INSN_LOCATION according to BEFORE.  */
 rtx_jump_insn *
-emit_jump_insn_before (rtx pattern, rtx before)
+emit_jump_insn_before (rtx pattern, rtx_insn *before)
 {
   return as_a <rtx_jump_insn *> (
 	emit_pattern_before (pattern, before, true, false,
@@ -4899,7 +5030,7 @@
 
 /* Like emit_insn_before_noloc, but set INSN_LOCATION according to LOC.  */
 rtx_insn *
-emit_call_insn_before_setloc (rtx pattern, rtx_insn *before, int loc)
+emit_call_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
 {
   return emit_pattern_before_setloc (pattern, before, loc, false,
 				     make_call_insn_raw);
@@ -4916,7 +5047,7 @@
 
 /* Like emit_insn_before_noloc, but set INSN_LOCATION according to LOC.  */
 rtx_insn *
-emit_debug_insn_before_setloc (rtx pattern, rtx before, int loc)
+emit_debug_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc)
 {
   return emit_pattern_before_setloc (pattern, before, loc, false,
 				     make_debug_insn_raw);
@@ -5561,6 +5692,7 @@
     case SIMPLE_RETURN:
       return orig;
     case CLOBBER:
+    case CLOBBER_HIGH:
       /* Share clobbers of hard registers (like cc0), but do not share pseudo reg
          clobbers or clobbers of hard registers that originated as pseudos.
          This is needed to allow safe register renaming.  */
@@ -5631,6 +5763,7 @@
       case 't':
       case 'w':
       case 'i':
+      case 'p':
       case 's':
       case 'S':
       case 'u':
@@ -5742,8 +5875,6 @@
   REGNO_POINTER_ALIGN (HARD_FRAME_POINTER_REGNUM) = STACK_BOUNDARY;
   REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) = STACK_BOUNDARY;
 
-  /* ??? These are problematic (for example, 3 out of 4 are wrong on
-     32-bit SPARC and cannot be all fixed because of the ABI).  */
   REGNO_POINTER_ALIGN (VIRTUAL_INCOMING_ARGS_REGNUM) = STACK_BOUNDARY;
   REGNO_POINTER_ALIGN (VIRTUAL_STACK_VARS_REGNUM) = STACK_BOUNDARY;
   REGNO_POINTER_ALIGN (VIRTUAL_STACK_DYNAMIC_REGNUM) = STACK_BOUNDARY;
@@ -5757,32 +5888,188 @@
 #endif
 }
 
-/* Generate a vector constant for mode MODE and constant value CONSTANT.  */
+/* Return the value of element I of CONST_VECTOR X as a wide_int.  */
+
+wide_int
+const_vector_int_elt (const_rtx x, unsigned int i)
+{
+  /* First handle elements that are directly encoded.  */
+  machine_mode elt_mode = GET_MODE_INNER (GET_MODE (x));
+  if (i < (unsigned int) XVECLEN (x, 0))
+    return rtx_mode_t (CONST_VECTOR_ENCODED_ELT (x, i), elt_mode);
+
+  /* Identify the pattern that contains element I and work out the index of
+     the last encoded element for that pattern.  */
+  unsigned int encoded_nelts = const_vector_encoded_nelts (x);
+  unsigned int npatterns = CONST_VECTOR_NPATTERNS (x);
+  unsigned int count = i / npatterns;
+  unsigned int pattern = i % npatterns;
+  unsigned int final_i = encoded_nelts - npatterns + pattern;
+
+  /* If there are no steps, the final encoded value is the right one.  */
+  if (!CONST_VECTOR_STEPPED_P (x))
+    return rtx_mode_t (CONST_VECTOR_ENCODED_ELT (x, final_i), elt_mode);
+
+  /* Otherwise work out the value from the last two encoded elements.  */
+  rtx v1 = CONST_VECTOR_ENCODED_ELT (x, final_i - npatterns);
+  rtx v2 = CONST_VECTOR_ENCODED_ELT (x, final_i);
+  wide_int diff = wi::sub (rtx_mode_t (v2, elt_mode),
+			   rtx_mode_t (v1, elt_mode));
+  return wi::add (rtx_mode_t (v2, elt_mode), (count - 2) * diff);
+}
+
+/* Return the value of element I of CONST_VECTOR X.  */
+
+rtx
+const_vector_elt (const_rtx x, unsigned int i)
+{
+  /* First handle elements that are directly encoded.  */
+  if (i < (unsigned int) XVECLEN (x, 0))
+    return CONST_VECTOR_ENCODED_ELT (x, i);
+
+  /* If there are no steps, the final encoded value is the right one.  */
+  if (!CONST_VECTOR_STEPPED_P (x))
+    {
+      /* Identify the pattern that contains element I and work out the index of
+	 the last encoded element for that pattern.  */
+      unsigned int encoded_nelts = const_vector_encoded_nelts (x);
+      unsigned int npatterns = CONST_VECTOR_NPATTERNS (x);
+      unsigned int pattern = i % npatterns;
+      unsigned int final_i = encoded_nelts - npatterns + pattern;
+      return CONST_VECTOR_ENCODED_ELT (x, final_i);
+    }
+
+  /* Otherwise work out the value from the last two encoded elements.  */
+  return immed_wide_int_const (const_vector_int_elt (x, i),
+			       GET_MODE_INNER (GET_MODE (x)));
+}
+
+/* Return true if X is a valid element for a CONST_VECTOR of the given
+  mode.  */
+
+bool
+valid_for_const_vector_p (machine_mode, rtx x)
+{
+  return (CONST_SCALAR_INT_P (x)
+	  || CONST_DOUBLE_AS_FLOAT_P (x)
+	  || CONST_FIXED_P (x));
+}
+
+/* Generate a vector constant of mode MODE in which every element has
+   value ELT.  */
+
+rtx
+gen_const_vec_duplicate (machine_mode mode, rtx elt)
+{
+  rtx_vector_builder builder (mode, 1, 1);
+  builder.quick_push (elt);
+  return builder.build ();
+}
+
+/* Return a vector rtx of mode MODE in which every element has value X.
+   The result will be a constant if X is constant.  */
+
+rtx
+gen_vec_duplicate (machine_mode mode, rtx x)
+{
+  if (valid_for_const_vector_p (mode, x))
+    return gen_const_vec_duplicate (mode, x);
+  return gen_rtx_VEC_DUPLICATE (mode, x);
+}
+
+/* A subroutine of const_vec_series_p that handles the case in which:
+
+     (GET_CODE (X) == CONST_VECTOR
+      && CONST_VECTOR_NPATTERNS (X) == 1
+      && !CONST_VECTOR_DUPLICATE_P (X))
+
+   is known to hold.  */
+
+bool
+const_vec_series_p_1 (const_rtx x, rtx *base_out, rtx *step_out)
+{
+  /* Stepped sequences are only defined for integers, to avoid specifying
+     rounding behavior.  */
+  if (GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_INT)
+    return false;
+
+  /* A non-duplicated vector with two elements can always be seen as a
+     series with a nonzero step.  Longer vectors must have a stepped
+     encoding.  */
+  if (maybe_ne (CONST_VECTOR_NUNITS (x), 2)
+      && !CONST_VECTOR_STEPPED_P (x))
+    return false;
+
+  /* Calculate the step between the first and second elements.  */
+  scalar_mode inner = GET_MODE_INNER (GET_MODE (x));
+  rtx base = CONST_VECTOR_ELT (x, 0);
+  rtx step = simplify_binary_operation (MINUS, inner,
+					CONST_VECTOR_ENCODED_ELT (x, 1), base);
+  if (rtx_equal_p (step, CONST0_RTX (inner)))
+    return false;
+
+  /* If we have a stepped encoding, check that the step between the
+     second and third elements is the same as STEP.  The encoding
+     guarantees that all later elements continue with that same step,
+     so no further checks are needed.  */
+  if (CONST_VECTOR_STEPPED_P (x))
+    {
+      rtx diff = simplify_binary_operation (MINUS, inner,
+					    CONST_VECTOR_ENCODED_ELT (x, 2),
+					    CONST_VECTOR_ENCODED_ELT (x, 1));
+      if (!rtx_equal_p (step, diff))
+	return false;
+    }
+
+  *base_out = base;
+  *step_out = step;
+  return true;
+}
+
+/* Generate a vector constant of mode MODE in which element I has
+   the value BASE + I * STEP.  */
+
+rtx
+gen_const_vec_series (machine_mode mode, rtx base, rtx step)
+{
+  gcc_assert (valid_for_const_vector_p (mode, base)
+	      && valid_for_const_vector_p (mode, step));
+
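+  /* Encode the series as a single pattern with three elements per
+     pattern: BASE, BASE + STEP and BASE + STEP * 2.  The remaining
+     elements are implicitly extrapolated with the same step.  */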
+  rtx_vector_builder builder (mode, 1, 3);
+  builder.quick_push (base);
+  for (int i = 1; i < 3; ++i)
+    builder.quick_push (simplify_gen_binary (PLUS, GET_MODE_INNER (mode),
+					     builder[i - 1], step));
+  return builder.build ();
+}
+
+/* Generate a vector of mode MODE in which element I has the value
+   BASE + I * STEP.  The result will be a constant if BASE and STEP
+   are both constants.  */
+
+rtx
+gen_vec_series (machine_mode mode, rtx base, rtx step)
+{
+  if (step == const0_rtx)
+    return gen_vec_duplicate (mode, base);
+  if (valid_for_const_vector_p (mode, base)
+      && valid_for_const_vector_p (mode, step))
+    return gen_const_vec_series (mode, base, step);
+  return gen_rtx_VEC_SERIES (mode, base, step);
+}
+
+/* Generate a new vector constant for mode MODE and constant value
+   CONSTANT.  */
 
 static rtx
 gen_const_vector (machine_mode mode, int constant)
 {
-  rtx tem;
-  rtvec v;
-  int units, i;
-  machine_mode inner;
-
-  units = GET_MODE_NUNITS (mode);
-  inner = GET_MODE_INNER (mode);
+  machine_mode inner = GET_MODE_INNER (mode);
 
   gcc_assert (!DECIMAL_FLOAT_MODE_P (inner));
 
-  v = rtvec_alloc (units);
-
-  /* We need to call this function after we set the scalar const_tiny_rtx
-     entries.  */
-  gcc_assert (const_tiny_rtx[constant][(int) inner]);
-
-  for (i = 0; i < units; ++i)
-    RTVEC_ELT (v, i) = const_tiny_rtx[constant][(int) inner];
-
-  tem = gen_rtx_raw_CONST_VECTOR (mode, v);
-  return tem;
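+  /* This must only be called after the scalar const_tiny_rtx entries
+     for INNER have been set up.  */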
+  rtx el = const_tiny_rtx[constant][(int) inner];
+  gcc_assert (el);
+
+  return gen_const_vec_duplicate (mode, el);
 }
 
 /* Generate a vector like gen_rtx_raw_CONST_VEC, but use the zero vector when
@@ -5790,30 +6077,18 @@
 rtx
 gen_rtx_CONST_VECTOR (machine_mode mode, rtvec v)
 {
-  machine_mode inner = GET_MODE_INNER (mode);
-  int nunits = GET_MODE_NUNITS (mode);
-  rtx x;
-  int i;
-
-  /* Check to see if all of the elements have the same value.  */
-  x = RTVEC_ELT (v, nunits - 1);
-  for (i = nunits - 2; i >= 0; i--)
-    if (RTVEC_ELT (v, i) != x)
-      break;
+  gcc_assert (known_eq (GET_MODE_NUNITS (mode), GET_NUM_ELEM (v)));
 
   /* If the values are all the same, check to see if we can use one of the
      standard constant vectors.  */
-  if (i == -1)
-    {
-      if (x == CONST0_RTX (inner))
-	return CONST0_RTX (mode);
-      else if (x == CONST1_RTX (inner))
-	return CONST1_RTX (mode);
-      else if (x == CONSTM1_RTX (inner))
-	return CONSTM1_RTX (mode);
-    }
-
-  return gen_rtx_raw_CONST_VECTOR (mode, v);
+  if (rtvec_all_equal_p (v))
+    return gen_const_vec_duplicate (mode, RTVEC_ELT (v, 0));
+
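+  /* Otherwise start from the most general encoding, one pattern per
+     element; the builder canonicalizes this into a more compact
+     encoding where possible.  */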
+  unsigned int nunits = GET_NUM_ELEM (v);
+  rtx_vector_builder builder (mode, nunits, 1);
+  for (unsigned int i = 0; i < nunits; ++i)
+    builder.quick_push (RTVEC_ELT (v, i));
+  return builder.build (v);
 }
 
 /* Initialise global register information required by all functions.  */
@@ -5868,7 +6143,7 @@
       attrs = ggc_cleared_alloc<mem_attrs> ();
       attrs->align = BITS_PER_UNIT;
       attrs->addrspace = ADDR_SPACE_GENERIC;
-      if (mode != BLKmode)
+      if (mode != BLKmode && mode != VOIDmode)
 	{
 	  attrs->size_known_p = true;
 	  attrs->size = GET_MODE_SIZE (mode);
@@ -5877,6 +6152,8 @@
 	}
       mode_mem_attrs[i] = attrs;
     }
+
+  split_branch_probability = profile_probability::uninitialized ();
 }
 
 /* Initialize global machine_mode variables.  */
@@ -5900,7 +6177,8 @@
 
   byte_mode = opt_byte_mode.require ();
   word_mode = opt_word_mode.require ();
-  ptr_mode = int_mode_for_size (POINTER_SIZE, 0).require ();
+  ptr_mode = as_a <scalar_int_mode>
+    (mode_for_size (POINTER_SIZE, GET_MODE_CLASS (Pmode), 0).require ());
 }
 
 /* Create some permanent unique rtl objects shared between all functions.  */
@@ -5922,6 +6200,9 @@
 #endif
   const_double_htab = hash_table<const_double_hasher>::create_ggc (37);
 
+  if (NUM_POLY_INT_COEFFS > 1)
+    const_poly_int_htab = hash_table<const_poly_int_hasher>::create_ggc (37);
+
   const_fixed_htab = hash_table<const_fixed_hasher>::create_ggc (37);
 
   reg_attrs_htab = hash_table<reg_attr_hasher>::create_ggc (37);
@@ -5996,6 +6277,12 @@
   FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
     const_tiny_rtx[3][(int) mode] = constm1_rtx;
 
+  /* For BImode, 1 and -1 are unsigned and signed interpretations
+     of the same value.  */
+  const_tiny_rtx[0][(int) BImode] = const0_rtx;
+  const_tiny_rtx[1][(int) BImode] = const_true_rtx;
+  const_tiny_rtx[3][(int) BImode] = const_true_rtx;
+
   for (mode = MIN_MODE_PARTIAL_INT;
        mode <= MAX_MODE_PARTIAL_INT;
        mode = (machine_mode)((int)(mode) + 1))
@@ -6013,6 +6300,15 @@
       const_tiny_rtx[0][(int) mode] = gen_rtx_CONCAT (mode, inner, inner);
     }
 
+  /* As for BImode, "all 1" and "all -1" are unsigned and signed
+     interpretations of the same value.  */
+  FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_BOOL)
+    {
+      const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
+      const_tiny_rtx[3][(int) mode] = gen_const_vector (mode, 3);
+      const_tiny_rtx[1][(int) mode] = const_tiny_rtx[3][(int) mode];
+    }
+
   FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT)
     {
       const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
@@ -6114,17 +6410,6 @@
     if (GET_MODE_CLASS ((machine_mode) i) == MODE_CC)
       const_tiny_rtx[0][i] = const0_rtx;
 
-  const_tiny_rtx[0][(int) BImode] = const0_rtx;
-  if (STORE_FLAG_VALUE == 1)
-    const_tiny_rtx[1][(int) BImode] = const1_rtx;
-
-  FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_POINTER_BOUNDS)
-    {
-      scalar_mode smode = smode_iter.require ();
-      wide_int wi_zero = wi::zero (GET_MODE_PRECISION (smode));
-      const_tiny_rtx[0][smode] = immed_wide_int_const (wi_zero, smode);
-    }
-
   pc_rtx = gen_rtx_fmt_ (PC, VOIDmode);
   ret_rtx = gen_rtx_fmt_ (RETURN, VOIDmode);
   simple_return_rtx = gen_rtx_fmt_ (SIMPLE_RETURN, VOIDmode);
@@ -6219,6 +6504,21 @@
 	    gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (mode, regno)));
 }
 
+static GTY((deletable)) rtx
+hard_reg_clobbers_high[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
+
+/* Return a CLOBBER_HIGH expression for register REGNO in mode MODE,
+   caching the result in HARD_REG_CLOBBERS_HIGH.  */
+rtx
+gen_hard_reg_clobber_high (machine_mode mode, unsigned int regno)
+{
+  if (hard_reg_clobbers_high[mode][regno])
+    return hard_reg_clobbers_high[mode][regno];
+  else
+    return (hard_reg_clobbers_high[mode][regno]
+	    = gen_rtx_CLOBBER_HIGH (VOIDmode, gen_rtx_REG (mode, regno)));
+}
+
 location_t prologue_location;
 location_t epilogue_location;
 
@@ -6309,6 +6609,22 @@
     }
 }
 
+/* Return a constant shift amount for shifting a value of mode MODE
+   by VALUE bits.  */
+
+rtx
+gen_int_shift_amount (machine_mode, poly_int64 value)
+{
+  /* Use a 64-bit mode, to avoid any truncation.
+
+     ??? Perhaps this should be automatically derived from the .md files
+     instead, or perhaps have a target hook.  */
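+  /* DImode is only guaranteed to be 64 bits wide when BITS_PER_UNIT
+     is 8; otherwise look the mode up by size.  */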
+  scalar_int_mode shift_mode = (BITS_PER_UNIT == 8
+				? DImode
+				: int_mode_for_size (64, 0).require ());
+  return gen_int_mode (value, shift_mode);
+}
+
 /* Initialize fields of rtl_data related to stack alignment.  */
 
 void