diff gcc/tree-vect-data-refs.c @ 131:84e7813d76e9

gcc-8.2
author mir3636
date Thu, 25 Oct 2018 07:37:49 +0900
parents 04ced10e8804
children 1830386684a0
--- a/gcc/tree-vect-data-refs.c	Fri Oct 27 22:46:09 2017 +0900
+++ b/gcc/tree-vect-data-refs.c	Thu Oct 25 07:37:49 2018 +0900
@@ -1,5 +1,5 @@
 /* Data References Analysis and Manipulation Utilities for Vectorization.
-   Copyright (C) 2003-2017 Free Software Foundation, Inc.
+   Copyright (C) 2003-2018 Free Software Foundation, Inc.
    Contributed by Dorit Naishlos <dorit@il.ibm.com>
    and Ira Rosen <irar@il.ibm.com>
 
@@ -52,6 +52,8 @@
 #include "params.h"
 #include "tree-cfg.h"
 #include "tree-hash-traits.h"
+#include "vec-perm-indices.h"
+#include "internal-fn.h"
 
 /* Return true if load- or store-lanes optab OPTAB is implemented for
    COUNT vectors of type VECTYPE.  NAME is the name of OPTAB.  */
@@ -60,20 +62,22 @@
 vect_lanes_optab_supported_p (const char *name, convert_optab optab,
 			      tree vectype, unsigned HOST_WIDE_INT count)
 {
-  machine_mode mode;
-  scalar_int_mode array_mode;
+  machine_mode mode, array_mode;
   bool limit_p;
 
   mode = TYPE_MODE (vectype);
-  limit_p = !targetm.array_mode_supported_p (mode, count);
-  if (!int_mode_for_size (count * GET_MODE_BITSIZE (mode),
-			  limit_p).exists (&array_mode))
+  if (!targetm.array_mode (mode, count).exists (&array_mode))
     {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                         "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]\n",
-                         GET_MODE_NAME (mode), count);
-      return false;
+      poly_uint64 bits = count * GET_MODE_BITSIZE (mode);
+      limit_p = !targetm.array_mode_supported_p (mode, count);
+      if (!int_mode_for_size (bits, limit_p).exists (&array_mode))
+	{
+	  if (dump_enabled_p ())
+	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+			     "no array mode for %s[%wu]\n",
+			     GET_MODE_NAME (mode), count);
+	  return false;
+	}
     }
 
   if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
@@ -94,7 +98,7 @@
 }
 
 
-/* Return the smallest scalar part of STMT.
+/* Return the smallest scalar part of STMT_INFO.
    This is used to determine the vectype of the stmt.  We generally set the
    vectype according to the type of the result (lhs).  For stmts whose
    result-type is different than the type of the arguments (e.g., demotion,
@@ -112,10 +116,11 @@
    types.  */
 
 tree
-vect_get_smallest_scalar_type (gimple *stmt, HOST_WIDE_INT *lhs_size_unit,
-                               HOST_WIDE_INT *rhs_size_unit)
+vect_get_smallest_scalar_type (stmt_vec_info stmt_info,
+			       HOST_WIDE_INT *lhs_size_unit,
+			       HOST_WIDE_INT *rhs_size_unit)
 {
-  tree scalar_type = gimple_expr_type (stmt);
+  tree scalar_type = gimple_expr_type (stmt_info->stmt);
   HOST_WIDE_INT lhs, rhs;
 
   /* During the analysis phase, this function is called on arbitrary
@@ -125,13 +130,16 @@
 
   lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
 
-  if (is_gimple_assign (stmt)
-      && (gimple_assign_cast_p (stmt)
-          || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
-          || gimple_assign_rhs_code (stmt) == WIDEN_LSHIFT_EXPR
-          || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
+  gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
+  if (assign
+      && (gimple_assign_cast_p (assign)
+	  || gimple_assign_rhs_code (assign) == DOT_PROD_EXPR
+	  || gimple_assign_rhs_code (assign) == WIDEN_SUM_EXPR
+	  || gimple_assign_rhs_code (assign) == WIDEN_MULT_EXPR
+	  || gimple_assign_rhs_code (assign) == WIDEN_LSHIFT_EXPR
+	  || gimple_assign_rhs_code (assign) == FLOAT_EXPR))
     {
-      tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
+      tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (assign));
 
       rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
       if (rhs < lhs)
@@ -148,22 +156,81 @@
    tested at run-time.  Return TRUE if DDR was successfully inserted.
    Return false if versioning is not supported.  */
 
-static bool
+static opt_result
 vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
 {
   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
 
   if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
-    return false;
-
-  if (!runtime_alias_check_p (ddr, loop,
-			      optimize_loop_nest_for_speed_p (loop)))
-    return false;
+    return opt_result::failure_at (vect_location,
+				   "will not create alias checks, as"
+				   " --param vect-max-version-for-alias-checks"
+				   " == 0\n");
+
+  opt_result res
+    = runtime_alias_check_p (ddr, loop,
+			     optimize_loop_nest_for_speed_p (loop));
+  if (!res)
+    return res;
 
   LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
-  return true;
+  return opt_result::success ();
+}
+
+/* Record that loop LOOP_VINFO needs to check that VALUE is nonzero.  */
+
+static void
+vect_check_nonzero_value (loop_vec_info loop_vinfo, tree value)
+{
+  vec<tree> checks = LOOP_VINFO_CHECK_NONZERO (loop_vinfo);
+  for (unsigned int i = 0; i < checks.length (); ++i)
+    if (checks[i] == value)
+      return;
+
+  if (dump_enabled_p ())
+    dump_printf_loc (MSG_NOTE, vect_location,
+		     "need run-time check that %T is nonzero\n",
+		     value);
+  LOOP_VINFO_CHECK_NONZERO (loop_vinfo).safe_push (value);
 }
 
+/* Return true if we know that the order of vectorized DR_INFO_A and
+   vectorized DR_INFO_B will be the same as the order of DR_INFO_A and
+   DR_INFO_B.  At least one of the accesses is a write.  */
+
+static bool
+vect_preserves_scalar_order_p (dr_vec_info *dr_info_a, dr_vec_info *dr_info_b)
+{
+  stmt_vec_info stmtinfo_a = dr_info_a->stmt;
+  stmt_vec_info stmtinfo_b = dr_info_b->stmt;
+
+  /* Single statements are always kept in their original order.  */
+  if (!STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
+      && !STMT_VINFO_GROUPED_ACCESS (stmtinfo_b))
+    return true;
+
+  /* STMT_A and STMT_B belong to overlapping groups.  All loads in a
+     group are emitted at the position of the last scalar load and all
+     stores in a group are emitted at the position of the last scalar store.
+     Compute that position and check whether the resulting order matches
+     the current one.  */
+  stmt_vec_info last_a = DR_GROUP_FIRST_ELEMENT (stmtinfo_a);
+  if (last_a)
+    for (stmt_vec_info s = DR_GROUP_NEXT_ELEMENT (last_a); s;
+	 s = DR_GROUP_NEXT_ELEMENT (s))
+      last_a = get_later_stmt (last_a, s);
+  else
+    last_a = stmtinfo_a;
+  stmt_vec_info last_b = DR_GROUP_FIRST_ELEMENT (stmtinfo_b);
+  if (last_b)
+    for (stmt_vec_info s = DR_GROUP_NEXT_ELEMENT (last_b); s;
+	 s = DR_GROUP_NEXT_ELEMENT (s))
+      last_b = get_later_stmt (last_b, s);
+  else
+    last_b = stmtinfo_b;
+  return ((get_later_stmt (last_a, last_b) == last_a)
+	  == (get_later_stmt (stmtinfo_a, stmtinfo_b) == stmtinfo_a));
+}
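
The new helper above can be read in isolation: each access is notionally emitted at the position of the last scalar statement of its interleaving group, and the order is preserved iff the relative order of those emit points matches the relative order of the original statements. A minimal standalone sketch of that check, using plain statement indices instead of stmt_vec_info (all names below are illustrative, not GCC API):

// Toy model of vect_preserves_scalar_order_p: statements are identified by
// their index in the scalar loop body, and a grouped access is just the set
// of indices that belong to its group.
#include <algorithm>
#include <cassert>
#include <vector>

// Position at which the vectorized form of the access is emitted: the last
// scalar statement of its group, or the statement itself if it is ungrouped.
static unsigned
emit_position (unsigned stmt, const std::vector<unsigned> &group)
{
  return group.empty () ? stmt : *std::max_element (group.begin (), group.end ());
}

// The vectorized order matches the scalar order iff "A is emitted after B"
// agrees with "A comes after B" in the original code.
static bool
preserves_scalar_order (unsigned stmt_a, const std::vector<unsigned> &group_a,
			unsigned stmt_b, const std::vector<unsigned> &group_b)
{
  unsigned last_a = emit_position (stmt_a, group_a);
  unsigned last_b = emit_position (stmt_b, group_b);
  return (last_a > last_b) == (stmt_a > stmt_b);
}

int
main ()
{
  // A load at index 2 belongs to a load group {2, 5}; the store at index 3 is
  // ungrouped.  The group is emitted at index 5, i.e. after the store, even
  // though the scalar load preceded it: the order is not preserved.
  assert (!preserves_scalar_order (2, {2, 5}, 3, {}));
  // Two ungrouped accesses always keep their order.
  assert (preserves_scalar_order (4, {}, 7, {}));
  return 0;
}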
 
 /* A subroutine of vect_analyze_data_ref_dependence.  Handle
    DDR_COULD_BE_INDEPENDENT_P ddr DDR that has a known set of dependence
@@ -179,7 +246,7 @@
 static bool
 vect_analyze_possibly_independent_ddr (data_dependence_relation *ddr,
 				       loop_vec_info loop_vinfo,
-				       int loop_depth, int *max_vf)
+				       int loop_depth, unsigned int *max_vf)
 {
   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
   lambda_vector dist_v;
@@ -199,7 +266,7 @@
 	     would be a win.  */
 	  if (loop->safelen >= 2 && abs_hwi (dist) <= loop->safelen)
 	    {
-	      if (loop->safelen < *max_vf)
+	      if ((unsigned int) loop->safelen < *max_vf)
 		*max_vf = loop->safelen;
 	      LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
 	      continue;
@@ -212,7 +279,11 @@
 
 	     Note that the alias checks will be removed if the VF ends up
 	     being small enough.  */
-	  return vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
+	  dr_vec_info *dr_info_a = loop_vinfo->lookup_dr (DDR_A (ddr));
+	  dr_vec_info *dr_info_b = loop_vinfo->lookup_dr (DDR_B (ddr));
+	  return (!STMT_VINFO_GATHER_SCATTER_P (dr_info_a->stmt)
+		  && !STMT_VINFO_GATHER_SCATTER_P (dr_info_b->stmt)
+		  && vect_mark_for_runtime_alias_test (ddr, loop_vinfo));
 	}
     }
   return true;
@@ -221,21 +292,26 @@
 
 /* Function vect_analyze_data_ref_dependence.
 
-   Return TRUE if there (might) exist a dependence between a memory-reference
+   FIXME: I needed to change the sense of the returned flag.
+
+   Return FALSE if there (might) exist a dependence between a memory-reference
    DRA and a memory-reference DRB.  When versioning for alias may check a
-   dependence at run-time, return FALSE.  Adjust *MAX_VF according to
+   dependence at run-time, return TRUE.  Adjust *MAX_VF according to
    the data dependence.  */
 
-static bool
+static opt_result
 vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
-                                  loop_vec_info loop_vinfo, int *max_vf)
+				  loop_vec_info loop_vinfo,
+				  unsigned int *max_vf)
 {
   unsigned int i;
   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
   struct data_reference *dra = DDR_A (ddr);
   struct data_reference *drb = DDR_B (ddr);
-  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
-  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
+  dr_vec_info *dr_info_a = loop_vinfo->lookup_dr (dra);
+  dr_vec_info *dr_info_b = loop_vinfo->lookup_dr (drb);
+  stmt_vec_info stmtinfo_a = dr_info_a->stmt;
+  stmt_vec_info stmtinfo_b = dr_info_b->stmt;
   lambda_vector dist_v;
   unsigned int loop_depth;
 
@@ -246,17 +322,20 @@
 
   /* Independent data accesses.  */
   if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
-    return false;
+    return opt_result::success ();
 
   if (dra == drb
       || (DR_IS_READ (dra) && DR_IS_READ (drb)))
-    return false;
+    return opt_result::success ();
 
   /* We do not have to consider dependences between accesses that belong
-     to the same group.  */
-  if (GROUP_FIRST_ELEMENT (stmtinfo_a)
-      && GROUP_FIRST_ELEMENT (stmtinfo_a) == GROUP_FIRST_ELEMENT (stmtinfo_b))
-    return false;
+     to the same group, unless the stride could be smaller than the
+     group size.  */
+  if (DR_GROUP_FIRST_ELEMENT (stmtinfo_a)
+      && (DR_GROUP_FIRST_ELEMENT (stmtinfo_a)
+	  == DR_GROUP_FIRST_ELEMENT (stmtinfo_b))
+      && !STMT_VINFO_STRIDED_P (stmtinfo_a))
+    return opt_result::success ();
 
   /* Even if we have an anti-dependence then, as the vectorized loop covers at
      least two scalar iterations, there is always also a true dependence.
@@ -268,7 +347,7 @@
        || (DR_IS_WRITE (dra) && DR_IS_READ (drb)))
       && !alias_sets_conflict_p (get_alias_set (DR_REF (dra)),
 				 get_alias_set (DR_REF (drb))))
-    return false;
+    return opt_result::success ();
 
   /* Unknown data dependence.  */
   if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
@@ -277,45 +356,28 @@
 	 executed concurrently, assume independence.  */
       if (loop->safelen >= 2)
 	{
-	  if (loop->safelen < *max_vf)
+	  if ((unsigned int) loop->safelen < *max_vf)
 	    *max_vf = loop->safelen;
 	  LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
-	  return false;
+	  return opt_result::success ();
 	}
 
       if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
 	  || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
-	{
-	  if (dump_enabled_p ())
-	    {
-	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			       "versioning for alias not supported for: "
-			       "can't determine dependence between ");
-	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-				 DR_REF (dra));
-	      dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
-	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-				 DR_REF (drb));
-	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
-	    }
-	  return true;
-	}
+	return opt_result::failure_at
+	  (stmtinfo_a->stmt,
+	   "versioning for alias not supported for: "
+	   "can't determine dependence between %T and %T\n",
+	   DR_REF (dra), DR_REF (drb));
 
       if (dump_enabled_p ())
-	{
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			   "versioning for alias required: "
-			   "can't determine dependence between ");
-	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-			     DR_REF (dra));
-	  dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
-	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-			     DR_REF (drb));
-	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
-	}
+	dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmtinfo_a->stmt,
+			 "versioning for alias required: "
+			 "can't determine dependence between %T and %T\n",
+			 DR_REF (dra), DR_REF (drb));
 
       /* Add to list of ddrs that need to be tested at run-time.  */
-      return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
+      return vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
     }
 
   /* Known data dependence.  */
@@ -325,42 +387,27 @@
 	 executed concurrently, assume independence.  */
       if (loop->safelen >= 2)
 	{
-	  if (loop->safelen < *max_vf)
+	  if ((unsigned int) loop->safelen < *max_vf)
 	    *max_vf = loop->safelen;
 	  LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
-	  return false;
+	  return opt_result::success ();
 	}
 
       if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
 	  || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
-	{
-	  if (dump_enabled_p ())
-	    {
-	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			       "versioning for alias not supported for: "
-			       "bad dist vector for ");
-	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-				 DR_REF (dra));
-	      dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
-	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-				 DR_REF (drb));
-	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
-	    }
-	  return true;
-	}
+	return opt_result::failure_at
+	  (stmtinfo_a->stmt,
+	   "versioning for alias not supported for: "
+	   "bad dist vector for %T and %T\n",
+	   DR_REF (dra), DR_REF (drb));
 
       if (dump_enabled_p ())
-        {
-          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                           "versioning for alias required: "
-                           "bad dist vector for ");
-          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
-          dump_printf (MSG_MISSED_OPTIMIZATION,  " and ");
-          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
-          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
-        }
+	dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmtinfo_a->stmt,
+			 "versioning for alias required: "
+			 "bad dist vector for %T and %T\n",
+			 DR_REF (dra), DR_REF (drb));
       /* Add to list of ddrs that need to be tested at run-time.  */
-      return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
+      return vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
     }
 
   loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
@@ -368,7 +415,7 @@
   if (DDR_COULD_BE_INDEPENDENT_P (ddr)
       && vect_analyze_possibly_independent_ddr (ddr, loop_vinfo,
 						loop_depth, max_vf))
-    return false;
+    return opt_result::success ();
 
   FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
     {
@@ -381,14 +428,9 @@
       if (dist == 0)
 	{
 	  if (dump_enabled_p ())
-	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-	                       "dependence distance == 0 between ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
-	      dump_printf (MSG_NOTE, " and ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
-	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
-	    }
+	    dump_printf_loc (MSG_NOTE, vect_location,
+			     "dependence distance == 0 between %T and %T\n",
+			     DR_REF (dra), DR_REF (drb));
 
 	  /* When we perform grouped accesses and perform implicit CSE
 	     by detecting equal accesses and doing disambiguation with
@@ -408,22 +450,20 @@
 		... = a[i];
 		a[i+1] = ...;
 	     where loads from the group interleave with the store.  */
-	  if (STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
-	      || STMT_VINFO_GROUPED_ACCESS (stmtinfo_b))
+	  if (!vect_preserves_scalar_order_p (dr_info_a, dr_info_b))
+	    return opt_result::failure_at (stmtinfo_a->stmt,
+					   "READ_WRITE dependence"
+					   " in interleaving.\n");
+
+	  if (loop->safelen < 2)
 	    {
-	      gimple *earlier_stmt;
-	      earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
-	      if (DR_IS_WRITE
-		    (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
-		{
-		  if (dump_enabled_p ())
-		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				     "READ_WRITE dependence in interleaving."
-				     "\n");
-		  return true;
-		}
+	      tree indicator = dr_zero_step_indicator (dra);
+	      if (!indicator || integer_zerop (indicator))
+		return opt_result::failure_at (stmtinfo_a->stmt,
+					       "access also has a zero step\n");
+	      else if (TREE_CODE (indicator) != INTEGER_CST)
+		vect_check_nonzero_value (loop_vinfo, indicator);
 	    }
-
 	  continue;
 	}
 
@@ -445,8 +485,8 @@
 	  continue;
 	}
 
-      if (abs (dist) >= 2
-	  && abs (dist) < *max_vf)
+      unsigned int abs_dist = abs (dist);
+      if (abs_dist >= 2 && abs_dist < *max_vf)
 	{
 	  /* The dependence distance requires reduction of the maximal
 	     vectorization factor.  */
@@ -457,7 +497,7 @@
 	                     *max_vf);
 	}
 
-      if (abs (dist) >= *max_vf)
+      if (abs_dist >= *max_vf)
 	{
 	  /* Dependence distance does not create dependence, as far as
 	     vectorization is concerned, in this case.  */
@@ -467,21 +507,13 @@
 	  continue;
 	}
 
-      if (dump_enabled_p ())
-	{
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-	               "not vectorized, possible dependence "
-	               "between data-refs ");
-	  dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
-	  dump_printf (MSG_NOTE,  " and ");
-	  dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
-	  dump_printf (MSG_NOTE,  "\n");
-	}
-
-      return true;
+      return opt_result::failure_at (stmtinfo_a->stmt,
+				     "not vectorized, possible dependence "
+				     "between data-refs %T and %T\n",
+				     DR_REF (dra), DR_REF (drb));
     }
 
-  return false;
+  return opt_result::success ();
 }
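
The boolean returns of this function are now opt_result values, which convert to bool for the usual early-return pattern but can additionally carry the reason for a failure so the caller reports it once instead of dumping at every level (GCC's real implementation is the opt_result/opt_problem machinery in opt-problem.h). A much-simplified sketch of the idea with a placeholder checker; nothing here is the actual GCC class:

// Simplified stand-in for an opt_result-like type: success, or failure
// carrying a human-readable reason for the top-level diagnostic.
#include <cstdio>
#include <string>
#include <utility>

class opt_result_sketch
{
public:
  static opt_result_sketch success () { return opt_result_sketch (true, ""); }
  static opt_result_sketch failure_at (const char *loc, const std::string &reason)
  { return opt_result_sketch (false, std::string (loc) + ": " + reason); }

  // Supports "if (!res) return res;" chaining as used in the patched code.
  explicit operator bool () const { return m_ok; }
  const std::string &reason () const { return m_reason; }

private:
  opt_result_sketch (bool ok, std::string reason)
    : m_ok (ok), m_reason (std::move (reason)) {}
  bool m_ok;
  std::string m_reason;
};

// Placeholder dependence check: the specific condition is made up; the point
// is that every early "return false" becomes a failure with its own message.
static opt_result_sketch
check_dependence (int dist, int max_vf)
{
  if (dist == 0)
    return opt_result_sketch::success ();
  if (dist < max_vf)
    return opt_result_sketch::failure_at ("t.c:42",
					  "possible dependence between data-refs");
  return opt_result_sketch::success ();
}

int
main ()
{
  opt_result_sketch res = check_dependence (2, 8);
  if (!res)
    std::printf ("not vectorized: %s\n", res.reason ().c_str ());
  return 0;
}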
 
 /* Function vect_analyze_data_ref_dependences.
@@ -490,25 +522,30 @@
    exist any data dependences between them.  Set *MAX_VF according to
    the maximum vectorization factor the data dependences allow.  */
 
-bool
-vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo, int *max_vf)
+opt_result
+vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
+				   unsigned int *max_vf)
 {
   unsigned int i;
   struct data_dependence_relation *ddr;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_analyze_data_ref_dependences ===\n");
-
-  LOOP_VINFO_DDRS (loop_vinfo)
-    .create (LOOP_VINFO_DATAREFS (loop_vinfo).length ()
-	     * LOOP_VINFO_DATAREFS (loop_vinfo).length ());
+  DUMP_VECT_SCOPE ("vect_analyze_data_ref_dependences");
+
+  if (!LOOP_VINFO_DDRS (loop_vinfo).exists ())
+    {
+      LOOP_VINFO_DDRS (loop_vinfo)
+	.create (LOOP_VINFO_DATAREFS (loop_vinfo).length ()
+		 * LOOP_VINFO_DATAREFS (loop_vinfo).length ());
+      /* We need read-read dependences to compute
+	 STMT_VINFO_SAME_ALIGN_REFS.  */
+      bool res = compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
+					  &LOOP_VINFO_DDRS (loop_vinfo),
+					  LOOP_VINFO_LOOP_NEST (loop_vinfo),
+					  true);
+      gcc_assert (res);
+    }
+
   LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true;
-  /* We need read-read dependences to compute STMT_VINFO_SAME_ALIGN_REFS.  */
-  if (!compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
-				&LOOP_VINFO_DDRS (loop_vinfo),
-				LOOP_VINFO_LOOP_NEST (loop_vinfo), true))
-    return false;
 
   /* For epilogues we either have no aliases or alias versioning
      was applied to original loop.  Therefore we may just get max_vf
@@ -517,25 +554,32 @@
     *max_vf = LOOP_VINFO_ORIG_MAX_VECT_FACTOR (loop_vinfo);
   else
     FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr)
-      if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
-	return false;
-
-  return true;
+      {
+	opt_result res
+	  = vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf);
+	if (!res)
+	  return res;
+      }
+
+  return opt_result::success ();
 }
 
 
 /* Function vect_slp_analyze_data_ref_dependence.
 
    Return TRUE if there (might) exist a dependence between a memory-reference
-   DRA and a memory-reference DRB.  When versioning for alias may check a
-   dependence at run-time, return FALSE.  Adjust *MAX_VF according to
-   the data dependence.  */
+   DRA and a memory-reference DRB for VINFO.  When versioning for alias
+   may check a dependence at run-time, return FALSE.  Adjust *MAX_VF
+   according to the data dependence.  */
 
 static bool
-vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
+vect_slp_analyze_data_ref_dependence (vec_info *vinfo,
+				      struct data_dependence_relation *ddr)
 {
   struct data_reference *dra = DDR_A (ddr);
   struct data_reference *drb = DDR_B (ddr);
+  dr_vec_info *dr_info_a = vinfo->lookup_dr (dra);
+  dr_vec_info *dr_info_b = vinfo->lookup_dr (drb);
 
   /* We need to check dependences of statements marked as unvectorizable
      as well, they still can prohibit vectorization.  */
@@ -553,33 +597,23 @@
 
   /* If dra and drb are part of the same interleaving chain consider
      them independent.  */
-  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra)))
-      && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
-	  == GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
+  if (STMT_VINFO_GROUPED_ACCESS (dr_info_a->stmt)
+      && (DR_GROUP_FIRST_ELEMENT (dr_info_a->stmt)
+	  == DR_GROUP_FIRST_ELEMENT (dr_info_b->stmt)))
     return false;
 
   /* Unknown data dependence.  */
   if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
     {
       if  (dump_enabled_p ())
-	{
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			   "can't determine dependence between ");
-	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
-	  dump_printf (MSG_MISSED_OPTIMIZATION,  " and ");
-	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
-	  dump_printf (MSG_MISSED_OPTIMIZATION,  "\n");
-	}
+	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+			 "can't determine dependence between %T and %T\n",
+			 DR_REF (dra), DR_REF (drb));
     }
   else if (dump_enabled_p ())
-    {
-      dump_printf_loc (MSG_NOTE, vect_location,
-		       "determined dependence between ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
-      dump_printf (MSG_NOTE, " and ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
-      dump_printf (MSG_NOTE,  "\n");
-    }
+    dump_printf_loc (MSG_NOTE, vect_location,
+		     "determined dependence between %T and %T\n",
+		     DR_REF (dra), DR_REF (drb));
 
   return true;
 }
@@ -591,20 +625,24 @@
 
 static bool
 vect_slp_analyze_node_dependences (slp_instance instance, slp_tree node,
-				   vec<gimple *> stores, gimple *last_store)
+				   vec<stmt_vec_info> stores,
+				   stmt_vec_info last_store_info)
 {
   /* This walks over all stmts involved in the SLP load/store done
      in NODE verifying we can sink them up to the last stmt in the
      group.  */
-  gimple *last_access = vect_find_last_scalar_stmt_in_slp (node);
+  stmt_vec_info last_access_info = vect_find_last_scalar_stmt_in_slp (node);
+  vec_info *vinfo = last_access_info->vinfo;
   for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k)
     {
-      gimple *access = SLP_TREE_SCALAR_STMTS (node)[k];
-      if (access == last_access)
+      stmt_vec_info access_info = SLP_TREE_SCALAR_STMTS (node)[k];
+      if (access_info == last_access_info)
 	continue;
-      data_reference *dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (access));
-      for (gimple_stmt_iterator gsi = gsi_for_stmt (access);
-	   gsi_stmt (gsi) != last_access; gsi_next (&gsi))
+      data_reference *dr_a = STMT_VINFO_DATA_REF (access_info);
+      ao_ref ref;
+      bool ref_initialized_p = false;
+      for (gimple_stmt_iterator gsi = gsi_for_stmt (access_info->stmt);
+	   gsi_stmt (gsi) != last_access_info->stmt; gsi_next (&gsi))
 	{
 	  gimple *stmt = gsi_stmt (gsi);
 	  if (! gimple_vuse (stmt)
@@ -612,12 +650,20 @@
 	    continue;
 
 	  /* If we couldn't record a (single) data reference for this
-	     stmt we have to give up.  */
-	  /* ???  Here and below if dependence analysis fails we can resort
-	     to the alias oracle which can handle more kinds of stmts.  */
-	  data_reference *dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
+	     stmt we have to resort to the alias oracle.  */
+	  stmt_vec_info stmt_info = vinfo->lookup_stmt (stmt);
+	  data_reference *dr_b = STMT_VINFO_DATA_REF (stmt_info);
 	  if (!dr_b)
-	    return false;
+	    {
+	      /* We are moving a store or sinking a load - this means
+	         we cannot use TBAA for disambiguation.  */
+	      if (!ref_initialized_p)
+		ao_ref_init (&ref, DR_REF (dr_a));
+	      if (stmt_may_clobber_ref_p_1 (stmt, &ref, false)
+		  || ref_maybe_used_by_stmt_p (stmt, &ref, false))
+		return false;
+	      continue;
+	    }
 
 	  bool dependent = false;
 	  /* If we run into a store of this same instance (we've just
@@ -626,17 +672,17 @@
 	     been sunk to (and we verify if we can do that as well).  */
 	  if (gimple_visited_p (stmt))
 	    {
-	      if (stmt != last_store)
+	      if (stmt_info != last_store_info)
 		continue;
 	      unsigned i;
-	      gimple *store;
-	      FOR_EACH_VEC_ELT (stores, i, store)
+	      stmt_vec_info store_info;
+	      FOR_EACH_VEC_ELT (stores, i, store_info)
 		{
-		  data_reference *store_dr
-		    = STMT_VINFO_DATA_REF (vinfo_for_stmt (store));
+		  data_reference *store_dr = STMT_VINFO_DATA_REF (store_info);
 		  ddr_p ddr = initialize_data_dependence_relation
 				(dr_a, store_dr, vNULL);
-		  dependent = vect_slp_analyze_data_ref_dependence (ddr);
+		  dependent
+		    = vect_slp_analyze_data_ref_dependence (vinfo, ddr);
 		  free_dependence_relation (ddr);
 		  if (dependent)
 		    break;
@@ -646,7 +692,7 @@
 	    {
 	      ddr_p ddr = initialize_data_dependence_relation (dr_a,
 							       dr_b, vNULL);
-	      dependent = vect_slp_analyze_data_ref_dependence (ddr);
+	      dependent = vect_slp_analyze_data_ref_dependence (vinfo, ddr);
 	      free_dependence_relation (ddr);
 	    }
 	  if (dependent)
@@ -666,26 +712,24 @@
 bool
 vect_slp_analyze_instance_dependence (slp_instance instance)
 {
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_slp_analyze_instance_dependence ===\n");
+  DUMP_VECT_SCOPE ("vect_slp_analyze_instance_dependence");
 
   /* The stores of this instance are at the root of the SLP tree.  */
   slp_tree store = SLP_INSTANCE_TREE (instance);
-  if (! STMT_VINFO_DATA_REF (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (store)[0])))
+  if (! STMT_VINFO_DATA_REF (SLP_TREE_SCALAR_STMTS (store)[0]))
     store = NULL;
 
   /* Verify we can sink stores to the vectorized stmt insert location.  */
-  gimple *last_store = NULL;
+  stmt_vec_info last_store_info = NULL;
   if (store)
     {
       if (! vect_slp_analyze_node_dependences (instance, store, vNULL, NULL))
 	return false;
 
       /* Mark stores in this instance and remember the last one.  */
-      last_store = vect_find_last_scalar_stmt_in_slp (store);
+      last_store_info = vect_find_last_scalar_stmt_in_slp (store);
       for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k)
-	gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k], true);
+	gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k]->stmt, true);
     }
 
   bool res = true;
@@ -698,7 +742,7 @@
     if (! vect_slp_analyze_node_dependences (instance, load,
 					     store
 					     ? SLP_TREE_SCALAR_STMTS (store)
-					     : vNULL, last_store))
+					     : vNULL, last_store_info))
       {
 	res = false;
 	break;
@@ -707,19 +751,19 @@
   /* Unset the visited flag.  */
   if (store)
     for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k)
-      gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k], false);
+      gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k]->stmt, false);
 
   return res;
 }
 
-/* Record in VINFO the base alignment guarantee given by DRB.  STMT is
-   the statement that contains DRB, which is useful for recording in the
-   dump file.  */
+/* Record the base alignment guarantee given by DRB, which occurs
+   in STMT_INFO.  */
 
 static void
-vect_record_base_alignment (vec_info *vinfo, gimple *stmt,
+vect_record_base_alignment (stmt_vec_info stmt_info,
 			    innermost_loop_behavior *drb)
 {
+  vec_info *vinfo = stmt_info->vinfo;
   bool existed;
   innermost_loop_behavior *&entry
     = vinfo->base_alignments.get_or_insert (drb->base_address, &existed);
@@ -727,19 +771,15 @@
     {
       entry = drb;
       if (dump_enabled_p ())
-	{
-	  dump_printf_loc (MSG_NOTE, vect_location,
-			   "recording new base alignment for ");
-	  dump_generic_expr (MSG_NOTE, TDF_SLIM, drb->base_address);
-	  dump_printf (MSG_NOTE, "\n");
-	  dump_printf_loc (MSG_NOTE, vect_location,
-			   "  alignment:    %d\n", drb->base_alignment);
-	  dump_printf_loc (MSG_NOTE, vect_location,
-			   "  misalignment: %d\n", drb->base_misalignment);
-	  dump_printf_loc (MSG_NOTE, vect_location,
-			   "  based on:     ");
-	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
-	}
+	dump_printf_loc (MSG_NOTE, vect_location,
+			 "recording new base alignment for %T\n"
+			 "  alignment:    %d\n"
+			 "  misalignment: %d\n"
+			 "  based on:     %G",
+			 drb->base_address,
+			 drb->base_alignment,
+			 drb->base_misalignment,
+			 stmt_info->stmt);
     }
 }
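
vect_record_base_alignment keys the recorded guarantees by base address and keeps only the first entry per base (hash_map::get_or_insert plus the !existed test). A tiny sketch of that caching pattern with ordinary standard-library containers; the names and types are invented for illustration:

// First-wins cache of alignment guarantees per base address, in the spirit
// of vinfo->base_alignments in the patch.
#include <cstdio>
#include <map>
#include <string>

struct base_alignment_sketch
{
  unsigned alignment;	 // known alignment of the base, in bytes
  unsigned misalignment; // known misalignment relative to that alignment
};

static void
record_base_alignment (std::map<std::string, base_alignment_sketch> &table,
		       const std::string &base, base_alignment_sketch info)
{
  // insert() only takes effect when the key is new, mirroring the
  // "if (!existed)" test after hash_map::get_or_insert.
  auto res = table.insert ({base, info});
  if (res.second)
    std::printf ("recording new base alignment for %s: align %u, misalign %u\n",
		 base.c_str (), info.alignment, info.misalignment);
}

int
main ()
{
  std::map<std::string, base_alignment_sketch> table;
  record_base_alignment (table, "&a[0]", {16, 0});
  record_base_alignment (table, "&a[0]", {32, 0}); // ignored: base already seen
  return 0;
}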
 
@@ -758,55 +798,52 @@
   struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
   data_reference *dr;
   unsigned int i;
-  FOR_EACH_VEC_ELT (vinfo->datarefs, i, dr)
-    if (!DR_IS_CONDITIONAL_IN_STMT (dr))
-      {
-	gimple *stmt = DR_STMT (dr);
-	vect_record_base_alignment (vinfo, stmt, &DR_INNERMOST (dr));
-
-	/* If DR is nested in the loop that is being vectorized, we can also
-	   record the alignment of the base wrt the outer loop.  */
-	if (loop && nested_in_vect_loop_p (loop, stmt))
-	  {
-	    stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+  FOR_EACH_VEC_ELT (vinfo->shared->datarefs, i, dr)
+    {
+      dr_vec_info *dr_info = vinfo->lookup_dr (dr);
+      stmt_vec_info stmt_info = dr_info->stmt;
+      if (!DR_IS_CONDITIONAL_IN_STMT (dr)
+	  && STMT_VINFO_VECTORIZABLE (stmt_info)
+	  && !STMT_VINFO_GATHER_SCATTER_P (stmt_info))
+	{
+	  vect_record_base_alignment (stmt_info, &DR_INNERMOST (dr));
+
+	  /* If DR is nested in the loop that is being vectorized, we can also
+	     record the alignment of the base wrt the outer loop.  */
+	  if (loop && nested_in_vect_loop_p (loop, stmt_info))
 	    vect_record_base_alignment
-	      (vinfo, stmt, &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info));
-	  }
-      }
+	      (stmt_info, &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info));
+	}
+    }
 }
 
-/* Return the target alignment for the vectorized form of DR.  */
+/* Return the target alignment for the vectorized form of DR_INFO.  */
 
 static unsigned int
-vect_calculate_target_alignment (struct data_reference *dr)
+vect_calculate_target_alignment (dr_vec_info *dr_info)
 {
-  gimple *stmt = DR_STMT (dr);
-  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
-  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+  tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
   return targetm.vectorize.preferred_vector_alignment (vectype);
 }
 
 /* Function vect_compute_data_ref_alignment
 
-   Compute the misalignment of the data reference DR.
+   Compute the misalignment of the data reference DR_INFO.
 
    Output:
-   1. If during the misalignment computation it is found that the data reference
-      cannot be vectorized then false is returned.
-   2. DR_MISALIGNMENT (DR) is defined.
+   1. DR_MISALIGNMENT (DR_INFO) is defined.
 
    FOR NOW: No analysis is actually performed. Misalignment is calculated
    only for trivial cases. TODO.  */
 
-bool
-vect_compute_data_ref_alignment (struct data_reference *dr)
+static void
+vect_compute_data_ref_alignment (dr_vec_info *dr_info)
 {
-  gimple *stmt = DR_STMT (dr);
-  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+  stmt_vec_info stmt_info = dr_info->stmt;
   vec_base_alignments *base_alignments = &stmt_info->vinfo->base_alignments;
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
   struct loop *loop = NULL;
-  tree ref = DR_REF (dr);
+  tree ref = DR_REF (dr_info->dr);
   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
 
   if (dump_enabled_p ())
@@ -817,14 +854,17 @@
     loop = LOOP_VINFO_LOOP (loop_vinfo);
 
   /* Initialize misalignment to unknown.  */
-  SET_DR_MISALIGNMENT (dr, DR_MISALIGNMENT_UNKNOWN);
-
-  innermost_loop_behavior *drb = vect_dr_behavior (dr);
+  SET_DR_MISALIGNMENT (dr_info, DR_MISALIGNMENT_UNKNOWN);
+
+  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
+    return;
+
+  innermost_loop_behavior *drb = vect_dr_behavior (dr_info);
   bool step_preserves_misalignment_p;
 
   unsigned HOST_WIDE_INT vector_alignment
-    = vect_calculate_target_alignment (dr) / BITS_PER_UNIT;
-  DR_TARGET_ALIGNMENT (dr) = vector_alignment;
+    = vect_calculate_target_alignment (dr_info) / BITS_PER_UNIT;
+  DR_TARGET_ALIGNMENT (dr_info) = vector_alignment;
 
   /* No step for BB vectorization.  */
   if (!loop)
@@ -839,10 +879,10 @@
      stays the same throughout the execution of the inner-loop, which is why
      we have to check that the stride of the dataref in the inner-loop evenly
      divides by the vector alignment.  */
-  else if (nested_in_vect_loop_p (loop, stmt))
+  else if (nested_in_vect_loop_p (loop, stmt_info))
     {
       step_preserves_misalignment_p
-	= (DR_STEP_ALIGNMENT (dr) % vector_alignment) == 0;
+	= (DR_STEP_ALIGNMENT (dr_info->dr) % vector_alignment) == 0;
 
       if (dump_enabled_p ())
 	{
@@ -862,9 +902,9 @@
      the dataref evenly divides by the alignment.  */
   else
     {
-      unsigned vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+      poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
       step_preserves_misalignment_p
-	= ((DR_STEP_ALIGNMENT (dr) * vf) % vector_alignment) == 0;
+	= multiple_p (DR_STEP_ALIGNMENT (dr_info->dr) * vf, vector_alignment);
 
       if (!step_preserves_misalignment_p && dump_enabled_p ())
 	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -890,62 +930,38 @@
       || TREE_CODE (drb->step) != INTEGER_CST)
     {
       if (dump_enabled_p ())
-	{
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-	                   "Unknown alignment for access: ");
-	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
-	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
-	}
-      return true;
+	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+			 "Unknown alignment for access: %T\n", ref);
+      return;
     }
 
   if (base_alignment < vector_alignment)
     {
-      tree base = drb->base_address;
-      if (TREE_CODE (base) == ADDR_EXPR)
-	base = TREE_OPERAND (base, 0);
-      if (!vect_can_force_dr_alignment_p (base,
-					  vector_alignment * BITS_PER_UNIT))
+      unsigned int max_alignment;
+      tree base = get_base_for_alignment (drb->base_address, &max_alignment);
+      if (max_alignment < vector_alignment
+	  || !vect_can_force_dr_alignment_p (base,
+					     vector_alignment * BITS_PER_UNIT))
 	{
 	  if (dump_enabled_p ())
-	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-	                       "can't force alignment of ref: ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
-	      dump_printf (MSG_NOTE, "\n");
-	    }
-	  return true;
-	}
-
-      if (DECL_USER_ALIGN (base))
-	{
-	  if (dump_enabled_p ())
-	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-			       "not forcing alignment of user-aligned "
-			       "variable: ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, base);
-	      dump_printf (MSG_NOTE, "\n");
-	    }
-	  return true;
+	    dump_printf_loc (MSG_NOTE, vect_location,
+			     "can't force alignment of ref: %T\n", ref);
+	  return;
 	}
 
       /* Force the alignment of the decl.
 	 NOTE: This is the only change to the code we make during
 	 the analysis phase, before deciding to vectorize the loop.  */
       if (dump_enabled_p ())
-        {
-          dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
-          dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
-          dump_printf (MSG_NOTE, "\n");
-        }
-
-      DR_VECT_AUX (dr)->base_decl = base;
-      DR_VECT_AUX (dr)->base_misaligned = true;
+	dump_printf_loc (MSG_NOTE, vect_location,
+			 "force alignment of %T\n", ref);
+
+      dr_info->base_decl = base;
+      dr_info->base_misaligned = true;
       base_misalignment = 0;
     }
-  unsigned int misalignment = (base_misalignment
-			       + TREE_INT_CST_LOW (drb->init));
+  poly_int64 misalignment
+    = base_misalignment + wi::to_poly_offset (drb->init).force_shwi ();
 
   /* If this is a backward running DR then first access in the larger
      vectype actually is N-1 elements before the address in the DR.
@@ -955,117 +971,113 @@
     misalignment += ((TYPE_VECTOR_SUBPARTS (vectype) - 1)
 		     * TREE_INT_CST_LOW (drb->step));
 
-  SET_DR_MISALIGNMENT (dr, misalignment & (vector_alignment - 1));
+  unsigned int const_misalignment;
+  if (!known_misalignment (misalignment, vector_alignment,
+			   &const_misalignment))
+    {
+      if (dump_enabled_p ())
+	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+			 "Non-constant misalignment for access: %T\n", ref);
+      return;
+    }
+
+  SET_DR_MISALIGNMENT (dr_info, const_misalignment);
 
   if (dump_enabled_p ())
-    {
-      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                       "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
-      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
-      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
-    }
-
-  return true;
+    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+		     "misalign = %d bytes of ref %T\n",
+		     DR_MISALIGNMENT (dr_info), ref);
+
+  return;
 }
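
When everything is constant, the misalignment computed above is plain modular arithmetic: the byte offset of the first vector access (base misalignment plus the constant start offset, adjusted by (nelements - 1) elements for a backward-running step) is reduced modulo the target alignment. A toy recomputation with fixed numbers; the values and helper are illustrative only:

#include <cassert>

// Constant-only analogue of the DR_MISALIGNMENT computation in
// vect_compute_data_ref_alignment (the real code uses poly_int64 and
// known_misalignment so it can also reject non-constant cases).
static int
compute_misalignment (int base_misalignment, // misalignment of the base address
		      int init,		     // constant byte offset from the base
		      int step,		     // byte step per scalar iteration
		      int nelements,	     // elements per vector
		      int target_alignment)  // preferred alignment, power of two
{
  int misalignment = base_misalignment + init;
  // A backward-running access really starts nelements - 1 elements before
  // the address recorded in the data reference.
  if (step < 0)
    misalignment += (nelements - 1) * step;
  return misalignment & (target_alignment - 1);
}

int
main ()
{
  // 8-byte elements, 2-element (16-byte) vectors, 16-byte target alignment.
  // Forward access starting 8 bytes past an aligned base: misaligned by 8.
  assert (compute_misalignment (0, 8, 8, 2, 16) == 8);
  // The same access running backward starts one element earlier,
  // which cancels the 8-byte offset.
  assert (compute_misalignment (0, 8, -8, 2, 16) == 0);
  return 0;
}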
 
 /* Function vect_update_misalignment_for_peel.
-   Sets DR's misalignment
-   - to 0 if it has the same alignment as DR_PEEL,
-   - to the misalignment computed using NPEEL if DR's salignment is known,
+   Sets DR_INFO's misalignment
+   - to 0 if it has the same alignment as DR_PEEL_INFO,
+   - to the misalignment computed using NPEEL if DR_INFO's salignment is known,
    - to -1 (unknown) otherwise.
 
-   DR - the data reference whose misalignment is to be adjusted.
-   DR_PEEL - the data reference whose misalignment is being made
-             zero in the vector loop by the peel.
+   DR_INFO - the data reference whose misalignment is to be adjusted.
+   DR_PEEL_INFO - the data reference whose misalignment is being made
+		  zero in the vector loop by the peel.
    NPEEL - the number of iterations in the peel loop if the misalignment
-           of DR_PEEL is known at compile time.  */
+           of DR_PEEL_INFO is known at compile time.  */
 
 static void
-vect_update_misalignment_for_peel (struct data_reference *dr,
-                                   struct data_reference *dr_peel, int npeel)
+vect_update_misalignment_for_peel (dr_vec_info *dr_info,
+				   dr_vec_info *dr_peel_info, int npeel)
 {
   unsigned int i;
   vec<dr_p> same_aligned_drs;
   struct data_reference *current_dr;
-  int dr_size = vect_get_scalar_dr_size (dr);
-  int dr_peel_size = vect_get_scalar_dr_size (dr_peel);
-  stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
-  stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));
+  int dr_size = vect_get_scalar_dr_size (dr_info);
+  int dr_peel_size = vect_get_scalar_dr_size (dr_peel_info);
+  stmt_vec_info stmt_info = dr_info->stmt;
+  stmt_vec_info peel_stmt_info = dr_peel_info->stmt;
 
  /* For interleaved data accesses the step in the loop must be multiplied by
      the size of the interleaving group.  */
   if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
-    dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
+    dr_size *= DR_GROUP_SIZE (DR_GROUP_FIRST_ELEMENT (stmt_info));
   if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
-    dr_peel_size *= GROUP_SIZE (peel_stmt_info);
+    dr_peel_size *= DR_GROUP_SIZE (peel_stmt_info);
 
   /* It can be assumed that the data refs with the same alignment as dr_peel
      are aligned in the vector loop.  */
-  same_aligned_drs
-    = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
+  same_aligned_drs = STMT_VINFO_SAME_ALIGN_REFS (peel_stmt_info);
   FOR_EACH_VEC_ELT (same_aligned_drs, i, current_dr)
     {
-      if (current_dr != dr)
+      if (current_dr != dr_info->dr)
         continue;
-      gcc_assert (!known_alignment_for_access_p (dr)
-		  || !known_alignment_for_access_p (dr_peel)
-		  || (DR_MISALIGNMENT (dr) / dr_size
-		      == DR_MISALIGNMENT (dr_peel) / dr_peel_size));
-      SET_DR_MISALIGNMENT (dr, 0);
+      gcc_assert (!known_alignment_for_access_p (dr_info)
+		  || !known_alignment_for_access_p (dr_peel_info)
+		  || (DR_MISALIGNMENT (dr_info) / dr_size
+		      == DR_MISALIGNMENT (dr_peel_info) / dr_peel_size));
+      SET_DR_MISALIGNMENT (dr_info, 0);
       return;
     }
 
-  if (known_alignment_for_access_p (dr)
-      && known_alignment_for_access_p (dr_peel))
+  if (known_alignment_for_access_p (dr_info)
+      && known_alignment_for_access_p (dr_peel_info))
     {
-      bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
-      int misal = DR_MISALIGNMENT (dr);
+      bool negative = tree_int_cst_compare (DR_STEP (dr_info->dr),
+					    size_zero_node) < 0;
+      int misal = DR_MISALIGNMENT (dr_info);
       misal += negative ? -npeel * dr_size : npeel * dr_size;
-      misal &= DR_TARGET_ALIGNMENT (dr) - 1;
-      SET_DR_MISALIGNMENT (dr, misal);
+      misal &= DR_TARGET_ALIGNMENT (dr_info) - 1;
+      SET_DR_MISALIGNMENT (dr_info, misal);
       return;
     }
 
   if (dump_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment " \
 		     "to unknown (-1).\n");
-  SET_DR_MISALIGNMENT (dr, DR_MISALIGNMENT_UNKNOWN);
+  SET_DR_MISALIGNMENT (dr_info, DR_MISALIGNMENT_UNKNOWN);
 }
 
 
 /* Function verify_data_ref_alignment
 
-   Return TRUE if DR can be handled with respect to alignment.  */
-
-static bool
-verify_data_ref_alignment (data_reference_p dr)
+   Return TRUE if DR_INFO can be handled with respect to alignment.  */
+
+static opt_result
+verify_data_ref_alignment (dr_vec_info *dr_info)
 {
   enum dr_alignment_support supportable_dr_alignment
-    = vect_supportable_dr_alignment (dr, false);
+    = vect_supportable_dr_alignment (dr_info, false);
   if (!supportable_dr_alignment)
-    {
-      if (dump_enabled_p ())
-	{
-	  if (DR_IS_READ (dr))
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: unsupported unaligned load.");
-	  else
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: unsupported unaligned "
-			     "store.");
-
-	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-			     DR_REF (dr));
-	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
-	}
-      return false;
-    }
+    return opt_result::failure_at
+      (dr_info->stmt->stmt,
+       DR_IS_READ (dr_info->dr)
+	? "not vectorized: unsupported unaligned load: %T\n"
+	: "not vectorized: unsupported unaligned store: %T\n",
+       DR_REF (dr_info->dr));
 
   if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
 		     "Vectorizing an unaligned access.\n");
 
-  return true;
+  return opt_result::success ();
 }
 
 /* Function vect_verify_datarefs_alignment
@@ -1073,24 +1085,24 @@
    Return TRUE if all data references in the loop can be
    handled with respect to alignment.  */
 
-bool
+opt_result
 vect_verify_datarefs_alignment (loop_vec_info vinfo)
 {
-  vec<data_reference_p> datarefs = vinfo->datarefs;
+  vec<data_reference_p> datarefs = vinfo->shared->datarefs;
   struct data_reference *dr;
   unsigned int i;
 
   FOR_EACH_VEC_ELT (datarefs, i, dr)
     {
-      gimple *stmt = DR_STMT (dr);
-      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+      dr_vec_info *dr_info = vinfo->lookup_dr (dr);
+      stmt_vec_info stmt_info = dr_info->stmt;
 
       if (!STMT_VINFO_RELEVANT_P (stmt_info))
 	continue;
 
       /* For interleaving, only the alignment of the first access matters.   */
       if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
-	  && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
+	  && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info)
 	continue;
 
       /* Strided accesses perform only component accesses, alignment is
@@ -1099,11 +1111,12 @@
 	  && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
 	continue;
 
-      if (! verify_data_ref_alignment (dr))
-	return false;
+      opt_result res = verify_data_ref_alignment (dr_info);
+      if (!res)
+	return res;
     }
 
-  return true;
+  return opt_result::success ();
 }
 
 /* Given an memory reference EXP return whether its alignment is less
@@ -1121,14 +1134,13 @@
 
 /* Function vector_alignment_reachable_p
 
-   Return true if vector alignment for DR is reachable by peeling
+   Return true if vector alignment for DR_INFO is reachable by peeling
    a few loop iterations.  Return false otherwise.  */
 
 static bool
-vector_alignment_reachable_p (struct data_reference *dr)
+vector_alignment_reachable_p (dr_vec_info *dr_info)
 {
-  gimple *stmt = DR_STMT (dr);
-  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+  stmt_vec_info stmt_info = dr_info->stmt;
   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
 
   if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
@@ -1137,33 +1149,33 @@
 	 the prolog loop ({VF - misalignment}), is a multiple of the
 	 number of the interleaved accesses.  */
       int elem_size, mis_in_elements;
-      int nelements = TYPE_VECTOR_SUBPARTS (vectype);
 
       /* FORNOW: handle only known alignment.  */
-      if (!known_alignment_for_access_p (dr))
+      if (!known_alignment_for_access_p (dr_info))
 	return false;
 
-      elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
-      mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;
-
-      if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
+      poly_uint64 nelements = TYPE_VECTOR_SUBPARTS (vectype);
+      poly_uint64 vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
+      elem_size = vector_element_size (vector_size, nelements);
+      mis_in_elements = DR_MISALIGNMENT (dr_info) / elem_size;
+
+      if (!multiple_p (nelements - mis_in_elements, DR_GROUP_SIZE (stmt_info)))
 	return false;
     }
 
   /* If misalignment is known at the compile time then allow peeling
      only if natural alignment is reachable through peeling.  */
-  if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
+  if (known_alignment_for_access_p (dr_info) && !aligned_access_p (dr_info))
     {
       HOST_WIDE_INT elmsize =
 		int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
       if (dump_enabled_p ())
 	{
 	  dump_printf_loc (MSG_NOTE, vect_location,
-	                   "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
-	  dump_printf (MSG_NOTE,
-	               ". misalignment = %d.\n", DR_MISALIGNMENT (dr));
+	                   "data size = %wd. misalignment = %d.\n", elmsize,
+			   DR_MISALIGNMENT (dr_info));
 	}
-      if (DR_MISALIGNMENT (dr) % elmsize)
+      if (DR_MISALIGNMENT (dr_info) % elmsize)
 	{
 	  if (dump_enabled_p ())
 	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -1172,10 +1184,10 @@
 	}
     }
 
-  if (!known_alignment_for_access_p (dr))
+  if (!known_alignment_for_access_p (dr_info))
     {
-      tree type = TREE_TYPE (DR_REF (dr));
-      bool is_packed = not_size_aligned (DR_REF (dr));
+      tree type = TREE_TYPE (DR_REF (dr_info->dr));
+      bool is_packed = not_size_aligned (DR_REF (dr_info->dr));
       if (dump_enabled_p ())
 	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
 	                 "Unknown misalignment, %snaturally aligned\n",
@@ -1187,16 +1199,16 @@
 }
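
For a grouped access, vector_alignment_reachable_p only allows peeling when the number of peeled scalar iterations (nelements minus the misalignment in elements) is a multiple of the group size, and a known misalignment must in any case be a whole number of elements. A small numeric sketch of those two tests; the values and helper are illustrative:

#include <cassert>

// Rough analogue of the grouped-access checks in vector_alignment_reachable_p,
// using plain byte sizes instead of trees and machine modes.
static bool
alignment_reachable_by_peeling (int misalign_bytes, // DR_MISALIGNMENT
				int elem_size,	    // scalar element size in bytes
				int nelements,	    // elements per vector
				int group_size)	    // interleaving group size
{
  // Peeling works in whole scalar iterations, so a sub-element misalignment
  // can never be fixed this way.
  if (misalign_bytes % elem_size != 0)
    return false;
  int mis_in_elements = misalign_bytes / elem_size;
  // The prologue would peel nelements - mis_in_elements iterations; for an
  // interleaved group this must not split the group.
  return (nelements - mis_in_elements) % group_size == 0;
}

int
main ()
{
  // 4-byte elements, 4-element vectors, group of 2, misaligned by 8 bytes:
  // peel 2 iterations, a multiple of the group size.
  assert (alignment_reachable_by_peeling (8, 4, 4, 2));
  // Misaligned by 4 bytes instead: peeling 3 iterations would split the group.
  assert (!alignment_reachable_by_peeling (4, 4, 4, 2));
  return 0;
}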
 
 
-/* Calculate the cost of the memory access represented by DR.  */
+/* Calculate the cost of the memory access represented by DR_INFO.  */
 
 static void
-vect_get_data_access_cost (struct data_reference *dr,
+vect_get_data_access_cost (dr_vec_info *dr_info,
                            unsigned int *inside_cost,
                            unsigned int *outside_cost,
-			   stmt_vector_for_cost *body_cost_vec)
+			   stmt_vector_for_cost *body_cost_vec,
+			   stmt_vector_for_cost *prologue_cost_vec)
 {
-  gimple *stmt = DR_STMT (dr);
-  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+  stmt_vec_info stmt_info = dr_info->stmt;
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
   int ncopies;
 
@@ -1205,11 +1217,11 @@
   else
     ncopies = vect_get_num_copies (loop_vinfo, STMT_VINFO_VECTYPE (stmt_info));
 
-  if (DR_IS_READ (dr))
-    vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost,
-			NULL, body_cost_vec, false);
+  if (DR_IS_READ (dr_info->dr))
+    vect_get_load_cost (stmt_info, ncopies, true, inside_cost, outside_cost,
+			prologue_cost_vec, body_cost_vec, false);
   else
-    vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);
+    vect_get_store_cost (stmt_info, ncopies, inside_cost, body_cost_vec);
 
   if (dump_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
@@ -1220,7 +1232,7 @@
 
 typedef struct _vect_peel_info
 {
-  struct data_reference *dr;
+  dr_vec_info *dr_info;
   int npeel;
   unsigned int count;
 } *vect_peel_info;
@@ -1254,16 +1266,17 @@
 }
 
 
-/* Insert DR into peeling hash table with NPEEL as key.  */
+/* Insert DR_INFO into peeling hash table with NPEEL as key.  */
 
 static void
 vect_peeling_hash_insert (hash_table<peel_info_hasher> *peeling_htab,
-			  loop_vec_info loop_vinfo, struct data_reference *dr,
+			  loop_vec_info loop_vinfo, dr_vec_info *dr_info,
                           int npeel)
 {
   struct _vect_peel_info elem, *slot;
   _vect_peel_info **new_slot;
-  bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
+  bool supportable_dr_alignment
+    = vect_supportable_dr_alignment (dr_info, true);
 
   elem.npeel = npeel;
   slot = peeling_htab->find (&elem);
@@ -1273,7 +1286,7 @@
     {
       slot = XNEW (struct _vect_peel_info);
       slot->npeel = npeel;
-      slot->dr = dr;
+      slot->dr_info = dr_info;
       slot->count = 1;
       new_slot = peeling_htab->find_slot (slot, INSERT);
       *new_slot = slot;
@@ -1300,40 +1313,42 @@
     {
       max->peel_info.npeel = elem->npeel;
       max->peel_info.count = elem->count;
-      max->peel_info.dr = elem->dr;
+      max->peel_info.dr_info = elem->dr_info;
     }
 
   return 1;
 }
 
-/* Get the costs of peeling NPEEL iterations checking data access costs
-   for all data refs.  If UNKNOWN_MISALIGNMENT is true, we assume DR0's
-   misalignment will be zero after peeling.  */
+/* Get the costs of peeling NPEEL iterations for LOOP_VINFO, checking
+   data access costs for all data refs.  If UNKNOWN_MISALIGNMENT is true,
+   we assume DR0_INFO's misalignment will be zero after peeling.  */
 
 static void
-vect_get_peeling_costs_all_drs (vec<data_reference_p> datarefs,
-				struct data_reference *dr0,
+vect_get_peeling_costs_all_drs (loop_vec_info loop_vinfo,
+				dr_vec_info *dr0_info,
 				unsigned int *inside_cost,
 				unsigned int *outside_cost,
 				stmt_vector_for_cost *body_cost_vec,
+				stmt_vector_for_cost *prologue_cost_vec,
 				unsigned int npeel,
 				bool unknown_misalignment)
 {
+  vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
   unsigned i;
   data_reference *dr;
 
   FOR_EACH_VEC_ELT (datarefs, i, dr)
     {
-      gimple *stmt = DR_STMT (dr);
-      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+      dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
+      stmt_vec_info stmt_info = dr_info->stmt;
       if (!STMT_VINFO_RELEVANT_P (stmt_info))
 	continue;
 
       /* For interleaving, only the alignment of the first access
          matters.  */
       if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
-          && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
-        continue;
+	  && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info)
+	continue;
 
       /* Strided accesses perform only component accesses, alignment is
          irrelevant for them.  */
@@ -1342,16 +1357,16 @@
 	continue;
 
       int save_misalignment;
-      save_misalignment = DR_MISALIGNMENT (dr);
+      save_misalignment = DR_MISALIGNMENT (dr_info);
       if (npeel == 0)
 	;
-      else if (unknown_misalignment && dr == dr0)
-	SET_DR_MISALIGNMENT (dr, 0);
+      else if (unknown_misalignment && dr_info == dr0_info)
+	SET_DR_MISALIGNMENT (dr_info, 0);
       else
-	vect_update_misalignment_for_peel (dr, dr0, npeel);
-      vect_get_data_access_cost (dr, inside_cost, outside_cost,
-				 body_cost_vec);
-      SET_DR_MISALIGNMENT (dr, save_misalignment);
+	vect_update_misalignment_for_peel (dr_info, dr0_info, npeel);
+      vect_get_data_access_cost (dr_info, inside_cost, outside_cost,
+				 body_cost_vec, prologue_cost_vec);
+      SET_DR_MISALIGNMENT (dr_info, save_misalignment);
     }
 }
 
@@ -1365,8 +1380,7 @@
   vect_peel_info elem = *slot;
   int dummy;
   unsigned int inside_cost = 0, outside_cost = 0;
-  gimple *stmt = DR_STMT (elem->dr);
-  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+  stmt_vec_info stmt_info = elem->dr_info->stmt;
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
   stmt_vector_for_cost prologue_cost_vec, body_cost_vec,
 		       epilogue_cost_vec;
@@ -1375,9 +1389,9 @@
   body_cost_vec.create (2);
   epilogue_cost_vec.create (2);
 
-  vect_get_peeling_costs_all_drs (LOOP_VINFO_DATAREFS (loop_vinfo),
-				  elem->dr, &inside_cost, &outside_cost,
-				  &body_cost_vec, elem->npeel, false);
+  vect_get_peeling_costs_all_drs (loop_vinfo, elem->dr_info, &inside_cost,
+				  &outside_cost, &body_cost_vec,
+				  &prologue_cost_vec, elem->npeel, false);
 
   body_cost_vec.release ();
 
@@ -1399,7 +1413,7 @@
     {
       min->inside_cost = inside_cost;
       min->outside_cost = outside_cost;
-      min->peel_info.dr = elem->dr;
+      min->peel_info.dr_info = elem->dr_info;
       min->peel_info.npeel = elem->npeel;
       min->peel_info.count = elem->count;
     }
@@ -1418,7 +1432,7 @@
 {
    struct _vect_peel_extended_info res;
 
-   res.peel_info.dr = NULL;
+   res.peel_info.dr_info = NULL;
 
    if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
      {
@@ -1442,14 +1456,12 @@
 /* Return true if the new peeling NPEEL is supported.  */
 
 static bool
-vect_peeling_supportable (loop_vec_info loop_vinfo, struct data_reference *dr0,
+vect_peeling_supportable (loop_vec_info loop_vinfo, dr_vec_info *dr0_info,
 			  unsigned npeel)
 {
   unsigned i;
   struct data_reference *dr = NULL;
   vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
-  gimple *stmt;
-  stmt_vec_info stmt_info;
   enum dr_alignment_support supportable_dr_alignment;
 
   /* Ensure that all data refs can be vectorized after the peel.  */
@@ -1457,15 +1469,15 @@
     {
       int save_misalignment;
 
-      if (dr == dr0)
+      if (dr == dr0_info->dr)
 	continue;
 
-      stmt = DR_STMT (dr);
-      stmt_info = vinfo_for_stmt (stmt);
+      dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
+      stmt_vec_info stmt_info = dr_info->stmt;
       /* For interleaving, only the alignment of the first access
 	 matters.  */
       if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
-	  && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
+	  && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info)
 	continue;
 
       /* Strided accesses perform only component accesses, alignment is
@@ -1474,10 +1486,11 @@
 	  && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
 	continue;
 
-      save_misalignment = DR_MISALIGNMENT (dr);
-      vect_update_misalignment_for_peel (dr, dr0, npeel);
-      supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
-      SET_DR_MISALIGNMENT (dr, save_misalignment);
+      save_misalignment = DR_MISALIGNMENT (dr_info);
+      vect_update_misalignment_for_peel (dr_info, dr0_info, npeel);
+      supportable_dr_alignment
+	= vect_supportable_dr_alignment (dr_info, false);
+      SET_DR_MISALIGNMENT (dr_info, save_misalignment);
 
       if (!supportable_dr_alignment)
 	return false;
@@ -1577,34 +1590,30 @@
      (whether to generate regular loads/stores, or with special handling for
      misalignment).  */
 
-bool
+opt_result
 vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
 {
   vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
   enum dr_alignment_support supportable_dr_alignment;
-  struct data_reference *dr0 = NULL, *first_store = NULL;
+  dr_vec_info *first_store = NULL;
+  dr_vec_info *dr0_info = NULL;
   struct data_reference *dr;
   unsigned int i, j;
   bool do_peeling = false;
   bool do_versioning = false;
-  bool stat;
-  gimple *stmt;
-  stmt_vec_info stmt_info;
   unsigned int npeel = 0;
   bool one_misalignment_known = false;
   bool one_misalignment_unknown = false;
   bool one_dr_unsupportable = false;
-  struct data_reference *unsupportable_dr = NULL;
-  unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+  dr_vec_info *unsupportable_dr_info = NULL;
+  poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
   unsigned possible_npeel_number = 1;
   tree vectype;
-  unsigned int nelements, mis, same_align_drs_max = 0;
+  unsigned int mis, same_align_drs_max = 0;
   hash_table<peel_info_hasher> peeling_htab (1);
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_enhance_data_refs_alignment ===\n");
+  DUMP_VECT_SCOPE ("vect_enhance_data_refs_alignment");
 
   /* Reset data so we can safely be called multiple times.  */
   LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
@@ -1644,8 +1653,8 @@
 
   FOR_EACH_VEC_ELT (datarefs, i, dr)
     {
-      stmt = DR_STMT (dr);
-      stmt_info = vinfo_for_stmt (stmt);
+      dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
+      stmt_vec_info stmt_info = dr_info->stmt;
 
       if (!STMT_VINFO_RELEVANT_P (stmt_info))
 	continue;
@@ -1653,11 +1662,13 @@
       /* For interleaving, only the alignment of the first access
          matters.  */
       if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
-          && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
-        continue;
-
-      /* For invariant accesses there is nothing to enhance.  */
-      if (integer_zerop (DR_STEP (dr)))
+	  && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info)
+	continue;
+
+      /* For scatter-gather or invariant accesses there is nothing
+	 to enhance.  */
+      if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)
+	  || integer_zerop (DR_STEP (dr)))
 	continue;
 
       /* Strided accesses perform only component accesses, alignment is
@@ -1666,22 +1677,23 @@
 	  && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
 	continue;
 
-      supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
-      do_peeling = vector_alignment_reachable_p (dr);
+      supportable_dr_alignment = vect_supportable_dr_alignment (dr_info, true);
+      do_peeling = vector_alignment_reachable_p (dr_info);
       if (do_peeling)
         {
-          if (known_alignment_for_access_p (dr))
+          if (known_alignment_for_access_p (dr_info))
             {
 	      unsigned int npeel_tmp = 0;
 	      bool negative = tree_int_cst_compare (DR_STEP (dr),
 						    size_zero_node) < 0;
 
 	      vectype = STMT_VINFO_VECTYPE (stmt_info);
-	      nelements = TYPE_VECTOR_SUBPARTS (vectype);
-	      unsigned int target_align = DR_TARGET_ALIGNMENT (dr);
-	      unsigned int dr_size = vect_get_scalar_dr_size (dr);
-	      mis = (negative ? DR_MISALIGNMENT (dr) : -DR_MISALIGNMENT (dr));
-	      if (DR_MISALIGNMENT (dr) != 0)
+	      unsigned int target_align = DR_TARGET_ALIGNMENT (dr_info);
+	      unsigned int dr_size = vect_get_scalar_dr_size (dr_info);
+	      mis = (negative
+		     ? DR_MISALIGNMENT (dr_info)
+		     : -DR_MISALIGNMENT (dr_info));
+	      if (DR_MISALIGNMENT (dr_info) != 0)
 		npeel_tmp = (mis & (target_align - 1)) / dr_size;
 
               /* For multiple types, it is possible that the bigger type access
@@ -1699,15 +1711,14 @@
 		 cost for every peeling option.  */
               if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
 		{
-		  if (STMT_SLP_TYPE (stmt_info))
-		    possible_npeel_number
-		      = (vf * GROUP_SIZE (stmt_info)) / nelements;
-		  else
-		    possible_npeel_number = vf / nelements;
+		  poly_uint64 nscalars = (STMT_SLP_TYPE (stmt_info)
+					  ? vf * DR_GROUP_SIZE (stmt_info) : vf);
+		  possible_npeel_number
+		    = vect_get_num_vectors (nscalars, vectype);
 
 		  /* NPEEL_TMP is 0 when there is no misalignment, but also
 		     allow peeling NELEMENTS.  */
-		  if (DR_MISALIGNMENT (dr) == 0)
+		  if (DR_MISALIGNMENT (dr_info) == 0)
 		    possible_npeel_number++;
 		}
 
@@ -1716,7 +1727,7 @@
               for (j = 0; j < possible_npeel_number; j++)
                 {
                   vect_peeling_hash_insert (&peeling_htab, loop_vinfo,
-					    dr, npeel_tmp);
+					    dr_info, npeel_tmp);
 		  npeel_tmp += target_align / dr_size;
                 }
 
@@ -1730,11 +1741,11 @@
                  stores over load.  */
 	      unsigned same_align_drs
 		= STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ();
-	      if (!dr0
+	      if (!dr0_info
 		  || same_align_drs_max < same_align_drs)
 		{
 		  same_align_drs_max = same_align_drs;
-		  dr0 = dr;
+		  dr0_info = dr_info;
 		}
 	      /* For data-refs with the same number of related
 		 accesses prefer the one where the misalign
@@ -1743,13 +1754,13 @@
 		{
 		  struct loop *ivloop0, *ivloop;
 		  ivloop0 = outermost_invariant_loop_for_expr
-		    (loop, DR_BASE_ADDRESS (dr0));
+		    (loop, DR_BASE_ADDRESS (dr0_info->dr));
 		  ivloop = outermost_invariant_loop_for_expr
 		    (loop, DR_BASE_ADDRESS (dr));
 		  if ((ivloop && !ivloop0)
 		      || (ivloop && ivloop0
 			  && flow_loop_nested_p (ivloop, ivloop0)))
-		    dr0 = dr;
+		    dr0_info = dr_info;
 		}
 
 	      one_misalignment_unknown = true;
@@ -1759,16 +1770,16 @@
 	      if (!supportable_dr_alignment)
 	      {
 		one_dr_unsupportable = true;
-		unsupportable_dr = dr;
+		unsupportable_dr_info = dr_info;
 	      }
 
 	      if (!first_store && DR_IS_WRITE (dr))
-		first_store = dr;
+		first_store = dr_info;
             }
         }
       else
         {
-          if (!aligned_access_p (dr))
+          if (!aligned_access_p (dr_info))
             {
               if (dump_enabled_p ())
                 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -1802,22 +1813,24 @@
       unsigned int load_outside_cost = 0;
       unsigned int store_inside_cost = 0;
       unsigned int store_outside_cost = 0;
+      unsigned int estimated_npeels = vect_vf_for_cost (loop_vinfo) / 2;
 
       stmt_vector_for_cost dummy;
       dummy.create (2);
-      vect_get_peeling_costs_all_drs (datarefs, dr0,
+      vect_get_peeling_costs_all_drs (loop_vinfo, dr0_info,
 				      &load_inside_cost,
 				      &load_outside_cost,
-				      &dummy, vf / 2, true);
+				      &dummy, &dummy, estimated_npeels, true);
       dummy.release ();
 
       if (first_store)
 	{
 	  dummy.create (2);
-	  vect_get_peeling_costs_all_drs (datarefs, first_store,
+	  vect_get_peeling_costs_all_drs (loop_vinfo, first_store,
 					  &store_inside_cost,
 					  &store_outside_cost,
-					  &dummy, vf / 2, true);
+					  &dummy, &dummy,
+					  estimated_npeels, true);
 	  dummy.release ();
 	}
       else
@@ -1830,7 +1843,7 @@
 	  || (load_inside_cost == store_inside_cost
 	      && load_outside_cost > store_outside_cost))
 	{
-	  dr0 = first_store;
+	  dr0_info = first_store;
 	  peel_for_unknown_alignment.inside_cost = store_inside_cost;
 	  peel_for_unknown_alignment.outside_cost = store_outside_cost;
 	}
@@ -1846,7 +1859,7 @@
 
       int dummy2;
       peel_for_unknown_alignment.outside_cost += vect_get_known_peeling_cost
-	(loop_vinfo, vf / 2, &dummy2,
+	(loop_vinfo, estimated_npeels, &dummy2,
 	 &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
 	 &prologue_cost_vec, &epilogue_cost_vec);
 
@@ -1854,19 +1867,18 @@
       epilogue_cost_vec.release ();
 
       peel_for_unknown_alignment.peel_info.count = 1
-	+ STMT_VINFO_SAME_ALIGN_REFS
-	(vinfo_for_stmt (DR_STMT (dr0))).length ();
+	+ STMT_VINFO_SAME_ALIGN_REFS (dr0_info->stmt).length ();
     }
 
   peel_for_unknown_alignment.peel_info.npeel = 0;
-  peel_for_unknown_alignment.peel_info.dr = dr0;
+  peel_for_unknown_alignment.peel_info.dr_info = dr0_info;
 
   best_peel = peel_for_unknown_alignment;
 
   peel_for_known_alignment.inside_cost = INT_MAX;
   peel_for_known_alignment.outside_cost = INT_MAX;
   peel_for_known_alignment.peel_info.count = 0;
-  peel_for_known_alignment.peel_info.dr = NULL;
+  peel_for_known_alignment.peel_info.dr_info = NULL;
 
   if (do_peeling && one_misalignment_known)
     {
@@ -1878,7 +1890,7 @@
     }
 
   /* Compare costs of peeling for known and unknown alignment. */
-  if (peel_for_known_alignment.peel_info.dr != NULL
+  if (peel_for_known_alignment.peel_info.dr_info != NULL
       && peel_for_unknown_alignment.inside_cost
       >= peel_for_known_alignment.inside_cost)
     {
@@ -1895,7 +1907,7 @@
      since we'd have to discard a chosen peeling except when it accidentally
      aligned the unsupportable data ref.  */
   if (one_dr_unsupportable)
-    dr0 = unsupportable_dr;
+    dr0_info = unsupportable_dr_info;
   else if (do_peeling)
     {
       /* Calculate the penalty for no peeling, i.e. leaving everything as-is.
@@ -1905,8 +1917,9 @@
 
       stmt_vector_for_cost dummy;
       dummy.create (2);
-      vect_get_peeling_costs_all_drs (datarefs, NULL, &nopeel_inside_cost,
-				      &nopeel_outside_cost, &dummy, 0, false);
+      vect_get_peeling_costs_all_drs (loop_vinfo, NULL, &nopeel_inside_cost,
+				      &nopeel_outside_cost, &dummy, &dummy,
+				      0, false);
       dummy.release ();
 
       /* Add epilogue costs.  As we do not peel for alignment here, no prologue
@@ -1925,7 +1938,7 @@
       epilogue_cost_vec.release ();
 
       npeel = best_peel.peel_info.npeel;
-      dr0 = best_peel.peel_info.dr;
+      dr0_info = best_peel.peel_info.dr_info;
 
      /* If doing no peeling is not more expensive than the best peeling
	 we have found so far, don't perform any peeling.  */
@@ -1935,13 +1948,12 @@
 
   if (do_peeling)
     {
-      stmt = DR_STMT (dr0);
-      stmt_info = vinfo_for_stmt (stmt);
+      stmt_vec_info stmt_info = dr0_info->stmt;
       vectype = STMT_VINFO_VECTYPE (stmt_info);
 
-      if (known_alignment_for_access_p (dr0))
+      if (known_alignment_for_access_p (dr0_info))
         {
-	  bool negative = tree_int_cst_compare (DR_STEP (dr0),
+	  bool negative = tree_int_cst_compare (DR_STEP (dr0_info->dr),
 						size_zero_node) < 0;
           if (!npeel)
             {
@@ -1950,18 +1962,19 @@
                  updating DR_MISALIGNMENT values.  The peeling factor is the
                  vectorization factor minus the misalignment as an element
                  count.  */
-	      mis = negative ? DR_MISALIGNMENT (dr0) : -DR_MISALIGNMENT (dr0);
-	      unsigned int target_align = DR_TARGET_ALIGNMENT (dr0);
+	      mis = (negative
+		     ? DR_MISALIGNMENT (dr0_info)
+		     : -DR_MISALIGNMENT (dr0_info));
+	      unsigned int target_align = DR_TARGET_ALIGNMENT (dr0_info);
 	      npeel = ((mis & (target_align - 1))
-		       / vect_get_scalar_dr_size (dr0));
+		       / vect_get_scalar_dr_size (dr0_info));
             }
 
 	  /* For interleaved data access every iteration accesses all the
 	     members of the group, therefore we divide the number of iterations
 	     by the group size.  */
-	  stmt_info = vinfo_for_stmt (DR_STMT (dr0));
 	  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
-	    npeel /= GROUP_SIZE (stmt_info);
+	    npeel /= DR_GROUP_SIZE (stmt_info);
 
           if (dump_enabled_p ())
             dump_printf_loc (MSG_NOTE, vect_location,
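A worked example of the peeling-factor computation above, with illustrative
values rather than anything taken from the change: assume a forward access
(NEGATIVE is false) with DR_MISALIGNMENT (dr0_info) == 4,
DR_TARGET_ALIGNMENT (dr0_info) == 16 and 4-byte scalar elements.

  unsigned int mis = -4;                      /* -DR_MISALIGNMENT (dr0_info) */
  unsigned int npeel = (mis & (16 - 1)) / 4;  /* (0xfffffffc & 0xf) / 4 == 3 */

Peeling three scalar iterations advances the access by 12 bytes, from offset 4
to offset 16, i.e. onto a 16-byte boundary; for a grouped access the result is
then divided by DR_GROUP_SIZE as in the lines above.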
@@ -1969,13 +1982,13 @@
         }
 
       /* Ensure that all datarefs can be vectorized after the peel.  */
-      if (!vect_peeling_supportable (loop_vinfo, dr0, npeel))
+      if (!vect_peeling_supportable (loop_vinfo, dr0_info, npeel))
 	do_peeling = false;
 
       /* Check if all datarefs are supportable and log.  */
-      if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
+      if (do_peeling && known_alignment_for_access_p (dr0_info) && npeel == 0)
         {
-          stat = vect_verify_datarefs_alignment (loop_vinfo);
+          opt_result stat = vect_verify_datarefs_alignment (loop_vinfo);
           if (!stat)
             do_peeling = false;
           else
@@ -1992,8 +2005,9 @@
               unsigned max_peel = npeel;
               if (max_peel == 0)
                 {
-		  unsigned int target_align = DR_TARGET_ALIGNMENT (dr0);
-		  max_peel = target_align / vect_get_scalar_dr_size (dr0) - 1;
+		  unsigned int target_align = DR_TARGET_ALIGNMENT (dr0_info);
+		  max_peel = (target_align
+			      / vect_get_scalar_dr_size (dr0_info) - 1);
                 }
               if (max_peel > max_allowed_peel)
                 {
@@ -2006,14 +2020,16 @@
         }
 
       /* Cost model #2 - if peeling may result in a remaining loop not
-	 iterating enough to be vectorized then do not peel.  */
+	 iterating enough to be vectorized then do not peel.  Since this
+	 is a cost heuristic rather than a correctness decision, use the
+	 most likely runtime value for variable vectorization factors.  */
       if (do_peeling
 	  && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
 	{
-	  unsigned max_peel
-	    = npeel == 0 ? LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1 : npeel;
-	  if (LOOP_VINFO_INT_NITERS (loop_vinfo)
-	      < LOOP_VINFO_VECT_FACTOR (loop_vinfo) + max_peel)
+	  unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
+	  unsigned int max_peel = npeel == 0 ? assumed_vf - 1 : npeel;
+	  if ((unsigned HOST_WIDE_INT) LOOP_VINFO_INT_NITERS (loop_vinfo)
+	      < assumed_vf + max_peel)
 	    do_peeling = false;
 	}
 
@@ -2027,25 +2043,26 @@
              vectorization factor times the size).  Otherwise, the
              misalignment of DR_i must be set to unknown.  */
 	  FOR_EACH_VEC_ELT (datarefs, i, dr)
-	    if (dr != dr0)
+	    if (dr != dr0_info->dr)
 	      {
 		/* Strided accesses perform only component accesses, alignment
 		   is irrelevant for them.  */
-		stmt_info = vinfo_for_stmt (DR_STMT (dr));
+		dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
+		stmt_info = dr_info->stmt;
 		if (STMT_VINFO_STRIDED_P (stmt_info)
 		    && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
 		  continue;
 
-		vect_update_misalignment_for_peel (dr, dr0, npeel);
+		vect_update_misalignment_for_peel (dr_info, dr0_info, npeel);
 	      }
 
-          LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
+          LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0_info;
           if (npeel)
             LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
           else
             LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
-	      = DR_MISALIGNMENT (dr0);
-	  SET_DR_MISALIGNMENT (dr0, 0);
+	      = DR_MISALIGNMENT (dr0_info);
+	  SET_DR_MISALIGNMENT (dr0_info, 0);
 	  if (dump_enabled_p ())
             {
               dump_printf_loc (MSG_NOTE, vect_location,
@@ -2057,7 +2074,7 @@
 	  /* The inside-loop cost will be accounted for in vectorizable_load
 	     and vectorizable_store correctly with adjusted alignments.
	     Drop the body_cost_vec on the floor here.  */
-	  stat = vect_verify_datarefs_alignment (loop_vinfo);
+	  opt_result stat = vect_verify_datarefs_alignment (loop_vinfo);
 	  gcc_assert (stat);
           return stat;
         }
@@ -2080,14 +2097,14 @@
     {
       FOR_EACH_VEC_ELT (datarefs, i, dr)
         {
-	  stmt = DR_STMT (dr);
-	  stmt_info = vinfo_for_stmt (stmt);
+	  dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
+	  stmt_vec_info stmt_info = dr_info->stmt;
 
 	  /* For interleaving, only the alignment of the first access
 	     matters.  */
-	  if (aligned_access_p (dr)
+	  if (aligned_access_p (dr_info)
 	      || (STMT_VINFO_GROUPED_ACCESS (stmt_info)
-		  && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
+		  && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info))
 	    continue;
 
 	  if (STMT_VINFO_STRIDED_P (stmt_info))
@@ -2100,15 +2117,15 @@
 	      break;
 	    }
 
-	  supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
+	  supportable_dr_alignment
+	    = vect_supportable_dr_alignment (dr_info, false);
 
           if (!supportable_dr_alignment)
             {
-	      gimple *stmt;
               int mask;
               tree vectype;
 
-              if (known_alignment_for_access_p (dr)
+              if (known_alignment_for_access_p (dr_info)
                   || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
                      >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
                 {
@@ -2116,15 +2133,25 @@
                   break;
                 }
 
-              stmt = DR_STMT (dr);
-              vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
-              gcc_assert (vectype);
+	      vectype = STMT_VINFO_VECTYPE (stmt_info);
+	      gcc_assert (vectype);
+
+	      /* At present we don't support versioning for alignment
+		 with variable VF, since there's no guarantee that the
+		 VF is a power of two.  We could relax this if we added
+		 a way of enforcing a power-of-two size.  */
+	      unsigned HOST_WIDE_INT size;
+	      if (!GET_MODE_SIZE (TYPE_MODE (vectype)).is_constant (&size))
+		{
+		  do_versioning = false;
+		  break;
+		}
 
               /* The rightmost bits of an aligned address must be zeros.
                  Construct the mask needed for this test.  For example,
                  GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
                  mask must be 15 = 0xf. */
-              mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
+	      mask = size - 1;
 
               /* FORNOW: use the same mask to test all potentially unaligned
                  references in the loop.  The vectorizer currently supports
@@ -2134,8 +2161,7 @@
               gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
                           || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
               LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
-              LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (
-		      DR_STMT (dr));
+	      LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (stmt_info);
             }
         }
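The mask recorded in LOOP_VINFO_PTR_MASK feeds the runtime alignment test of
the versioned loop.  A hedged sketch of that test in plain C (the real check is
emitted as gimple and may combine more than two addresses; the names below are
made up for illustration):

  #include <stdint.h>

  /* Return nonzero if both addresses have their low bits clear, i.e. both
     are aligned to the vector size implied by MASK (15 for 16 bytes).  */
  static int
  addresses_vector_aligned_p (const void *a, const void *b, uintptr_t mask)
  {
    return (((uintptr_t) a | (uintptr_t) b) & mask) == 0;
  }

Only when the test succeeds does execution enter the vectorized version of the
loop; otherwise the scalar version runs.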
 
@@ -2148,18 +2174,17 @@
 
   if (do_versioning)
     {
-      vec<gimple *> may_misalign_stmts
+      vec<stmt_vec_info> may_misalign_stmts
         = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
-      gimple *stmt;
+      stmt_vec_info stmt_info;
 
       /* It can now be assumed that the data references in the statements
          in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
          of the loop being vectorized.  */
-      FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt)
+      FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt_info)
         {
-          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
-          dr = STMT_VINFO_DATA_REF (stmt_info);
-	  SET_DR_MISALIGNMENT (dr, 0);
+	  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
+	  SET_DR_MISALIGNMENT (dr_info, 0);
 	  if (dump_enabled_p ())
             dump_printf_loc (MSG_NOTE, vect_location,
                              "Alignment of access forced using versioning.\n");
@@ -2172,7 +2197,7 @@
       /* Peeling and versioning can't be done together at this time.  */
       gcc_assert (! (do_peeling && do_versioning));
 
-      stat = vect_verify_datarefs_alignment (loop_vinfo);
+      opt_result stat = vect_verify_datarefs_alignment (loop_vinfo);
       gcc_assert (stat);
       return stat;
     }
@@ -2180,23 +2205,25 @@
   /* This point is reached if neither peeling nor versioning is being done.  */
   gcc_assert (! (do_peeling || do_versioning));
 
-  stat = vect_verify_datarefs_alignment (loop_vinfo);
+  opt_result stat = vect_verify_datarefs_alignment (loop_vinfo);
   return stat;
 }
 
 
 /* Function vect_find_same_alignment_drs.
 
-   Update group and alignment relations according to the chosen
+   Update group and alignment relations in VINFO according to the chosen
    vectorization factor.  */
 
 static void
-vect_find_same_alignment_drs (struct data_dependence_relation *ddr)
+vect_find_same_alignment_drs (vec_info *vinfo, data_dependence_relation *ddr)
 {
   struct data_reference *dra = DDR_A (ddr);
   struct data_reference *drb = DDR_B (ddr);
-  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
-  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
+  dr_vec_info *dr_info_a = vinfo->lookup_dr (dra);
+  dr_vec_info *dr_info_b = vinfo->lookup_dr (drb);
+  stmt_vec_info stmtinfo_a = dr_info_a->stmt;
+  stmt_vec_info stmtinfo_b = dr_info_b->stmt;
 
   if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
     return;
@@ -2204,39 +2231,38 @@
   if (dra == drb)
     return;
 
+  if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
+      || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
+    return;
+
   if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0)
       || !operand_equal_p (DR_OFFSET (dra), DR_OFFSET (drb), 0)
       || !operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
     return;
 
   /* Two references with distance zero have the same alignment.  */
-  offset_int diff = (wi::to_offset (DR_INIT (dra))
-		     - wi::to_offset (DR_INIT (drb)));
-  if (diff != 0)
+  poly_offset_int diff = (wi::to_poly_offset (DR_INIT (dra))
+			  - wi::to_poly_offset (DR_INIT (drb)));
+  if (maybe_ne (diff, 0))
     {
       /* Get the wider of the two alignments.  */
-      unsigned int align_a = (vect_calculate_target_alignment (dra)
+      unsigned int align_a = (vect_calculate_target_alignment (dr_info_a)
 			      / BITS_PER_UNIT);
-      unsigned int align_b = (vect_calculate_target_alignment (drb)
+      unsigned int align_b = (vect_calculate_target_alignment (dr_info_b)
 			      / BITS_PER_UNIT);
       unsigned int max_align = MAX (align_a, align_b);
 
       /* Require the gap to be a multiple of the larger vector alignment.  */
-      if (!wi::multiple_of_p (diff, max_align, SIGNED))
+      if (!multiple_p (diff, max_align))
 	return;
     }
 
   STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
   STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
   if (dump_enabled_p ())
-    {
-      dump_printf_loc (MSG_NOTE, vect_location,
-		       "accesses have the same alignment: ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
-      dump_printf (MSG_NOTE,  " and ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
-      dump_printf (MSG_NOTE, "\n");
-    }
+    dump_printf_loc (MSG_NOTE, vect_location,
+		     "accesses have the same alignment: %T and %T\n",
+		     DR_REF (dra), DR_REF (drb));
 }
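A worked example for the check above, with illustrative numbers: two references
to the same base whose DR_INITs are 8 and 40 give diff == 32; if both target
alignments are 16 bytes then max_align == 16, multiple_p (32, 16) holds, and
each reference is pushed onto the other's STMT_VINFO_SAME_ALIGN_REFS list.
With DR_INITs 8 and 44 the gap of 36 is not a multiple of 16 and no relation
is recorded.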
 
 
@@ -2245,48 +2271,32 @@
    Analyze the alignment of the data-references in the loop.
    Return FALSE if a data reference is found that cannot be vectorized.  */
 
-bool
+opt_result
 vect_analyze_data_refs_alignment (loop_vec_info vinfo)
 {
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_analyze_data_refs_alignment ===\n");
+  DUMP_VECT_SCOPE ("vect_analyze_data_refs_alignment");
 
   /* Mark groups of data references with same alignment using
      data dependence information.  */
-  vec<ddr_p> ddrs = vinfo->ddrs;
+  vec<ddr_p> ddrs = vinfo->shared->ddrs;
   struct data_dependence_relation *ddr;
   unsigned int i;
 
   FOR_EACH_VEC_ELT (ddrs, i, ddr)
-    vect_find_same_alignment_drs (ddr);
-
-  vec<data_reference_p> datarefs = vinfo->datarefs;
+    vect_find_same_alignment_drs (vinfo, ddr);
+
+  vec<data_reference_p> datarefs = vinfo->shared->datarefs;
   struct data_reference *dr;
 
   vect_record_base_alignments (vinfo);
   FOR_EACH_VEC_ELT (datarefs, i, dr)
     {
-      stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
-      if (STMT_VINFO_VECTORIZABLE (stmt_info)
-	  && !vect_compute_data_ref_alignment (dr))
-	{
-	  /* Strided accesses perform only component accesses, misalignment
-	     information is irrelevant for them.  */
-	  if (STMT_VINFO_STRIDED_P (stmt_info)
-	      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
-	    continue;
-
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "not vectorized: can't calculate alignment "
-			     "for data ref.\n");
-
-	  return false;
-	}
+      dr_vec_info *dr_info = vinfo->lookup_dr (dr);
+      if (STMT_VINFO_VECTORIZABLE (dr_info->stmt))
+	vect_compute_data_ref_alignment (dr_info);
     }
 
-  return true;
+  return opt_result::success ();
 }
 
 
@@ -2298,18 +2308,18 @@
   /* We vectorize from the first scalar stmt in the node unless
      the node is permuted in which case we start from the first
      element in the group.  */
-  gimple *first_stmt = SLP_TREE_SCALAR_STMTS (node)[0];
-  data_reference_p first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
+  stmt_vec_info first_stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
+  dr_vec_info *first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
   if (SLP_TREE_LOAD_PERMUTATION (node).exists ())
-    first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt));
-
-  data_reference_p dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
-  if (! vect_compute_data_ref_alignment (dr)
-      /* For creating the data-ref pointer we need alignment of the
-	 first element anyway.  */
-      || (dr != first_dr
-	  && ! vect_compute_data_ref_alignment (first_dr))
-      || ! verify_data_ref_alignment (dr))
+    first_stmt_info = DR_GROUP_FIRST_ELEMENT (first_stmt_info);
+
+  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
+  vect_compute_data_ref_alignment (dr_info);
+  /* For creating the data-ref pointer we need alignment of the
+     first element anyway.  */
+  if (dr_info != first_dr_info)
+    vect_compute_data_ref_alignment (first_dr_info);
+  if (! verify_data_ref_alignment (dr_info))
     {
       if (dump_enabled_p ())
 	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -2329,9 +2339,7 @@
 bool
 vect_slp_analyze_and_verify_instance_alignment (slp_instance instance)
 {
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_slp_analyze_and_verify_instance_alignment ===\n");
+  DUMP_VECT_SCOPE ("vect_slp_analyze_and_verify_instance_alignment");
 
   slp_tree node;
   unsigned i;
@@ -2340,7 +2348,7 @@
       return false;
 
   node = SLP_INSTANCE_TREE (instance);
-  if (STMT_VINFO_DATA_REF (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]))
+  if (STMT_VINFO_DATA_REF (SLP_TREE_SCALAR_STMTS (node)[0])
       && ! vect_slp_analyze_and_verify_node_alignment
 	     (SLP_INSTANCE_TREE (instance)))
     return false;
@@ -2349,20 +2357,20 @@
 }
 
 
-/* Analyze groups of accesses: check that DR belongs to a group of
+/* Analyze groups of accesses: check that DR_INFO belongs to a group of
    accesses of legal size, step, etc.  Detect gaps, single element
    interleaving, and other special cases. Set grouped access info.
    Collect groups of strided stores for further use in SLP analysis.
    Worker for vect_analyze_group_access.  */
 
 static bool
-vect_analyze_group_access_1 (struct data_reference *dr)
+vect_analyze_group_access_1 (dr_vec_info *dr_info)
 {
+  data_reference *dr = dr_info->dr;
   tree step = DR_STEP (dr);
   tree scalar_type = TREE_TYPE (DR_REF (dr));
   HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
-  gimple *stmt = DR_STMT (dr);
-  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+  stmt_vec_info stmt_info = dr_info->stmt;
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
   bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
   HOST_WIDE_INT dr_step = -1;
@@ -2376,23 +2384,18 @@
       dr_step = tree_to_shwi (step);
       /* Check that STEP is a multiple of type size.  Otherwise there is
          a non-element-sized gap at the end of the group which we
-	 cannot represent in GROUP_GAP or GROUP_SIZE.
+	 cannot represent in DR_GROUP_GAP or DR_GROUP_SIZE.
 	 ???  As we can handle non-constant step fine here we should
-	 simply remove uses of GROUP_GAP between the last and first
-	 element and instead rely on DR_STEP.  GROUP_SIZE then would
+	 simply remove uses of DR_GROUP_GAP between the last and first
+	 element and instead rely on DR_STEP.  DR_GROUP_SIZE then would
 	 simply not include that gap.  */
       if ((dr_step % type_size) != 0)
 	{
 	  if (dump_enabled_p ())
-	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-	                       "Step ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
-	      dump_printf (MSG_NOTE,
-			   " is not a multiple of the element size for ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
-	      dump_printf (MSG_NOTE, "\n");
-	    }
+	    dump_printf_loc (MSG_NOTE, vect_location,
+			     "Step %T is not a multiple of the element size"
+			     " for %T\n",
+			     step, DR_REF (dr));
 	  return false;
 	}
       groupsize = absu_hwi (dr_step) / type_size;
@@ -2401,63 +2404,56 @@
     groupsize = 0;
 
   /* A non-consecutive access is possible only as part of an interleaving group.  */
-  if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
+  if (!DR_GROUP_FIRST_ELEMENT (stmt_info))
     {
      /* Check if this DR is a part of interleaving, and is a single
 	 element of the group that is accessed in the loop.  */
 
       /* Gaps are supported only for loads. STEP must be a multiple of the type
-	 size.  The size of the group must be a power of 2.  */
+	 size.  */
       if (DR_IS_READ (dr)
 	  && (dr_step % type_size) == 0
-	  && groupsize > 0
-	  && pow2p_hwi (groupsize))
+	  && groupsize > 0)
 	{
-	  GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
-	  GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
-	  GROUP_GAP (stmt_info) = groupsize - 1;
+	  DR_GROUP_FIRST_ELEMENT (stmt_info) = stmt_info;
+	  DR_GROUP_SIZE (stmt_info) = groupsize;
+	  DR_GROUP_GAP (stmt_info) = groupsize - 1;
 	  if (dump_enabled_p ())
-	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-	                       "Detected single element interleaving ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
-	      dump_printf (MSG_NOTE, " step ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
-	      dump_printf (MSG_NOTE, "\n");
-	    }
+	    dump_printf_loc (MSG_NOTE, vect_location,
+			     "Detected single element interleaving %T"
+			     " step %T\n",
+			     DR_REF (dr), step);
 
 	  return true;
 	}
 
       if (dump_enabled_p ())
-        {
- 	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-	                   "not consecutive access ");
-	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
-        }
+	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+			 "not consecutive access %G", stmt_info->stmt);
 
       if (bb_vinfo)
-        {
-          /* Mark the statement as unvectorizable.  */
-          STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
-          return true;
-        }
+	{
+	  /* Mark the statement as unvectorizable.  */
+	  STMT_VINFO_VECTORIZABLE (stmt_info) = false;
+	  return true;
+	}
 
       dump_printf_loc (MSG_NOTE, vect_location, "using strided accesses\n");
       STMT_VINFO_STRIDED_P (stmt_info) = true;
       return true;
     }
 
-  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
+  if (DR_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info)
     {
       /* First stmt in the interleaving chain. Check the chain.  */
-      gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
+      stmt_vec_info next = DR_GROUP_NEXT_ELEMENT (stmt_info);
       struct data_reference *data_ref = dr;
       unsigned int count = 1;
       tree prev_init = DR_INIT (data_ref);
-      gimple *prev = stmt;
+      stmt_vec_info prev = stmt_info;
       HOST_WIDE_INT diff, gaps = 0;
 
+      /* By construction, all group members have INTEGER_CST DR_INITs.  */
       while (next)
         {
           /* Skip same data-refs.  In case that two or more stmts share
@@ -2465,8 +2461,7 @@
              stmt, and the rest get their vectorized loads from the first
              one.  */
           if (!tree_int_cst_compare (DR_INIT (data_ref),
-                                     DR_INIT (STMT_VINFO_DATA_REF (
-						   vinfo_for_stmt (next)))))
+				     DR_INIT (STMT_VINFO_DATA_REF (next))))
             {
               if (DR_IS_WRITE (data_ref))
                 {
@@ -2477,19 +2472,19 @@
                 }
 
 	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+		dump_printf_loc (MSG_NOTE, vect_location,
 				 "Two or more load stmts share the same dr.\n");
 
-              /* For load use the same data-ref load.  */
-              GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
-
-              prev = next;
-              next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
-              continue;
+	      /* For load use the same data-ref load.  */
+	      DR_GROUP_SAME_DR_STMT (next) = prev;
+
+	      prev = next;
+	      next = DR_GROUP_NEXT_ELEMENT (next);
+	      continue;
             }
 
-          prev = next;
-          data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next));
+	  prev = next;
+	  data_ref = STMT_VINFO_DATA_REF (next);
 
 	  /* All group members have the same STEP by construction.  */
 	  gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0));
@@ -2516,13 +2511,13 @@
 	  last_accessed_element += diff;
 
           /* Store the gap from the previous member of the group. If there is no
-             gap in the access, GROUP_GAP is always 1.  */
-          GROUP_GAP (vinfo_for_stmt (next)) = diff;
-
-          prev_init = DR_INIT (data_ref);
-          next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
-          /* Count the number of data-refs in the chain.  */
-          count++;
+             gap in the access, DR_GROUP_GAP is always 1.  */
+	  DR_GROUP_GAP (next) = diff;
+
+	  prev_init = DR_INIT (data_ref);
+	  next = DR_GROUP_NEXT_ELEMENT (next);
+	  /* Count the number of data-refs in the chain.  */
+	  count++;
         }
 
       if (groupsize == 0)
@@ -2543,90 +2538,90 @@
       if (groupsize != count
 	  && !DR_IS_READ (dr))
         {
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			     "interleaved store with gaps\n");
-	  return false;
+	  groupsize = count;
+	  STMT_VINFO_STRIDED_P (stmt_info) = true;
 	}
 
       /* If there is a gap after the last load in the group it is the
 	 difference between the groupsize and the last accessed
 	 element.
 	 When there is no gap, this difference should be 0.  */
-      GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - last_accessed_element;
-
-      GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
+      DR_GROUP_GAP (stmt_info) = groupsize - last_accessed_element;
+
+      DR_GROUP_SIZE (stmt_info) = groupsize;
       if (dump_enabled_p ())
 	{
 	  dump_printf_loc (MSG_NOTE, vect_location,
 			   "Detected interleaving ");
 	  if (DR_IS_READ (dr))
 	    dump_printf (MSG_NOTE, "load ");
+	  else if (STMT_VINFO_STRIDED_P (stmt_info))
+	    dump_printf (MSG_NOTE, "strided store ");
 	  else
 	    dump_printf (MSG_NOTE, "store ");
-	  dump_printf (MSG_NOTE, "of size %u starting with ",
-		       (unsigned)groupsize);
-	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
-	  if (GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
+	  dump_printf (MSG_NOTE, "of size %u starting with %G",
+		       (unsigned)groupsize, stmt_info->stmt);
+	  if (DR_GROUP_GAP (stmt_info) != 0)
 	    dump_printf_loc (MSG_NOTE, vect_location,
 			     "There is a gap of %u elements after the group\n",
-			     GROUP_GAP (vinfo_for_stmt (stmt)));
+			     DR_GROUP_GAP (stmt_info));
 	}
 
       /* SLP: create an SLP data structure for every interleaving group of
	 stores for further analysis in vect_analyze_slp.  */
       if (DR_IS_WRITE (dr) && !slp_impossible)
-        {
-          if (loop_vinfo)
-            LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt);
-          if (bb_vinfo)
-            BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt);
-        }
+	{
+	  if (loop_vinfo)
+	    LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt_info);
+	  if (bb_vinfo)
+	    BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt_info);
+	}
     }
 
   return true;
 }
 
-/* Analyze groups of accesses: check that DR belongs to a group of
+/* Analyze groups of accesses: check that DR_INFO belongs to a group of
    accesses of legal size, step, etc.  Detect gaps, single element
    interleaving, and other special cases. Set grouped access info.
    Collect groups of strided stores for further use in SLP analysis.  */
 
 static bool
-vect_analyze_group_access (struct data_reference *dr)
+vect_analyze_group_access (dr_vec_info *dr_info)
 {
-  if (!vect_analyze_group_access_1 (dr))
+  if (!vect_analyze_group_access_1 (dr_info))
     {
       /* Dissolve the group if present.  */
-      gimple *next;
-      gimple *stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dr)));
-      while (stmt)
+      stmt_vec_info stmt_info = DR_GROUP_FIRST_ELEMENT (dr_info->stmt);
+      while (stmt_info)
 	{
-	  stmt_vec_info vinfo = vinfo_for_stmt (stmt);
-	  next = GROUP_NEXT_ELEMENT (vinfo);
-	  GROUP_FIRST_ELEMENT (vinfo) = NULL;
-	  GROUP_NEXT_ELEMENT (vinfo) = NULL;
-	  stmt = next;
+	  stmt_vec_info next = DR_GROUP_NEXT_ELEMENT (stmt_info);
+	  DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
+	  DR_GROUP_NEXT_ELEMENT (stmt_info) = NULL;
+	  stmt_info = next;
 	}
       return false;
     }
   return true;
 }
 
-/* Analyze the access pattern of the data-reference DR.
+/* Analyze the access pattern of the data-reference DR_INFO.
    In case of non-consecutive accesses call vect_analyze_group_access() to
    analyze groups of accesses.  */
 
 static bool
-vect_analyze_data_ref_access (struct data_reference *dr)
+vect_analyze_data_ref_access (dr_vec_info *dr_info)
 {
+  data_reference *dr = dr_info->dr;
   tree step = DR_STEP (dr);
   tree scalar_type = TREE_TYPE (DR_REF (dr));
-  gimple *stmt = DR_STMT (dr);
-  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+  stmt_vec_info stmt_info = dr_info->stmt;
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
   struct loop *loop = NULL;
 
+  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
+    return true;
+
   if (loop_vinfo)
     loop = LOOP_VINFO_LOOP (loop_vinfo);
 
@@ -2641,13 +2636,13 @@
   /* Allow loads with zero step in inner-loop vectorization.  */
   if (loop_vinfo && integer_zerop (step))
     {
-      GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
-      if (!nested_in_vect_loop_p (loop, stmt))
+      DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
+      if (!nested_in_vect_loop_p (loop, stmt_info))
 	return DR_IS_READ (dr);
       /* Allow references with zero step for outer loops marked
 	 with pragma omp simd only - it guarantees absence of
 	 loop-carried dependencies between inner loop iterations.  */
-      if (!loop->force_vectorize)
+      if (loop->safelen < 2)
 	{
 	  if (dump_enabled_p ())
 	    dump_printf_loc (MSG_NOTE, vect_location,
@@ -2656,11 +2651,11 @@
 	}
     }
 
-  if (loop && nested_in_vect_loop_p (loop, stmt))
+  if (loop && nested_in_vect_loop_p (loop, stmt_info))
     {
       /* Interleaved accesses are not yet supported within outer-loop
         vectorization for references in the inner-loop.  */
-      GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
+      DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
 
       /* For the rest of the analysis we use the outer-loop step.  */
       step = STMT_VINFO_DR_STEP (stmt_info);
@@ -2682,12 +2677,12 @@
 	      && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
 	{
 	  /* Mark that it is not interleaving.  */
-	  GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
+	  DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
 	  return true;
 	}
     }
 
-  if (loop && nested_in_vect_loop_p (loop, stmt))
+  if (loop && nested_in_vect_loop_p (loop, stmt_info))
     {
       if (dump_enabled_p ())
 	dump_printf_loc (MSG_NOTE, vect_location,
@@ -2700,10 +2695,10 @@
   if (TREE_CODE (step) != INTEGER_CST)
     return (STMT_VINFO_STRIDED_P (stmt_info)
 	    && (!STMT_VINFO_GROUPED_ACCESS (stmt_info)
-		|| vect_analyze_group_access (dr)));
+		|| vect_analyze_group_access (dr_info)));
 
   /* Not a consecutive access - check if it's part of an interleaving group.  */
-  return vect_analyze_group_access (dr);
+  return vect_analyze_group_access (dr_info);
 }
 
 /* Compare two data-references DRA and DRB to group them into chunks
@@ -2753,12 +2748,70 @@
     return cmp;
 
   /* Then sort by DR_INIT.  In case of identical DRs, sort by stmt UID.  */
-  cmp = tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb));
+  cmp = data_ref_compare_tree (DR_INIT (dra), DR_INIT (drb));
   if (cmp == 0)
     return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1;
   return cmp;
 }
 
+/* If OP is the result of a conversion, return the unconverted value,
+   otherwise return null.  */
+
+static tree
+strip_conversion (tree op)
+{
+  if (TREE_CODE (op) != SSA_NAME)
+    return NULL_TREE;
+  gimple *stmt = SSA_NAME_DEF_STMT (op);
+  if (!is_gimple_assign (stmt)
+      || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt)))
+    return NULL_TREE;
+  return gimple_assign_rhs1 (stmt);
+}
+
+/* Return true if vectorizable_* routines can handle statements STMT1_INFO
+   and STMT2_INFO being in a single group.  */
+
+static bool
+can_group_stmts_p (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info)
+{
+  if (gimple_assign_single_p (stmt1_info->stmt))
+    return gimple_assign_single_p (stmt2_info->stmt);
+
+  gcall *call1 = dyn_cast <gcall *> (stmt1_info->stmt);
+  if (call1 && gimple_call_internal_p (call1))
+    {
+      /* Check for two masked loads or two masked stores.  */
+      gcall *call2 = dyn_cast <gcall *> (stmt2_info->stmt);
+      if (!call2 || !gimple_call_internal_p (call2))
+	return false;
+      internal_fn ifn = gimple_call_internal_fn (call1);
+      if (ifn != IFN_MASK_LOAD && ifn != IFN_MASK_STORE)
+	return false;
+      if (ifn != gimple_call_internal_fn (call2))
+	return false;
+
+      /* Check that the masks are the same.  Cope with casts of masks,
+	 like those created by build_mask_conversion.  */
+      tree mask1 = gimple_call_arg (call1, 2);
+      tree mask2 = gimple_call_arg (call2, 2);
+      if (!operand_equal_p (mask1, mask2, 0))
+	{
+	  mask1 = strip_conversion (mask1);
+	  if (!mask1)
+	    return false;
+	  mask2 = strip_conversion (mask2);
+	  if (!mask2)
+	    return false;
+	  if (!operand_equal_p (mask1, mask2, 0))
+	    return false;
+	}
+      return true;
+    }
+
+  return false;
+}
+
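A hypothetical source-level example (not taken from the patch or its testsuite)
of what can_group_stmts_p now accepts: on a target with masked vector loads,
if-conversion turns both loads from b below into IFN_MASK_LOAD calls guarded by
the same mask computed from c[i], so the two adjacent accesses can remain one
interleaving group instead of being split.

  void
  f (int *restrict a, const int *restrict b, const int *restrict c, int n)
  {
    for (int i = 0; i < n; ++i)
      if (c[i])
        a[i] = b[2 * i] + b[2 * i + 1];
  }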
 /* Function vect_analyze_data_ref_accesses.
 
    Analyze the access pattern of all the data references in the loop.
@@ -2768,33 +2821,34 @@
 
    FORNOW: handle only arrays and pointer accesses.  */
 
-bool
+opt_result
 vect_analyze_data_ref_accesses (vec_info *vinfo)
 {
   unsigned int i;
-  vec<data_reference_p> datarefs = vinfo->datarefs;
+  vec<data_reference_p> datarefs = vinfo->shared->datarefs;
   struct data_reference *dr;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_analyze_data_ref_accesses ===\n");
+  DUMP_VECT_SCOPE ("vect_analyze_data_ref_accesses");
 
   if (datarefs.is_empty ())
-    return true;
+    return opt_result::success ();
 
   /* Sort the array of datarefs to make building the interleaving chains
      linear.  Don't modify the original vector's order, it is needed for
      determining what dependencies are reversed.  */
   vec<data_reference_p> datarefs_copy = datarefs.copy ();
   datarefs_copy.qsort (dr_group_sort_cmp);
+  hash_set<stmt_vec_info> to_fixup;
 
   /* Build the interleaving chains.  */
   for (i = 0; i < datarefs_copy.length () - 1;)
     {
       data_reference_p dra = datarefs_copy[i];
-      stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
+      dr_vec_info *dr_info_a = vinfo->lookup_dr (dra);
+      stmt_vec_info stmtinfo_a = dr_info_a->stmt;
       stmt_vec_info lastinfo = NULL;
-      if (! STMT_VINFO_VECTORIZABLE (stmtinfo_a))
+      if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
+	  || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a))
 	{
 	  ++i;
 	  continue;
@@ -2802,8 +2856,10 @@
       for (i = i + 1; i < datarefs_copy.length (); ++i)
 	{
 	  data_reference_p drb = datarefs_copy[i];
-	  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
-	  if (! STMT_VINFO_VECTORIZABLE (stmtinfo_b))
+	  dr_vec_info *dr_info_b = vinfo->lookup_dr (drb);
+	  stmt_vec_info stmtinfo_b = dr_info_b->stmt;
+	  if (!STMT_VINFO_VECTORIZABLE (stmtinfo_b)
+	      || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
 	    break;
 
 	  /* ???  Imperfect sorting (non-compatible types, non-modulo
@@ -2825,8 +2881,7 @@
 	      || data_ref_compare_tree (DR_BASE_ADDRESS (dra),
 					DR_BASE_ADDRESS (drb)) != 0
 	      || data_ref_compare_tree (DR_OFFSET (dra), DR_OFFSET (drb)) != 0
-	      || !gimple_assign_single_p (DR_STMT (dra))
-	      || !gimple_assign_single_p (DR_STMT (drb)))
+	      || !can_group_stmts_p (stmtinfo_a, stmtinfo_b))
 	    break;
 
 	  /* Check that the data-refs have the same constant size.  */
@@ -2841,182 +2896,269 @@
 	  if (data_ref_compare_tree (DR_STEP (dra), DR_STEP (drb)) != 0)
 	    break;
 
-	  /* Do not place the same access in the interleaving chain twice.  */
-	  if (tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)) == 0)
-	    break;
-
 	  /* Check the types are compatible.
 	     ???  We don't distinguish this during sorting.  */
 	  if (!types_compatible_p (TREE_TYPE (DR_REF (dra)),
 				   TREE_TYPE (DR_REF (drb))))
 	    break;
 
+	  /* Check that the DR_INITs are compile-time constants.  */
+	  if (TREE_CODE (DR_INIT (dra)) != INTEGER_CST
+	      || TREE_CODE (DR_INIT (drb)) != INTEGER_CST)
+	    break;
+
 	  /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb).  */
 	  HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra));
 	  HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb));
-	  gcc_assert (init_a <= init_b);
-
-	  /* If init_b == init_a + the size of the type * k, we have an
-	     interleaving, and DRA is accessed before DRB.  */
-	  HOST_WIDE_INT type_size_a = tree_to_uhwi (sza);
-	  if (type_size_a == 0
-	      || (init_b - init_a) % type_size_a != 0)
-	    break;
-
-	  /* If we have a store, the accesses are adjacent.  This splits
-	     groups into chunks we support (we don't support vectorization
-	     of stores with gaps).  */
-	  if (!DR_IS_READ (dra)
-	      && (init_b - (HOST_WIDE_INT) TREE_INT_CST_LOW
-					     (DR_INIT (datarefs_copy[i-1]))
-		  != type_size_a))
-	    break;
-
-	  /* If the step (if not zero or non-constant) is greater than the
-	     difference between data-refs' inits this splits groups into
-	     suitable sizes.  */
-	  if (tree_fits_shwi_p (DR_STEP (dra)))
+	  HOST_WIDE_INT init_prev
+	    = TREE_INT_CST_LOW (DR_INIT (datarefs_copy[i-1]));
+	  gcc_assert (init_a <= init_b
+		      && init_a <= init_prev
+		      && init_prev <= init_b);
+
+	  /* Do not place the same access in the interleaving chain twice.  */
+	  if (init_b == init_prev)
+	    {
+	      gcc_assert (gimple_uid (DR_STMT (datarefs_copy[i-1]))
+			  < gimple_uid (DR_STMT (drb)));
+	      /* Simply link in duplicates and fix up the chain below.  */
+	    }
+	  else
 	    {
-	      HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra));
-	      if (step != 0 && step <= (init_b - init_a))
+	      /* If init_b == init_a + the size of the type * k, we have an
+		 interleaving, and DRA is accessed before DRB.  */
+	      HOST_WIDE_INT type_size_a = tree_to_uhwi (sza);
+	      if (type_size_a == 0
+		  || (init_b - init_a) % type_size_a != 0)
 		break;
+
+	      /* If we have a store, the accesses are adjacent.  This splits
+		 groups into chunks we support (we don't support vectorization
+		 of stores with gaps).  */
+	      if (!DR_IS_READ (dra) && init_b - init_prev != type_size_a)
+		break;
+
+	      /* If the step (if not zero or non-constant) is greater than the
+		 difference between data-refs' inits this splits groups into
+		 suitable sizes.  */
+	      if (tree_fits_shwi_p (DR_STEP (dra)))
+		{
+		  HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra));
+		  if (step != 0 && step <= (init_b - init_a))
+		    break;
+		}
 	    }
 
 	  if (dump_enabled_p ())
-	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-			       "Detected interleaving ");
-	      if (DR_IS_READ (dra))
-		dump_printf (MSG_NOTE, "load ");
-	      else
-		dump_printf (MSG_NOTE, "store ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
-	      dump_printf (MSG_NOTE,  " and ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
-	      dump_printf (MSG_NOTE, "\n");
-	    }
+	    dump_printf_loc (MSG_NOTE, vect_location,
+			     DR_IS_READ (dra)
+			     ? "Detected interleaving load %T and %T\n"
+			     : "Detected interleaving store %T and %T\n",
+			     DR_REF (dra), DR_REF (drb));
 
 	  /* Link the found element into the group list.  */
-	  if (!GROUP_FIRST_ELEMENT (stmtinfo_a))
+	  if (!DR_GROUP_FIRST_ELEMENT (stmtinfo_a))
 	    {
-	      GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra);
+	      DR_GROUP_FIRST_ELEMENT (stmtinfo_a) = stmtinfo_a;
 	      lastinfo = stmtinfo_a;
 	    }
-	  GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra);
-	  GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb);
+	  DR_GROUP_FIRST_ELEMENT (stmtinfo_b) = stmtinfo_a;
+	  DR_GROUP_NEXT_ELEMENT (lastinfo) = stmtinfo_b;
 	  lastinfo = stmtinfo_b;
+
+	  if (init_b == init_prev
+	      && !to_fixup.add (DR_GROUP_FIRST_ELEMENT (stmtinfo_a))
+	      && dump_enabled_p ())
+	    dump_printf_loc (MSG_NOTE, vect_location,
+			     "Queuing group with duplicate access for fixup\n");
 	}
     }
 
+  /* Fixup groups with duplicate entries by splitting them.  */
+  while (1)
+    {
+      hash_set<stmt_vec_info>::iterator it = to_fixup.begin ();
+      if (!(it != to_fixup.end ()))
+	break;
+      stmt_vec_info grp = *it;
+      to_fixup.remove (grp);
+
+      /* Find the earliest duplicate group member.  */
+      unsigned first_duplicate = -1u;
+      stmt_vec_info next, g = grp;
+      while ((next = DR_GROUP_NEXT_ELEMENT (g)))
+	{
+	  if ((DR_INIT (STMT_VINFO_DR_INFO (next)->dr)
+	       == DR_INIT (STMT_VINFO_DR_INFO (g)->dr))
+	      && gimple_uid (STMT_VINFO_STMT (next)) < first_duplicate)
+	    first_duplicate = gimple_uid (STMT_VINFO_STMT (next));
+	  g = next;
+	}
+      if (first_duplicate == -1U)
+	continue;
+
+      /* Then move all stmts after the first duplicate to a new group.
+         Note this is a heuristic but one with the property that *it
+	 is fixed up completely.  */
+      g = grp;
+      stmt_vec_info newgroup = NULL, ng;
+      while ((next = DR_GROUP_NEXT_ELEMENT (g)))
+	{
+	  if (gimple_uid (STMT_VINFO_STMT (next)) >= first_duplicate)
+	    {
+	      DR_GROUP_NEXT_ELEMENT (g) = DR_GROUP_NEXT_ELEMENT (next);
+	      if (!newgroup)
+		newgroup = next;
+	      else
+		DR_GROUP_NEXT_ELEMENT (ng) = next;
+	      ng = next;
+	      DR_GROUP_FIRST_ELEMENT (ng) = newgroup;
+	    }
+	  else
+	    g = DR_GROUP_NEXT_ELEMENT (g);
+	}
+      DR_GROUP_NEXT_ELEMENT (ng) = NULL;
+
+      /* Fixup the new group which still may contain duplicates.  */
+      to_fixup.add (newgroup);
+    }
+
   FOR_EACH_VEC_ELT (datarefs_copy, i, dr)
-    if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) 
-        && !vect_analyze_data_ref_access (dr))
-      {
-	if (dump_enabled_p ())
-	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-	                   "not vectorized: complicated access pattern.\n");
-
-        if (is_a <bb_vec_info> (vinfo))
-          {
-            /* Mark the statement as not vectorizable.  */
-            STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
-            continue;
-          }
-        else
-	  {
-	    datarefs_copy.release ();
-	    return false;
-	  }
-      }
+    {
+      dr_vec_info *dr_info = vinfo->lookup_dr (dr);
+      if (STMT_VINFO_VECTORIZABLE (dr_info->stmt)
+	  && !vect_analyze_data_ref_access (dr_info))
+	{
+	  if (dump_enabled_p ())
+	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+			     "not vectorized: complicated access pattern.\n");
+
+	  if (is_a <bb_vec_info> (vinfo))
+	    {
+	      /* Mark the statement as not vectorizable.  */
+	      STMT_VINFO_VECTORIZABLE (dr_info->stmt) = false;
+	      continue;
+	    }
+	  else
+	    {
+	      datarefs_copy.release ();
+	      return opt_result::failure_at (dr_info->stmt->stmt,
+					     "not vectorized:"
+					     " complicated access pattern.\n");
+	    }
+	}
+    }
 
   datarefs_copy.release ();
-  return true;
+  return opt_result::success ();
 }
 
 /* Function vect_vfa_segment_size.
 
-   Create an expression that computes the size of segment
-   that will be accessed for a data reference.  The functions takes into
-   account that realignment loads may access one more vector.
-
    Input:
-     DR: The data reference.
+     DR_INFO: The data reference.
      LENGTH_FACTOR: segment length to consider.
 
-   Return an expression whose value is the size of segment which will be
-   accessed by DR.  */
+   Return a value suitable for the dr_with_seg_len::seg_len field.
+   This is the "distance travelled" by the pointer from the first
+   iteration in the segment to the last.  Note that it does not include
+   the size of the access; in effect it only describes the first byte.  */
 
 static tree
-vect_vfa_segment_size (struct data_reference *dr, tree length_factor)
+vect_vfa_segment_size (dr_vec_info *dr_info, tree length_factor)
+{
+  length_factor = size_binop (MINUS_EXPR,
+			      fold_convert (sizetype, length_factor),
+			      size_one_node);
+  return size_binop (MULT_EXPR, fold_convert (sizetype, DR_STEP (dr_info->dr)),
+		     length_factor);
+}
+
+/* Return a value that, when added to abs (vect_vfa_segment_size (DR_INFO)),
+   gives the worst-case number of bytes covered by the segment.  */
+
+static unsigned HOST_WIDE_INT
+vect_vfa_access_size (dr_vec_info *dr_info)
 {
-  tree segment_length;
-
-  if (integer_zerop (DR_STEP (dr)))
-    segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
-  else
-    segment_length = size_binop (MULT_EXPR,
-				 fold_convert (sizetype, DR_STEP (dr)),
-				 fold_convert (sizetype, length_factor));
-
-  if (vect_supportable_dr_alignment (dr, false)
-	== dr_explicit_realign_optimized)
+  stmt_vec_info stmt_vinfo = dr_info->stmt;
+  tree ref_type = TREE_TYPE (DR_REF (dr_info->dr));
+  unsigned HOST_WIDE_INT ref_size = tree_to_uhwi (TYPE_SIZE_UNIT (ref_type));
+  unsigned HOST_WIDE_INT access_size = ref_size;
+  if (DR_GROUP_FIRST_ELEMENT (stmt_vinfo))
+    {
+      gcc_assert (DR_GROUP_FIRST_ELEMENT (stmt_vinfo) == stmt_vinfo);
+      access_size *= DR_GROUP_SIZE (stmt_vinfo) - DR_GROUP_GAP (stmt_vinfo);
+    }
+  if (STMT_VINFO_VEC_STMT (stmt_vinfo)
+      && (vect_supportable_dr_alignment (dr_info, false)
+	  == dr_explicit_realign_optimized))
     {
-      tree vector_size = TYPE_SIZE_UNIT
-			  (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));
-
-      segment_length = size_binop (PLUS_EXPR, segment_length, vector_size);
+      /* We might access a full vector's worth.  */
+      tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
+      access_size += tree_to_uhwi (TYPE_SIZE_UNIT (vectype)) - ref_size;
     }
-  return segment_length;
+  return access_size;
+}
+
+/* Get the minimum alignment for all the scalar accesses that DR_INFO
+   describes.  */
+
+static unsigned int
+vect_vfa_align (dr_vec_info *dr_info)
+{
+  return TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_info->dr)));
 }
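A worked example of the new seg_len/access_size split, using illustrative
numbers: for a contiguous 4-byte access with DR_STEP 4 and LENGTH_FACTOR 8,
vect_vfa_segment_size returns 4 * (8 - 1) == 28, the distance from the first
byte accessed in the first iteration to the first byte accessed in the last;
vect_vfa_access_size then contributes the 4 bytes actually read or written
(scaled by the group size minus the gap for the leader of a group), so together
they describe a 32-byte span.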
 
-/* Function vect_no_alias_p.
+/* Function vect_compile_time_alias.
 
-   Given data references A and B with equal base and offset, the alias
-   relation can be decided at compilation time, return TRUE if they do
-   not alias to each other; return FALSE otherwise.  SEGMENT_LENGTH_A
-   and SEGMENT_LENGTH_B are the memory lengths accessed by A and B
-   respectively.  */
-
-static bool
-vect_no_alias_p (struct data_reference *a, struct data_reference *b,
-                 tree segment_length_a, tree segment_length_b)
+   Given data references A and B with equal base and offset, see whether
+   the alias relation can be decided at compilation time.  Return 1 if
+   it can and the references alias, 0 if it can and the references do
+   not alias, and -1 if we cannot decide at compile time.  SEGMENT_LENGTH_A,
+   SEGMENT_LENGTH_B, ACCESS_SIZE_A and ACCESS_SIZE_B are the equivalent
+   of dr_with_seg_len::{seg_len,access_size} for A and B.  */
+
+static int
+vect_compile_time_alias (dr_vec_info *a, dr_vec_info *b,
+			 tree segment_length_a, tree segment_length_b,
+			 unsigned HOST_WIDE_INT access_size_a,
+			 unsigned HOST_WIDE_INT access_size_b)
 {
-  gcc_assert (TREE_CODE (DR_INIT (a)) == INTEGER_CST
-	      && TREE_CODE (DR_INIT (b)) == INTEGER_CST);
-  if (tree_int_cst_equal (DR_INIT (a), DR_INIT (b)))
-    return false;
-
-  tree seg_a_min = DR_INIT (a);
-  tree seg_a_max = fold_build2 (PLUS_EXPR, TREE_TYPE (seg_a_min),
-				seg_a_min, segment_length_a);
+  poly_offset_int offset_a = wi::to_poly_offset (DR_INIT (a->dr));
+  poly_offset_int offset_b = wi::to_poly_offset (DR_INIT (b->dr));
+  poly_uint64 const_length_a;
+  poly_uint64 const_length_b;
+
   /* For negative step, we need to adjust address range by TYPE_SIZE_UNIT
      bytes, e.g., int a[3] -> a[1] range is [a+4, a+16) instead of
      [a, a+12) */
-  if (tree_int_cst_compare (DR_STEP (a), size_zero_node) < 0)
+  if (tree_int_cst_compare (DR_STEP (a->dr), size_zero_node) < 0)
     {
-      tree unit_size = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (a)));
-      seg_a_min = fold_build2 (PLUS_EXPR, TREE_TYPE (seg_a_max),
-			       seg_a_max, unit_size);
-      seg_a_max = fold_build2 (PLUS_EXPR, TREE_TYPE (DR_INIT (a)),
-			       DR_INIT (a), unit_size);
+      const_length_a = (-wi::to_poly_wide (segment_length_a)).force_uhwi ();
+      offset_a = (offset_a + access_size_a) - const_length_a;
     }
-  tree seg_b_min = DR_INIT (b);
-  tree seg_b_max = fold_build2 (PLUS_EXPR, TREE_TYPE (seg_b_min),
-				seg_b_min, segment_length_b);
-  if (tree_int_cst_compare (DR_STEP (b), size_zero_node) < 0)
+  else
+    const_length_a = tree_to_poly_uint64 (segment_length_a);
+  if (tree_int_cst_compare (DR_STEP (b->dr), size_zero_node) < 0)
     {
-      tree unit_size = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (b)));
-      seg_b_min = fold_build2 (PLUS_EXPR, TREE_TYPE (seg_b_max),
-			       seg_b_max, unit_size);
-      seg_b_max = fold_build2 (PLUS_EXPR, TREE_TYPE (DR_INIT (b)),
-			       DR_INIT (b), unit_size);
+      const_length_b = (-wi::to_poly_wide (segment_length_b)).force_uhwi ();
+      offset_b = (offset_b + access_size_b) - const_length_b;
     }
-
-  if (tree_int_cst_le (seg_a_max, seg_b_min)
-      || tree_int_cst_le (seg_b_max, seg_a_min))
-    return true;
-
-  return false;
+  else
+    const_length_b = tree_to_poly_uint64 (segment_length_b);
+
+  const_length_a += access_size_a;
+  const_length_b += access_size_b;
+
+  if (ranges_known_overlap_p (offset_a, const_length_a,
+			      offset_b, const_length_b))
+    return 1;
+
+  if (!ranges_maybe_overlap_p (offset_a, const_length_a,
+			       offset_b, const_length_b))
+    return 0;
+
+  return -1;
 }
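
The decision above boils down to interval overlap on [offset, offset + seg_len + access_size).  A standalone C++ sketch with plain integers (so, unlike the poly_int version, there is no "cannot decide" outcome; the real code returns -1 when the two ranges cannot be ordered at compile time):

#include <cassert>
#include <cstdint>

// Half-open byte ranges [pos, pos + len) overlap iff each starts before
// the other one ends.
static bool ranges_overlap (int64_t pos1, int64_t len1,
                            int64_t pos2, int64_t len2)
{
  return pos1 < pos2 + len2 && pos2 < pos1 + len1;
}

int main ()
{
  // Two 32-byte segments, the second starting at byte 32: provably disjoint.
  assert (!ranges_overlap (0, 32, 32, 32));
  // Second segment starts at byte 16 instead: provably aliasing.
  assert (ranges_overlap (0, 32, 16, 32));
  return 0;
}
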
 
 /* Return true if the minimum nonzero dependence distance for loop LOOP_DEPTH
@@ -3024,7 +3166,7 @@
 
 static bool
 dependence_distance_ge_vf (data_dependence_relation *ddr,
-			   unsigned int loop_depth, unsigned HOST_WIDE_INT vf)
+			   unsigned int loop_depth, poly_uint64 vf)
 {
   if (DDR_ARE_DEPENDENT (ddr) != NULL_TREE
       || DDR_NUM_DIST_VECTS (ddr) == 0)
@@ -3040,20 +3182,122 @@
       HOST_WIDE_INT dist = dist_v[loop_depth];
       if (dist != 0
 	  && !(dist > 0 && DDR_REVERSED_P (ddr))
-	  && (unsigned HOST_WIDE_INT) abs_hwi (dist) < vf)
+	  && maybe_lt ((unsigned HOST_WIDE_INT) abs_hwi (dist), vf))
 	return false;
     }
 
   if (dump_enabled_p ())
+    dump_printf_loc (MSG_NOTE, vect_location,
+		     "dependence distance between %T and %T is >= VF\n",
+		     DR_REF (DDR_A (ddr)), DR_REF (DDR_B (ddr)));
+
+  return true;
+}
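
A standalone sketch of the distance test above, ignoring the reversed-dependence special case that the real code also allows: a loop-carried dependence cannot be violated if every nonzero distance is at least the vectorization factor, because one vector iteration only combines VF consecutive scalar iterations.

#include <cassert>
#include <cstdlib>
#include <vector>

static bool distances_ge_vf (const std::vector<int> &distances, unsigned vf)
{
  for (int dist : distances)
    if (dist != 0 && static_cast<unsigned> (std::abs (dist)) < vf)
      return false;
  return true;
}

int main ()
{
  // Distances of 0 (same iteration) or >= VF are safe for VF = 4 ...
  assert (distances_ge_vf ({0, 4, 8}, 4));
  // ... but a distance of 2 iterations is not.
  assert (!distances_ge_vf ({2}, 4));
  return 0;
}
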
+
+/* Dump LOWER_BOUND using flags DUMP_KIND.  Dumps are known to be enabled.  */
+
+static void
+dump_lower_bound (dump_flags_t dump_kind, const vec_lower_bound &lower_bound)
+{
+  dump_printf (dump_kind, "%s (%T) >= ",
+	       lower_bound.unsigned_p ? "unsigned" : "abs",
+	       lower_bound.expr);
+  dump_dec (dump_kind, lower_bound.min_value);
+}
+
+/* Record that the vectorized loop requires the vec_lower_bound described
+   by EXPR, UNSIGNED_P and MIN_VALUE.  */
+
+static void
+vect_check_lower_bound (loop_vec_info loop_vinfo, tree expr, bool unsigned_p,
+			poly_uint64 min_value)
+{
+  vec<vec_lower_bound> lower_bounds = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo);
+  for (unsigned int i = 0; i < lower_bounds.length (); ++i)
+    if (operand_equal_p (lower_bounds[i].expr, expr, 0))
+      {
+	unsigned_p &= lower_bounds[i].unsigned_p;
+	min_value = upper_bound (lower_bounds[i].min_value, min_value);
+	if (lower_bounds[i].unsigned_p != unsigned_p
+	    || maybe_lt (lower_bounds[i].min_value, min_value))
+	  {
+	    lower_bounds[i].unsigned_p = unsigned_p;
+	    lower_bounds[i].min_value = min_value;
+	    if (dump_enabled_p ())
+	      {
+		dump_printf_loc (MSG_NOTE, vect_location,
+				 "updating run-time check to ");
+		dump_lower_bound (MSG_NOTE, lower_bounds[i]);
+		dump_printf (MSG_NOTE, "\n");
+	      }
+	  }
+	return;
+      }
+
+  vec_lower_bound lower_bound (expr, unsigned_p, min_value);
+  if (dump_enabled_p ())
     {
-      dump_printf_loc (MSG_NOTE, vect_location,
-		       "dependence distance between ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
-      dump_printf (MSG_NOTE,  " and ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
-      dump_printf (MSG_NOTE,  " is >= VF\n");
+      dump_printf_loc (MSG_NOTE, vect_location, "need a run-time check that ");
+      dump_lower_bound (MSG_NOTE, lower_bound);
+      dump_printf (MSG_NOTE, "\n");
     }
-
+  LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).safe_push (lower_bound);
+}
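
A standalone sketch of the bookkeeping above, with std::string standing in for the tree expression that the real code compares via operand_equal_p: requests for the same expression are merged by keeping the weaker signedness claim and the stronger minimum value.

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

struct lower_bound { std::string expr; bool unsigned_p; unsigned min_value; };

static void check_lower_bound (std::vector<lower_bound> &bounds,
                               const std::string &expr, bool unsigned_p,
                               unsigned min_value)
{
  for (lower_bound &b : bounds)
    if (b.expr == expr)
      {
        b.unsigned_p = b.unsigned_p && unsigned_p;       // weaker claim wins
        b.min_value = std::max (b.min_value, min_value); // stronger bound wins
        return;
      }
  bounds.push_back ({expr, unsigned_p, min_value});
}

int main ()
{
  std::vector<lower_bound> bounds;
  check_lower_bound (bounds, "step", true, 1);   // "step is nonzero"
  check_lower_bound (bounds, "step", false, 16); // "abs (step) >= 16"
  assert (bounds.size () == 1);
  assert (!bounds[0].unsigned_p && bounds[0].min_value == 16);
  return 0;
}
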
+
+/* Return true if it's unlikely that the step of the vectorized form of DR_INFO
+   will span fewer than GAP bytes.  */
+
+static bool
+vect_small_gap_p (loop_vec_info loop_vinfo, dr_vec_info *dr_info,
+		  poly_int64 gap)
+{
+  stmt_vec_info stmt_info = dr_info->stmt;
+  HOST_WIDE_INT count
+    = estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
+  if (DR_GROUP_FIRST_ELEMENT (stmt_info))
+    count *= DR_GROUP_SIZE (DR_GROUP_FIRST_ELEMENT (stmt_info));
+  return (estimated_poly_value (gap)
+	  <= count * vect_get_scalar_dr_size (dr_info));
+}
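
A standalone sketch of the heuristic above (plain integers; the real code runs estimated_poly_value on both the VF and the gap): a step lower bound is only worth a run-time check if the required gap is no larger than the bytes one vectorized iteration is expected to cover.

#include <cassert>

static bool small_gap_p (long gap, long estimated_vf, long group_size,
                         long scalar_size)
{
  return gap <= estimated_vf * group_size * scalar_size;
}

int main ()
{
  // VF 4, ungrouped 4-byte accesses: gaps up to 16 bytes are "small".
  assert (small_gap_p (16, 4, 1, 4));
  // A 64-byte gap would demand a much larger step than one vector
  // iteration needs, so a full segment-overlap check is preferable.
  assert (!small_gap_p (64, 4, 1, 4));
  return 0;
}
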
+
+/* Return true if we know that there is no alias between DR_INFO_A and
+   DR_INFO_B when abs (DR_STEP (DR_INFO_A->dr)) >= N for some N.
+   When returning true, set *LOWER_BOUND_OUT to this N.  */
+
+static bool
+vectorizable_with_step_bound_p (dr_vec_info *dr_info_a, dr_vec_info *dr_info_b,
+				poly_uint64 *lower_bound_out)
+{
+  /* Check that there is a constant gap of known sign between DR_A
+     and DR_B.  */
+  data_reference *dr_a = dr_info_a->dr;
+  data_reference *dr_b = dr_info_b->dr;
+  poly_int64 init_a, init_b;
+  if (!operand_equal_p (DR_BASE_ADDRESS (dr_a), DR_BASE_ADDRESS (dr_b), 0)
+      || !operand_equal_p (DR_OFFSET (dr_a), DR_OFFSET (dr_b), 0)
+      || !operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0)
+      || !poly_int_tree_p (DR_INIT (dr_a), &init_a)
+      || !poly_int_tree_p (DR_INIT (dr_b), &init_b)
+      || !ordered_p (init_a, init_b))
+    return false;
+
+  /* Sort DR_A and DR_B by the address they access.  */
+  if (maybe_lt (init_b, init_a))
+    {
+      std::swap (init_a, init_b);
+      std::swap (dr_info_a, dr_info_b);
+      std::swap (dr_a, dr_b);
+    }
+
+  /* If the two accesses could be dependent within a scalar iteration,
+     make sure that we'd retain their order.  */
+  if (maybe_gt (init_a + vect_get_scalar_dr_size (dr_info_a), init_b)
+      && !vect_preserves_scalar_order_p (dr_info_a, dr_info_b))
+    return false;
+
+  /* There is no alias if abs (DR_STEP) is greater than or equal to
+     the bytes spanned by the combination of the two accesses.  */
+  *lower_bound_out = init_b + vect_get_scalar_dr_size (dr_info_b) - init_a;
   return true;
 }
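
A standalone sketch of how the lower bound is derived (constant offsets and sizes as plain integers): after sorting the two references by start offset, any step whose magnitude is at least the span from the lower start to the upper end keeps the two per-iteration accesses apart.

#include <algorithm>
#include <cassert>

static long step_lower_bound (long init_a, long size_a,
                              long init_b, long size_b)
{
  // Sort by start offset, as the real code does.
  if (init_b < init_a)
    {
      std::swap (init_a, init_b);
      std::swap (size_a, size_b);
    }
  // No alias once abs (step) covers the combined span of the two accesses.
  return init_b + size_b - init_a;
}

int main ()
{
  // Two 4-byte accesses whose starts are 8 bytes apart: safe whenever
  // abs (step) >= 12.
  assert (step_lower_bound (0, 4, 8, 4) == 12);
  return 0;
}
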
 
@@ -3064,7 +3308,7 @@
    Return FALSE if the resulting list of ddrs is longer than allowed by
    PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE.  */
 
-bool
+opt_result
 vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
 {
   typedef pair_hash <tree_operand_hash, tree_operand_hash> tree_pair_hash;
@@ -3075,19 +3319,30 @@
     = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo);
   vec<vec_object_pair> &check_unequal_addrs
     = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo);
-  int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
+  poly_uint64 vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
   tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
 
   ddr_p ddr;
   unsigned int i;
   tree length_factor;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-                     "=== vect_prune_runtime_alias_test_list ===\n");
+  DUMP_VECT_SCOPE ("vect_prune_runtime_alias_test_list");
+
+  /* Step values are irrelevant for aliasing if the number of vector
+     iterations is equal to the number of scalar iterations (which can
+     happen for fully-SLP loops).  */
+  bool ignore_step_p = known_eq (LOOP_VINFO_VECT_FACTOR (loop_vinfo), 1U);
+
+  if (!ignore_step_p)
+    {
+      /* Convert the checks for nonzero steps into bound tests.  */
+      tree value;
+      FOR_EACH_VEC_ELT (LOOP_VINFO_CHECK_NONZERO (loop_vinfo), i, value)
+	vect_check_lower_bound (loop_vinfo, value, true, 1);
+    }
 
   if (may_alias_ddrs.is_empty ())
-    return true;
+    return opt_result::success ();
 
   comp_alias_ddrs.create (may_alias_ddrs.length ());
 
@@ -3099,10 +3354,10 @@
   FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
     {
       int comp_res;
-      struct data_reference *dr_a, *dr_b;
-      gimple *dr_group_first_a, *dr_group_first_b;
+      poly_uint64 lower_bound;
       tree segment_length_a, segment_length_b;
-      gimple *stmt_a, *stmt_b;
+      unsigned HOST_WIDE_INT access_size_a, access_size_b;
+      unsigned int align_a, align_b;
 
       /* Ignore the alias if the VF we chose ended up being no greater
 	 than the dependence distance.  */
@@ -3115,69 +3370,150 @@
 	  if (!compared_objects.add (new_pair))
 	    {
 	      if (dump_enabled_p ())
-		{
-		  dump_printf_loc (MSG_NOTE, vect_location, "checking that ");
-		  dump_generic_expr (MSG_NOTE, TDF_SLIM, new_pair.first);
-		  dump_printf (MSG_NOTE, " and ");
-		  dump_generic_expr (MSG_NOTE, TDF_SLIM, new_pair.second);
-		  dump_printf (MSG_NOTE, " have different addresses\n");
-		}
+		dump_printf_loc (MSG_NOTE, vect_location,
+				 "checking that %T and %T"
+				 " have different addresses\n",
+				 new_pair.first, new_pair.second);
 	      LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).safe_push (new_pair);
 	    }
 	  continue;
 	}
 
-      dr_a = DDR_A (ddr);
-      stmt_a = DR_STMT (DDR_A (ddr));
-      dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
+      dr_vec_info *dr_info_a = loop_vinfo->lookup_dr (DDR_A (ddr));
+      stmt_vec_info stmt_info_a = dr_info_a->stmt;
+
+      dr_vec_info *dr_info_b = loop_vinfo->lookup_dr (DDR_B (ddr));
+      stmt_vec_info stmt_info_b = dr_info_b->stmt;
+
+      /* Skip the pair if inter-iteration dependencies are irrelevant
+	 and intra-iteration dependencies are guaranteed to be honored.  */
+      if (ignore_step_p
+	  && (vect_preserves_scalar_order_p (dr_info_a, dr_info_b)
+	      || vectorizable_with_step_bound_p (dr_info_a, dr_info_b,
+						 &lower_bound)))
+	{
+	  if (dump_enabled_p ())
+	    dump_printf_loc (MSG_NOTE, vect_location,
+			     "no need for alias check between "
+			     "%T and %T when VF is 1\n",
+			     DR_REF (dr_info_a->dr), DR_REF (dr_info_b->dr));
+	  continue;
+	}
+
+      /* See whether we can handle the alias using a bounds check on
+	 the step, and whether that's likely to be the best approach.
+	 (It might not be, for example, if the minimum step is much larger
+	 than the number of bytes handled by one vector iteration.)  */
+      if (!ignore_step_p
+	  && TREE_CODE (DR_STEP (dr_info_a->dr)) != INTEGER_CST
+	  && vectorizable_with_step_bound_p (dr_info_a, dr_info_b,
+					     &lower_bound)
+	  && (vect_small_gap_p (loop_vinfo, dr_info_a, lower_bound)
+	      || vect_small_gap_p (loop_vinfo, dr_info_b, lower_bound)))
+	{
+	  bool unsigned_p = dr_known_forward_stride_p (dr_info_a->dr);
+	  if (dump_enabled_p ())
+	    {
+	      dump_printf_loc (MSG_NOTE, vect_location, "no alias between "
+			       "%T and %T when the step %T is outside ",
+			       DR_REF (dr_info_a->dr),
+			       DR_REF (dr_info_b->dr),
+			       DR_STEP (dr_info_a->dr));
+	      if (unsigned_p)
+		dump_printf (MSG_NOTE, "[0");
+	      else
+		{
+		  dump_printf (MSG_NOTE, "(");
+		  dump_dec (MSG_NOTE, poly_int64 (-lower_bound));
+		}
+	      dump_printf (MSG_NOTE, ", ");
+	      dump_dec (MSG_NOTE, lower_bound);
+	      dump_printf (MSG_NOTE, ")\n");
+	    }
+	  vect_check_lower_bound (loop_vinfo, DR_STEP (dr_info_a->dr),
+				  unsigned_p, lower_bound);
+	  continue;
+	}
+
+      stmt_vec_info dr_group_first_a = DR_GROUP_FIRST_ELEMENT (stmt_info_a);
       if (dr_group_first_a)
 	{
-	  stmt_a = dr_group_first_a;
-	  dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
+	  stmt_info_a = dr_group_first_a;
+	  dr_info_a = STMT_VINFO_DR_INFO (stmt_info_a);
 	}
 
-      dr_b = DDR_B (ddr);
-      stmt_b = DR_STMT (DDR_B (ddr));
-      dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
+      stmt_vec_info dr_group_first_b = DR_GROUP_FIRST_ELEMENT (stmt_info_b);
       if (dr_group_first_b)
 	{
-	  stmt_b = dr_group_first_b;
-	  dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
+	  stmt_info_b = dr_group_first_b;
+	  dr_info_b = STMT_VINFO_DR_INFO (stmt_info_b);
 	}
 
-      if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0))
-	length_factor = scalar_loop_iters;
+      if (ignore_step_p)
+	{
+	  segment_length_a = size_zero_node;
+	  segment_length_b = size_zero_node;
+	}
       else
-	length_factor = size_int (vect_factor);
-      segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
-      segment_length_b = vect_vfa_segment_size (dr_b, length_factor);
-
-      comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (dr_a),
-					DR_BASE_ADDRESS (dr_b));
+	{
+	  if (!operand_equal_p (DR_STEP (dr_info_a->dr),
+				DR_STEP (dr_info_b->dr), 0))
+	    length_factor = scalar_loop_iters;
+	  else
+	    length_factor = size_int (vect_factor);
+	  segment_length_a = vect_vfa_segment_size (dr_info_a, length_factor);
+	  segment_length_b = vect_vfa_segment_size (dr_info_b, length_factor);
+	}
+      access_size_a = vect_vfa_access_size (dr_info_a);
+      access_size_b = vect_vfa_access_size (dr_info_b);
+      align_a = vect_vfa_align (dr_info_a);
+      align_b = vect_vfa_align (dr_info_b);
+
+      comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (dr_info_a->dr),
+					DR_BASE_ADDRESS (dr_info_b->dr));
       if (comp_res == 0)
-	comp_res = data_ref_compare_tree (DR_OFFSET (dr_a),
-					  DR_OFFSET (dr_b));
-
-      /* Alias is known at compilation time.  */
+	comp_res = data_ref_compare_tree (DR_OFFSET (dr_info_a->dr),
+					  DR_OFFSET (dr_info_b->dr));
+
+      /* See whether the alias is known at compilation time.  */
       if (comp_res == 0
-	  && TREE_CODE (DR_STEP (dr_a)) == INTEGER_CST
-	  && TREE_CODE (DR_STEP (dr_b)) == INTEGER_CST
-	  && TREE_CODE (segment_length_a) == INTEGER_CST
-	  && TREE_CODE (segment_length_b) == INTEGER_CST)
+	  && TREE_CODE (DR_STEP (dr_info_a->dr)) == INTEGER_CST
+	  && TREE_CODE (DR_STEP (dr_info_b->dr)) == INTEGER_CST
+	  && poly_int_tree_p (segment_length_a)
+	  && poly_int_tree_p (segment_length_b))
 	{
-	  if (vect_no_alias_p (dr_a, dr_b, segment_length_a, segment_length_b))
+	  int res = vect_compile_time_alias (dr_info_a, dr_info_b,
+					     segment_length_a,
+					     segment_length_b,
+					     access_size_a,
+					     access_size_b);
+	  if (res >= 0 && dump_enabled_p ())
+	    {
+	      dump_printf_loc (MSG_NOTE, vect_location,
+			       "can tell at compile time that %T and %T",
+			       DR_REF (dr_info_a->dr), DR_REF (dr_info_b->dr));
+	      if (res == 0)
+		dump_printf (MSG_NOTE, " do not alias\n");
+	      else
+		dump_printf (MSG_NOTE, " alias\n");
+	    }
+
+	  if (res == 0)
 	    continue;
 
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_NOTE, vect_location,
-			     "not vectorized: compilation time alias.\n");
-
-	  return false;
+	  if (res == 1)
+	    return opt_result::failure_at (stmt_info_b->stmt,
+					   "not vectorized:"
+					   " compilation time alias: %G%G",
+					   stmt_info_a->stmt,
+					   stmt_info_b->stmt);
 	}
 
       dr_with_seg_len_pair_t dr_with_seg_len_pair
-	  (dr_with_seg_len (dr_a, segment_length_a),
-	   dr_with_seg_len (dr_b, segment_length_b));
+	(dr_with_seg_len (dr_info_a->dr, segment_length_a,
+			  access_size_a, align_a),
+	 dr_with_seg_len (dr_info_b->dr, segment_length_b,
+			  access_size_b, align_b));
 
       /* Canonicalize pairs by sorting the two DR members.  */
       if (comp_res > 0)
@@ -3186,51 +3522,139 @@
       comp_alias_ddrs.safe_push (dr_with_seg_len_pair);
     }
 
-  prune_runtime_alias_test_list (&comp_alias_ddrs,
-				 (unsigned HOST_WIDE_INT) vect_factor);
+  prune_runtime_alias_test_list (&comp_alias_ddrs, vect_factor);
 
   unsigned int count = (comp_alias_ddrs.length ()
 			+ check_unequal_addrs.length ());
+
   dump_printf_loc (MSG_NOTE, vect_location,
 		   "improved number of alias checks from %d to %d\n",
 		   may_alias_ddrs.length (), count);
   if ((int) count > PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
-    {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "number of versioning for alias "
-			 "run-time tests exceeds %d "
-			 "(--param vect-max-version-for-alias-checks)\n",
-			 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
-      return false;
-    }
-
+    return opt_result::failure_at
+      (vect_location,
+       "number of versioning for alias "
+       "run-time tests exceeds %d "
+       "(--param vect-max-version-for-alias-checks)\n",
+       PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
+
+  return opt_result::success ();
+}
+
+/* Check whether we can use an internal function for a gather load
+   or scatter store.  READ_P is true for loads and false for stores.
+   MASKED_P is true if the load or store is conditional.  MEMORY_TYPE is
+   the type of the memory elements being loaded or stored.  OFFSET_BITS
+   is the number of bits in each scalar offset and OFFSET_SIGN is the
+   sign of the offset.  SCALE is the amount by which the offset should
+   be multiplied *after* it has been converted to address width.
+
+   Return true if the function is supported, storing the function
+   id in *IFN_OUT and the type of a vector element in *ELEMENT_TYPE_OUT.  */
+
+bool
+vect_gather_scatter_fn_p (bool read_p, bool masked_p, tree vectype,
+			  tree memory_type, unsigned int offset_bits,
+			  signop offset_sign, int scale,
+			  internal_fn *ifn_out, tree *element_type_out)
+{
+  unsigned int memory_bits = tree_to_uhwi (TYPE_SIZE (memory_type));
+  unsigned int element_bits = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype)));
+  if (offset_bits > element_bits)
+    /* Internal functions require the offset to be the same width as
+       the vector elements.  We can extend narrower offsets, but it isn't
+       safe to truncate wider offsets.  */
+    return false;
+
+  if (element_bits != memory_bits)
+    /* For now the vector elements must be the same width as the
+       memory elements.  */
+    return false;
+
+  /* Work out which function we need.  */
+  internal_fn ifn;
+  if (read_p)
+    ifn = masked_p ? IFN_MASK_GATHER_LOAD : IFN_GATHER_LOAD;
+  else
+    ifn = masked_p ? IFN_MASK_SCATTER_STORE : IFN_SCATTER_STORE;
+
+  /* Test whether the target supports this combination.  */
+  if (!internal_gather_scatter_fn_supported_p (ifn, vectype, memory_type,
+					       offset_sign, scale))
+    return false;
+
+  *ifn_out = ifn;
+  *element_type_out = TREE_TYPE (vectype);
   return true;
 }
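
A standalone sketch of the selection logic above; an enum class stands in for GCC's internal_fn values, and the final target query (internal_gather_scatter_fn_supported_p) is omitted.

#include <cassert>

enum class gs_fn { gather, mask_gather, scatter, mask_scatter, none };

static gs_fn pick_gather_scatter_fn (bool read_p, bool masked_p,
                                     unsigned offset_bits,
                                     unsigned element_bits,
                                     unsigned memory_bits)
{
  // Offsets may be extended to the element width but never truncated,
  // and (for now) vector and memory element widths must match.
  if (offset_bits > element_bits || element_bits != memory_bits)
    return gs_fn::none;
  if (read_p)
    return masked_p ? gs_fn::mask_gather : gs_fn::gather;
  return masked_p ? gs_fn::mask_scatter : gs_fn::scatter;
}

int main ()
{
  assert (pick_gather_scatter_fn (true, false, 32, 32, 32) == gs_fn::gather);
  assert (pick_gather_scatter_fn (false, true, 32, 32, 32)
          == gs_fn::mask_scatter);
  // A 64-bit offset cannot be used with 32-bit elements.
  assert (pick_gather_scatter_fn (true, false, 64, 32, 32) == gs_fn::none);
  return 0;
}
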
 
-/* Return true if a non-affine read or write in STMT is suitable for a
+/* STMT_INFO is a call to an internal gather load or scatter store function.
+   Describe the operation in INFO.  */
+
+static void
+vect_describe_gather_scatter_call (stmt_vec_info stmt_info,
+				   gather_scatter_info *info)
+{
+  gcall *call = as_a <gcall *> (stmt_info->stmt);
+  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
+
+  info->ifn = gimple_call_internal_fn (call);
+  info->decl = NULL_TREE;
+  info->base = gimple_call_arg (call, 0);
+  info->offset = gimple_call_arg (call, 1);
+  info->offset_dt = vect_unknown_def_type;
+  info->offset_vectype = NULL_TREE;
+  info->scale = TREE_INT_CST_LOW (gimple_call_arg (call, 2));
+  info->element_type = TREE_TYPE (vectype);
+  info->memory_type = TREE_TYPE (DR_REF (dr));
+}
+
+/* Return true if a non-affine read or write in STMT_INFO is suitable for a
    gather load or scatter store.  Describe the operation in *INFO if so.  */
 
 bool
-vect_check_gather_scatter (gimple *stmt, loop_vec_info loop_vinfo,
+vect_check_gather_scatter (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
 			   gather_scatter_info *info)
 {
-  HOST_WIDE_INT scale = 1, pbitpos, pbitsize;
+  HOST_WIDE_INT scale = 1;
+  poly_int64 pbitpos, pbitsize;
   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
-  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
   struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
   tree offtype = NULL_TREE;
-  tree decl, base, off;
+  tree decl = NULL_TREE, base, off;
+  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+  tree memory_type = TREE_TYPE (DR_REF (dr));
   machine_mode pmode;
   int punsignedp, reversep, pvolatilep = 0;
+  internal_fn ifn;
+  tree element_type;
+  bool masked_p = false;
+
+  /* See whether this is already a call to a gather/scatter internal function.
+     If not, see whether it's a masked load or store.  */
+  gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
+  if (call && gimple_call_internal_p (call))
+    {
+      ifn = gimple_call_internal_fn (call);
+      if (internal_gather_scatter_fn_p (ifn))
+	{
+	  vect_describe_gather_scatter_call (stmt_info, info);
+	  return true;
+	}
+      masked_p = (ifn == IFN_MASK_LOAD || ifn == IFN_MASK_STORE);
+    }
+
+  /* True if we should aim to use internal functions rather than
+     built-in functions.  */
+  bool use_ifn_p = (DR_IS_READ (dr)
+		    ? supports_vec_gather_load_p ()
+		    : supports_vec_scatter_store_p ());
 
   base = DR_REF (dr);
   /* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF,
      see if we can use the def stmt of the address.  */
-  if (is_gimple_call (stmt)
-      && gimple_call_internal_p (stmt)
-      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
-	  || gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
+  if (masked_p
       && TREE_CODE (base) == MEM_REF
       && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
       && integer_zerop (TREE_OPERAND (base, 1))
@@ -3256,17 +3680,17 @@
      that can be gimplified before the loop.  */
   base = get_inner_reference (base, &pbitsize, &pbitpos, &off, &pmode,
 			      &punsignedp, &reversep, &pvolatilep);
-  gcc_assert (base && (pbitpos % BITS_PER_UNIT) == 0 && !reversep);
+  if (reversep)
+    return false;
+
+  poly_int64 pbytepos = exact_div (pbitpos, BITS_PER_UNIT);
 
   if (TREE_CODE (base) == MEM_REF)
     {
       if (!integer_zerop (TREE_OPERAND (base, 1)))
 	{
 	  if (off == NULL_TREE)
-	    {
-	      offset_int moff = mem_ref_offset (base);
-	      off = wide_int_to_tree (sizetype, moff);
-	    }
+	    off = wide_int_to_tree (sizetype, mem_ref_offset (base));
 	  else
 	    off = size_binop (PLUS_EXPR, off,
 			      fold_convert (sizetype, TREE_OPERAND (base, 1)));
@@ -3289,14 +3713,14 @@
       if (!integer_zerop (off))
 	return false;
       off = base;
-      base = size_int (pbitpos / BITS_PER_UNIT);
+      base = size_int (pbytepos);
     }
   /* Otherwise put base + constant offset into the loop invariant BASE
      and continue with OFF.  */
   else
     {
       base = fold_convert (sizetype, base);
-      base = size_binop (PLUS_EXPR, base, size_int (pbitpos / BITS_PER_UNIT));
+      base = size_binop (PLUS_EXPR, base, size_int (pbytepos));
     }
 
   /* OFF at this point may be either a SSA_NAME or some tree expression
@@ -3363,7 +3787,17 @@
 	case MULT_EXPR:
 	  if (scale == 1 && tree_fits_shwi_p (op1))
 	    {
-	      scale = tree_to_shwi (op1);
+	      int new_scale = tree_to_shwi (op1);
+	      /* Only treat this as a scaling operation if the target
+		 supports it.  */
+	      if (use_ifn_p
+		  && !vect_gather_scatter_fn_p (DR_IS_READ (dr), masked_p,
+						vectype, memory_type, 1,
+						TYPE_SIGN (TREE_TYPE (op0)),
+						new_scale, &ifn,
+						&element_type))
+		break;
+	      scale = new_scale;
 	      off = op0;
 	      continue;
 	    }
@@ -3381,6 +3815,15 @@
 	      off = op0;
 	      continue;
 	    }
+
+	  /* The internal functions need the offset to be the same width
+	     as the elements of VECTYPE.  Don't include operations that
+	     cast the offset from that width to a different width.  */
+	  if (use_ifn_p
+	      && (int_size_in_bytes (TREE_TYPE (vectype))
+		  == int_size_in_bytes (TREE_TYPE (off))))
+	    break;
+
 	  if (TYPE_PRECISION (TREE_TYPE (op0))
 	      < TYPE_PRECISION (TREE_TYPE (off)))
 	    {
@@ -3405,25 +3848,171 @@
   if (offtype == NULL_TREE)
     offtype = TREE_TYPE (off);
 
-  if (DR_IS_READ (dr))
-    decl = targetm.vectorize.builtin_gather (STMT_VINFO_VECTYPE (stmt_info),
-					     offtype, scale);
+  if (use_ifn_p)
+    {
+      if (!vect_gather_scatter_fn_p (DR_IS_READ (dr), masked_p, vectype,
+				     memory_type, TYPE_PRECISION (offtype),
+				     TYPE_SIGN (offtype), scale, &ifn,
+				     &element_type))
+	return false;
+    }
   else
-    decl = targetm.vectorize.builtin_scatter (STMT_VINFO_VECTYPE (stmt_info),
-					      offtype, scale);
-
-  if (decl == NULL_TREE)
-    return false;
-
+    {
+      if (DR_IS_READ (dr))
+	{
+	  if (targetm.vectorize.builtin_gather)
+	    decl = targetm.vectorize.builtin_gather (vectype, offtype, scale);
+	}
+      else
+	{
+	  if (targetm.vectorize.builtin_scatter)
+	    decl = targetm.vectorize.builtin_scatter (vectype, offtype, scale);
+	}
+
+      if (!decl)
+	return false;
+
+      ifn = IFN_LAST;
+      element_type = TREE_TYPE (vectype);
+    }
+
+  info->ifn = ifn;
   info->decl = decl;
   info->base = base;
   info->offset = off;
   info->offset_dt = vect_unknown_def_type;
   info->offset_vectype = NULL_TREE;
   info->scale = scale;
+  info->element_type = element_type;
+  info->memory_type = memory_type;
   return true;
 }
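
The gather_scatter_info filled in above describes lane I's address as BASE + OFFSET[I] * SCALE, with the offset extended to address width before scaling.  A standalone scalar emulation of a four-lane gather (plain pointers; nothing here is GCC API):

#include <cassert>
#include <cstdint>

static void emulated_gather (const int32_t *base, const int32_t *offset,
                             int64_t scale, int32_t *out, int lanes)
{
  for (int i = 0; i < lanes; ++i)
    {
      // Extend the lane's offset to address width, then scale it into bytes.
      const char *addr = reinterpret_cast<const char *> (base)
                         + static_cast<int64_t> (offset[i]) * scale;
      out[i] = *reinterpret_cast<const int32_t *> (addr);
    }
}

int main ()
{
  int32_t data[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
  int32_t idx[4] = { 0, 2, 4, 6 };   // element indices
  int32_t out[4];
  // scale == sizeof (int32_t): the offsets count elements, not bytes.
  emulated_gather (data, idx, sizeof (int32_t), out, 4);
  assert (out[0] == 10 && out[1] == 12 && out[2] == 14 && out[3] == 16);
  return 0;
}
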
 
+/* Find the data references in STMT, analyze them with respect to LOOP and
+   append them to DATAREFS.  Return an opt_result failure if the datarefs
+   in this stmt cannot be handled.  */
+
+opt_result
+vect_find_stmt_data_reference (loop_p loop, gimple *stmt,
+			       vec<data_reference_p> *datarefs)
+{
+  /* We can ignore clobbers for dataref analysis - they are removed during
+     loop vectorization and BB vectorization checks dependences with a
+     stmt walk.  */
+  if (gimple_clobber_p (stmt))
+    return opt_result::success ();
+
+  if (gimple_has_volatile_ops (stmt))
+    return opt_result::failure_at (stmt, "not vectorized: volatile type: %G",
+				   stmt);
+
+  if (stmt_can_throw_internal (cfun, stmt))
+    return opt_result::failure_at (stmt,
+				   "not vectorized:"
+				   " statement can throw an exception: %G",
+				   stmt);
+
+  auto_vec<data_reference_p, 2> refs;
+  opt_result res = find_data_references_in_stmt (loop, stmt, &refs);
+  if (!res)
+    return res;
+
+  if (refs.is_empty ())
+    return opt_result::success ();
+
+  if (refs.length () > 1)
+    return opt_result::failure_at (stmt,
+				   "not vectorized:"
+				   " more than one data ref in stmt: %G", stmt);
+
+  if (gcall *call = dyn_cast <gcall *> (stmt))
+    if (!gimple_call_internal_p (call)
+	|| (gimple_call_internal_fn (call) != IFN_MASK_LOAD
+	    && gimple_call_internal_fn (call) != IFN_MASK_STORE))
+      return opt_result::failure_at (stmt,
+				     "not vectorized: dr in a call %G", stmt);
+
+  data_reference_p dr = refs.pop ();
+  if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
+      && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
+    return opt_result::failure_at (stmt,
+				   "not vectorized:"
+				   " statement is bitfield access %G", stmt);
+
+  if (DR_BASE_ADDRESS (dr)
+      && TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
+    return opt_result::failure_at (stmt,
+				   "not vectorized:"
+				   " base addr of dr is a constant\n");
+
+  /* Check whether this may be a SIMD lane access and adjust the
+     DR to make it easier for us to handle it.  */
+  if (loop
+      && loop->simduid
+      && (!DR_BASE_ADDRESS (dr)
+	  || !DR_OFFSET (dr)
+	  || !DR_INIT (dr)
+	  || !DR_STEP (dr)))
+    {
+      struct data_reference *newdr
+	= create_data_ref (NULL, loop_containing_stmt (stmt), DR_REF (dr), stmt,
+			   DR_IS_READ (dr), DR_IS_CONDITIONAL_IN_STMT (dr));
+      if (DR_BASE_ADDRESS (newdr)
+	  && DR_OFFSET (newdr)
+	  && DR_INIT (newdr)
+	  && DR_STEP (newdr)
+	  && integer_zerop (DR_STEP (newdr)))
+	{
+	  tree off = DR_OFFSET (newdr);
+	  STRIP_NOPS (off);
+	  if (TREE_CODE (DR_INIT (newdr)) == INTEGER_CST
+	      && TREE_CODE (off) == MULT_EXPR
+	      && tree_fits_uhwi_p (TREE_OPERAND (off, 1)))
+	    {
+	      tree step = TREE_OPERAND (off, 1);
+	      off = TREE_OPERAND (off, 0);
+	      STRIP_NOPS (off);
+	      if (CONVERT_EXPR_P (off)
+		  && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off, 0)))
+		      < TYPE_PRECISION (TREE_TYPE (off))))
+		off = TREE_OPERAND (off, 0);
+	      if (TREE_CODE (off) == SSA_NAME)
+		{
+		  gimple *def = SSA_NAME_DEF_STMT (off);
+		  tree reft = TREE_TYPE (DR_REF (newdr));
+		  if (is_gimple_call (def)
+		      && gimple_call_internal_p (def)
+		      && (gimple_call_internal_fn (def) == IFN_GOMP_SIMD_LANE))
+		    {
+		      tree arg = gimple_call_arg (def, 0);
+		      gcc_assert (TREE_CODE (arg) == SSA_NAME);
+		      arg = SSA_NAME_VAR (arg);
+		      if (arg == loop->simduid
+			  /* For now.  */
+			  && tree_int_cst_equal (TYPE_SIZE_UNIT (reft), step))
+			{
+			  DR_OFFSET (newdr) = ssize_int (0);
+			  DR_STEP (newdr) = step;
+			  DR_OFFSET_ALIGNMENT (newdr) = BIGGEST_ALIGNMENT;
+			  DR_STEP_ALIGNMENT (newdr)
+			    = highest_pow2_factor (step);
+			  /* Mark as simd-lane access.  */
+			  newdr->aux = (void *)-1;
+			  free_data_ref (dr);
+			  datarefs->safe_push (newdr);
+			  return opt_result::success ();
+			}
+		    }
+		}
+	    }
+	}
+      free_data_ref (newdr);
+    }
+
+  datarefs->safe_push (dr);
+  return opt_result::success ();
+}
+
 /* Function vect_analyze_data_refs.
 
   Find all the data references in the loop or basic block.
@@ -3439,17 +4028,15 @@
 
 */
 
-bool
-vect_analyze_data_refs (vec_info *vinfo, int *min_vf)
+opt_result
+vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf)
 {
   struct loop *loop = NULL;
   unsigned int i;
   struct data_reference *dr;
   tree scalar_type;
 
-  if (dump_enabled_p ())
-    dump_printf_loc (MSG_NOTE, vect_location,
-		     "=== vect_analyze_data_refs ===\n");
+  DUMP_VECT_SCOPE ("vect_analyze_data_refs");
 
   if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
     loop = LOOP_VINFO_LOOP (loop_vinfo);
@@ -3457,42 +4044,17 @@
   /* Go through the data-refs, check that the analysis succeeded.  Update
      pointer from stmt_vec_info struct to DR and vectype.  */
 
-  vec<data_reference_p> datarefs = vinfo->datarefs;
+  vec<data_reference_p> datarefs = vinfo->shared->datarefs;
   FOR_EACH_VEC_ELT (datarefs, i, dr)
     {
-      gimple *stmt;
-      stmt_vec_info stmt_info;
-      tree base, offset, init;
       enum { SG_NONE, GATHER, SCATTER } gatherscatter = SG_NONE;
-      bool simd_lane_access = false;
-      int vf;
-
-again:
-      if (!dr || !DR_REF (dr))
-        {
-          if (dump_enabled_p ())
-	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-	                     "not vectorized: unhandled data-ref\n");
-          return false;
-        }
-
-      stmt = DR_STMT (dr);
-      stmt_info = vinfo_for_stmt (stmt);
-
-      /* Discard clobbers from the dataref vector.  We will remove
-         clobber stmts during vectorization.  */
-      if (gimple_clobber_p (stmt))
-	{
-	  free_data_ref (dr);
-	  if (i == datarefs.length () - 1)
-	    {
-	      datarefs.pop ();
-	      break;
-	    }
-	  datarefs.ordered_remove (i);
-	  dr = datarefs[i];
-	  goto again;
-	}
+      poly_uint64 vf;
+
+      gcc_assert (DR_REF (dr));
+      stmt_vec_info stmt_info = vinfo->lookup_stmt (DR_STMT (dr));
+      gcc_assert (!stmt_info->dr_aux.dr);
+      stmt_info->dr_aux.dr = dr;
+      stmt_info->dr_aux.stmt = stmt_info;
 
       /* Check that analysis of the data-ref succeeded.  */
       if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
@@ -3501,197 +4063,89 @@
 	  bool maybe_gather
 	    = DR_IS_READ (dr)
 	      && !TREE_THIS_VOLATILE (DR_REF (dr))
-	      && targetm.vectorize.builtin_gather != NULL;
+	      && (targetm.vectorize.builtin_gather != NULL
+		  || supports_vec_gather_load_p ());
 	  bool maybe_scatter
 	    = DR_IS_WRITE (dr)
 	      && !TREE_THIS_VOLATILE (DR_REF (dr))
-	      && targetm.vectorize.builtin_scatter != NULL;
-	  bool maybe_simd_lane_access
-	    = is_a <loop_vec_info> (vinfo) && loop->simduid;
-
-	  /* If target supports vector gather loads or scatter stores, or if
-	     this might be a SIMD lane access, see if they can't be used.  */
+	      && (targetm.vectorize.builtin_scatter != NULL
+		  || supports_vec_scatter_store_p ());
+
+	  /* If target supports vector gather loads or scatter stores,
+	     see whether they can be used.  */
 	  if (is_a <loop_vec_info> (vinfo)
-	      && (maybe_gather || maybe_scatter || maybe_simd_lane_access)
-	      && !nested_in_vect_loop_p (loop, stmt))
+	      && !nested_in_vect_loop_p (loop, stmt_info))
 	    {
-	      struct data_reference *newdr
-		= create_data_ref (NULL, loop_containing_stmt (stmt),
-				   DR_REF (dr), stmt, !maybe_scatter,
-				   DR_IS_CONDITIONAL_IN_STMT (dr));
-	      gcc_assert (newdr != NULL && DR_REF (newdr));
-	      if (DR_BASE_ADDRESS (newdr)
-		  && DR_OFFSET (newdr)
-		  && DR_INIT (newdr)
-		  && DR_STEP (newdr)
-		  && integer_zerop (DR_STEP (newdr)))
+	      if (maybe_gather || maybe_scatter)
 		{
-		  if (maybe_simd_lane_access)
-		    {
-		      tree off = DR_OFFSET (newdr);
-		      STRIP_NOPS (off);
-		      if (TREE_CODE (DR_INIT (newdr)) == INTEGER_CST
-			  && TREE_CODE (off) == MULT_EXPR
-			  && tree_fits_uhwi_p (TREE_OPERAND (off, 1)))
-			{
-			  tree step = TREE_OPERAND (off, 1);
-			  off = TREE_OPERAND (off, 0);
-			  STRIP_NOPS (off);
-			  if (CONVERT_EXPR_P (off)
-			      && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off,
-									  0)))
-				 < TYPE_PRECISION (TREE_TYPE (off)))
-			    off = TREE_OPERAND (off, 0);
-			  if (TREE_CODE (off) == SSA_NAME)
-			    {
-			      gimple *def = SSA_NAME_DEF_STMT (off);
-			      tree reft = TREE_TYPE (DR_REF (newdr));
-			      if (is_gimple_call (def)
-				  && gimple_call_internal_p (def)
-				  && (gimple_call_internal_fn (def)
-				      == IFN_GOMP_SIMD_LANE))
-				{
-				  tree arg = gimple_call_arg (def, 0);
-				  gcc_assert (TREE_CODE (arg) == SSA_NAME);
-				  arg = SSA_NAME_VAR (arg);
-				  if (arg == loop->simduid
-				      /* For now.  */
-				      && tree_int_cst_equal
-					   (TYPE_SIZE_UNIT (reft),
-					    step))
-				    {
-				      DR_OFFSET (newdr) = ssize_int (0);
-				      DR_STEP (newdr) = step;
-				      DR_OFFSET_ALIGNMENT (newdr)
-					= BIGGEST_ALIGNMENT;
-				      DR_STEP_ALIGNMENT (newdr)
-					= highest_pow2_factor (step);
-				      dr = newdr;
-				      simd_lane_access = true;
-				    }
-				}
-			    }
-			}
-		    }
-		  if (!simd_lane_access && (maybe_gather || maybe_scatter))
-		    {
-		      dr = newdr;
-		      if (maybe_gather)
-			gatherscatter = GATHER;
-		      else
-			gatherscatter = SCATTER;
-		    }
+		  if (maybe_gather)
+		    gatherscatter = GATHER;
+		  else
+		    gatherscatter = SCATTER;
 		}
-	      if (gatherscatter == SG_NONE && !simd_lane_access)
-		free_data_ref (newdr);
 	    }
 
-	  if (gatherscatter == SG_NONE && !simd_lane_access)
+	  if (gatherscatter == SG_NONE)
 	    {
 	      if (dump_enabled_p ())
+		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+				 "not vectorized: data ref analysis "
+				 "failed %G", stmt_info->stmt);
+	      if (is_a <bb_vec_info> (vinfo))
 		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                                   "not vectorized: data ref analysis "
-                                   "failed ");
-		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+		  /* In BB vectorization the ref can still participate
+		     in dependence analysis, we just can't vectorize it.  */
+		  STMT_VINFO_VECTORIZABLE (stmt_info) = false;
+		  continue;
 		}
-
-	      if (is_a <bb_vec_info> (vinfo))
-		break;
-
-	      return false;
+	      return opt_result::failure_at (stmt_info->stmt,
+					     "not vectorized:"
+					     " data ref analysis failed: %G",
+					     stmt_info->stmt);
 	    }
         }
 
-      if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
-        {
-          if (dump_enabled_p ())
-            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                             "not vectorized: base addr of dr is a "
-                             "constant\n");
-
-          if (is_a <bb_vec_info> (vinfo))
-	    break;
-
-	  if (gatherscatter != SG_NONE || simd_lane_access)
-	    free_data_ref (dr);
-	  return false;
-        }
-
-      if (TREE_THIS_VOLATILE (DR_REF (dr)))
-        {
-          if (dump_enabled_p ())
-            {
-              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                               "not vectorized: volatile type ");
-              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
-            }
-
-          if (is_a <bb_vec_info> (vinfo))
-	    break;
-
-          return false;
-        }
-
-      if (stmt_can_throw_internal (stmt))
-        {
-          if (dump_enabled_p ())
-            {
-              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                               "not vectorized: statement can throw an "
-                               "exception ");
-              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
-            }
-
-          if (is_a <bb_vec_info> (vinfo))
-	    break;
-
-	  if (gatherscatter != SG_NONE || simd_lane_access)
-	    free_data_ref (dr);
-          return false;
-        }
-
-      if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
-	  && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
+      /* See if this was detected as a SIMD lane access.  */
+      if (dr->aux == (void *)-1)
+	{
+	  if (nested_in_vect_loop_p (loop, stmt_info))
+	    return opt_result::failure_at (stmt_info->stmt,
+					   "not vectorized:"
+					   " data ref analysis failed: %G",
+					   stmt_info->stmt);
+	  STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) = true;
+	}
+
+      tree base = get_base_address (DR_REF (dr));
+      if (base && VAR_P (base) && DECL_NONALIASED (base))
 	{
           if (dump_enabled_p ())
-            {
-              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                               "not vectorized: statement is bitfield "
-                               "access ");
-              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
-            }
-
+	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+			     "not vectorized: base object not addressable "
+			     "for stmt: %G", stmt_info->stmt);
           if (is_a <bb_vec_info> (vinfo))
-	    break;
-
-	  if (gatherscatter != SG_NONE || simd_lane_access)
-	    free_data_ref (dr);
-          return false;
+	    {
+	      /* In BB vectorization the ref can still participate
+	         in dependence analysis, we just can't vectorize it.  */
+	      STMT_VINFO_VECTORIZABLE (stmt_info) = false;
+	      continue;
+	    }
+	  return opt_result::failure_at (stmt_info->stmt,
+					 "not vectorized: base object not"
+					 " addressable for stmt: %G",
+					 stmt_info->stmt);
 	}
 
-      base = unshare_expr (DR_BASE_ADDRESS (dr));
-      offset = unshare_expr (DR_OFFSET (dr));
-      init = unshare_expr (DR_INIT (dr));
-
-      if (is_gimple_call (stmt)
-	  && (!gimple_call_internal_p (stmt)
-	      || (gimple_call_internal_fn (stmt) != IFN_MASK_LOAD
-		  && gimple_call_internal_fn (stmt) != IFN_MASK_STORE)))
+      if (is_a <loop_vec_info> (vinfo)
+	  && DR_STEP (dr)
+	  && TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
 	{
-	  if (dump_enabled_p ())
-	    {
-	      dump_printf_loc (MSG_MISSED_OPTIMIZATION,  vect_location,
-	                       "not vectorized: dr in a call ");
-	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
-	    }
-
-	  if (is_a <bb_vec_info> (vinfo))
-	    break;
-
-	  if (gatherscatter != SG_NONE || simd_lane_access)
-	    free_data_ref (dr);
-	  return false;
+	  if (nested_in_vect_loop_p (loop, stmt_info))
+	    return opt_result::failure_at (stmt_info->stmt,
+					   "not vectorized:"
+					   "not suitable for strided load %G",
+					   stmt_info->stmt);
+	  STMT_VINFO_STRIDED_P (stmt_info) = true;
 	}
 
       /* Update DR field in stmt_vec_info struct.  */
@@ -3702,102 +4156,49 @@
 	 inner-most enclosing loop).  We do that by building a reference to the
 	 first location accessed by the inner-loop, and analyze it relative to
 	 the outer-loop.  */
-      if (loop && nested_in_vect_loop_p (loop, stmt))
+      if (loop && nested_in_vect_loop_p (loop, stmt_info))
 	{
 	  /* Build a reference to the first location accessed by the
 	     inner loop: *(BASE + INIT + OFFSET).  By construction,
 	     this address must be invariant in the inner loop, so we
 	     can consider it as being used in the outer loop.  */
+	  tree base = unshare_expr (DR_BASE_ADDRESS (dr));
+	  tree offset = unshare_expr (DR_OFFSET (dr));
+	  tree init = unshare_expr (DR_INIT (dr));
 	  tree init_offset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset),
 					  init, offset);
 	  tree init_addr = fold_build_pointer_plus (base, init_offset);
 	  tree init_ref = build_fold_indirect_ref (init_addr);
 
 	  if (dump_enabled_p ())
-	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-                               "analyze in outer loop: ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM, init_ref);
-	      dump_printf (MSG_NOTE, "\n");
-	    }
-
-	  if (!dr_analyze_innermost (&STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info),
-				     init_ref, loop))
+	    dump_printf_loc (MSG_NOTE, vect_location,
+			     "analyze in outer loop: %T\n", init_ref);
+
+	  opt_result res
+	    = dr_analyze_innermost (&STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info),
+				    init_ref, loop, stmt_info->stmt);
+	  if (!res)
 	    /* dr_analyze_innermost already explained the failure.  */
-	    return false;
+	    return res;
 
           if (dump_enabled_p ())
-	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-                               "\touter base_address: ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM,
-                                 STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
-	      dump_printf (MSG_NOTE, "\n\touter offset from base address: ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM,
-                                 STMT_VINFO_DR_OFFSET (stmt_info));
-	      dump_printf (MSG_NOTE,
-                           "\n\touter constant offset from base address: ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM,
-                                 STMT_VINFO_DR_INIT (stmt_info));
-	      dump_printf (MSG_NOTE, "\n\touter step: ");
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM,
-                                 STMT_VINFO_DR_STEP (stmt_info));
-	      dump_printf (MSG_NOTE, "\n\touter base alignment: %d\n",
-			   STMT_VINFO_DR_BASE_ALIGNMENT (stmt_info));
-	      dump_printf (MSG_NOTE, "\n\touter base misalignment: %d\n",
-			   STMT_VINFO_DR_BASE_MISALIGNMENT (stmt_info));
-	      dump_printf (MSG_NOTE, "\n\touter offset alignment: %d\n",
-			   STMT_VINFO_DR_OFFSET_ALIGNMENT (stmt_info));
-	      dump_printf (MSG_NOTE, "\n\touter step alignment: %d\n",
-			   STMT_VINFO_DR_STEP_ALIGNMENT (stmt_info));
-	    }
-	}
-
-      if (STMT_VINFO_DATA_REF (stmt_info))
-        {
-          if (dump_enabled_p ())
-            {
-              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                               "not vectorized: more than one data ref "
-                               "in stmt: ");
-              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
-            }
-
-          if (is_a <bb_vec_info> (vinfo))
-	    break;
-
-	  if (gatherscatter != SG_NONE || simd_lane_access)
-	    free_data_ref (dr);
-          return false;
-        }
-
-      STMT_VINFO_DATA_REF (stmt_info) = dr;
-      if (simd_lane_access)
-	{
-	  STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) = true;
-	  free_data_ref (datarefs[i]);
-	  datarefs[i] = dr;
-	}
-
-      if (TREE_CODE (DR_BASE_ADDRESS (dr)) == ADDR_EXPR
-	  && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (dr), 0))
-	  && DECL_NONALIASED (TREE_OPERAND (DR_BASE_ADDRESS (dr), 0)))
-	{
-          if (dump_enabled_p ())
-            {
-              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                               "not vectorized: base object not addressable "
-			       "for stmt: ");
-              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
-            }
-          if (is_a <bb_vec_info> (vinfo))
-	    {
-	      /* In BB vectorization the ref can still participate
-	         in dependence analysis, we just can't vectorize it.  */
-	      STMT_VINFO_VECTORIZABLE (stmt_info) = false;
-	      continue;
-	    }
-	  return false;
+	    dump_printf_loc (MSG_NOTE, vect_location,
+			     "\touter base_address: %T\n"
+			     "\touter offset from base address: %T\n"
+			     "\touter constant offset from base address: %T\n"
+			     "\touter step: %T\n"
+			     "\touter base alignment: %d\n\n"
+			     "\touter base misalignment: %d\n"
+			     "\touter offset alignment: %d\n"
+			     "\touter step alignment: %d\n",
+			     STMT_VINFO_DR_BASE_ADDRESS (stmt_info),
+			     STMT_VINFO_DR_OFFSET (stmt_info),
+			     STMT_VINFO_DR_INIT (stmt_info),
+			     STMT_VINFO_DR_STEP (stmt_info),
+			     STMT_VINFO_DR_BASE_ALIGNMENT (stmt_info),
+			     STMT_VINFO_DR_BASE_MISALIGNMENT (stmt_info),
+			     STMT_VINFO_DR_OFFSET_ALIGNMENT (stmt_info),
+			     STMT_VINFO_DR_STEP_ALIGNMENT (stmt_info));
 	}
 
       /* Set vectype for STMT.  */
@@ -3809,8 +4210,8 @@
           if (dump_enabled_p ())
             {
               dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-                               "not vectorized: no vectype for stmt: ");
-              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+                               "not vectorized: no vectype for stmt: %G",
+			       stmt_info->stmt);
               dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
               dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
                                  scalar_type);
@@ -3824,96 +4225,47 @@
 	      STMT_VINFO_VECTORIZABLE (stmt_info) = false;
 	      continue;
 	    }
-
-	  if (gatherscatter != SG_NONE || simd_lane_access)
-	    {
-	      STMT_VINFO_DATA_REF (stmt_info) = NULL;
-	      if (gatherscatter != SG_NONE)
-		free_data_ref (dr);
-	    }
-	  return false;
+	  return opt_result::failure_at (stmt_info->stmt,
+					 "not vectorized:"
+					 " no vectype for stmt: %G"
+					 " scalar_type: %T\n",
+					 stmt_info->stmt, scalar_type);
         }
       else
 	{
 	  if (dump_enabled_p ())
-	    {
-	      dump_printf_loc (MSG_NOTE, vect_location,
-			       "got vectype for stmt: ");
-	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
-	      dump_generic_expr (MSG_NOTE, TDF_SLIM,
-				 STMT_VINFO_VECTYPE (stmt_info));
-	      dump_printf (MSG_NOTE, "\n");
-	    }
+	    dump_printf_loc (MSG_NOTE, vect_location,
+			     "got vectype for stmt: %G%T\n",
+			     stmt_info->stmt, STMT_VINFO_VECTYPE (stmt_info));
 	}
 
       /* Adjust the minimal vectorization factor according to the
 	 vector type.  */
       vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
-      if (vf > *min_vf)
-	*min_vf = vf;
+      *min_vf = upper_bound (*min_vf, vf);
 
       if (gatherscatter != SG_NONE)
 	{
 	  gather_scatter_info gs_info;
-	  if (!vect_check_gather_scatter (stmt, as_a <loop_vec_info> (vinfo),
+	  if (!vect_check_gather_scatter (stmt_info,
+					  as_a <loop_vec_info> (vinfo),
 					  &gs_info)
 	      || !get_vectype_for_scalar_type (TREE_TYPE (gs_info.offset)))
-	    {
-	      STMT_VINFO_DATA_REF (stmt_info) = NULL;
-	      free_data_ref (dr);
-	      if (dump_enabled_p ())
-		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				   (gatherscatter == GATHER) ?
-				   "not vectorized: not suitable for gather "
-				   "load " :
-				   "not vectorized: not suitable for scatter "
-				   "store ");
-		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
-		}
-	      return false;
-	    }
-
-	  free_data_ref (datarefs[i]);
-	  datarefs[i] = dr;
+	    return opt_result::failure_at
+	      (stmt_info->stmt,
+	       (gatherscatter == GATHER) ?
+	       "not vectorized: not suitable for gather load %G" :
+	       "not vectorized: not suitable for scatter store %G",
+	       stmt_info->stmt);
 	  STMT_VINFO_GATHER_SCATTER_P (stmt_info) = gatherscatter;
 	}
-
-      else if (is_a <loop_vec_info> (vinfo)
-	       && TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
-	{
-	  if (nested_in_vect_loop_p (loop, stmt))
-	    {
-	      if (dump_enabled_p ())
-		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, 
-                                   "not vectorized: not suitable for strided "
-                                   "load ");
-		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
-		}
-	      return false;
-	    }
-	  STMT_VINFO_STRIDED_P (stmt_info) = true;
-	}
     }
 
-  /* If we stopped analysis at the first dataref we could not analyze
-     when trying to vectorize a basic-block mark the rest of the datarefs
-     as not vectorizable and truncate the vector of datarefs.  That
-     avoids spending useless time in analyzing their dependence.  */
-  if (i != datarefs.length ())
-    {
-      gcc_assert (is_a <bb_vec_info> (vinfo));
-      for (unsigned j = i; j < datarefs.length (); ++j)
-	{
-	  data_reference_p dr = datarefs[j];
-          STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
-	  free_data_ref (dr);
-	}
-      datarefs.truncate (i);
-    }
-
-  return true;
+  /* We used to stop processing and prune the list here.  Verify we no
+     longer need to.  */
+  gcc_assert (i == datarefs.length ());
+
+  return opt_result::success ();
 }
 
 
@@ -3995,18 +4347,18 @@
   return new_vect_var;
 }
 
-/* Duplicate ptr info and set alignment/misalignment on NAME from DR.  */
+/* Duplicate ptr info and set alignment/misalignment on NAME from DR_INFO.  */
 
 static void
-vect_duplicate_ssa_name_ptr_info (tree name, data_reference *dr)
+vect_duplicate_ssa_name_ptr_info (tree name, dr_vec_info *dr_info)
 {
-  duplicate_ssa_name_ptr_info (name, DR_PTR_INFO (dr));
-  int misalign = DR_MISALIGNMENT (dr);
+  duplicate_ssa_name_ptr_info (name, DR_PTR_INFO (dr_info->dr));
+  int misalign = DR_MISALIGNMENT (dr_info);
   if (misalign == DR_MISALIGNMENT_UNKNOWN)
     mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (name));
   else
     set_ptr_info_alignment (SSA_NAME_PTR_INFO (name),
-			    DR_TARGET_ALIGNMENT (dr), misalign);
+			    DR_TARGET_ALIGNMENT (dr_info), misalign);
 }
 
 /* Function vect_create_addr_base_for_vector_ref.
@@ -4015,7 +4367,7 @@
    that will be accessed for a data reference.
 
    Input:
-   STMT: The statement containing the data reference.
+   STMT_INFO: The statement containing the data reference.
    NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
    OFFSET: Optional. If supplied, it is added to the initial address.
    LOOP:    Specify relative to which loop-nest should the address be computed.
@@ -4044,13 +4396,13 @@
    FORNOW: We are only handling array accesses with step 1.  */
 
 tree
-vect_create_addr_base_for_vector_ref (gimple *stmt,
+vect_create_addr_base_for_vector_ref (stmt_vec_info stmt_info,
 				      gimple_seq *new_stmt_list,
 				      tree offset,
 				      tree byte_offset)
 {
-  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
-  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
+  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
+  struct data_reference *dr = dr_info->dr;
   const char *base_name;
   tree addr_base;
   tree dest;
@@ -4058,7 +4410,7 @@
   tree vect_ptr_type;
   tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
-  innermost_loop_behavior *drb = vect_dr_behavior (dr);
+  innermost_loop_behavior *drb = vect_dr_behavior (dr_info);
 
   tree data_ref_base = unshare_expr (drb->base_address);
   tree base_offset = unshare_expr (drb->offset);
@@ -4111,17 +4463,13 @@
       && TREE_CODE (addr_base) == SSA_NAME
       && !SSA_NAME_PTR_INFO (addr_base))
     {
-      vect_duplicate_ssa_name_ptr_info (addr_base, dr);
+      vect_duplicate_ssa_name_ptr_info (addr_base, dr_info);
       if (offset || byte_offset)
 	mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base));
     }
 
   if (dump_enabled_p ())
-    {
-      dump_printf_loc (MSG_NOTE, vect_location, "created ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, addr_base);
-      dump_printf (MSG_NOTE, "\n");
-    }
+    dump_printf_loc (MSG_NOTE, vect_location, "created %T\n", addr_base);
 
   return addr_base;
 }
@@ -4130,28 +4478,32 @@
 /* Function vect_create_data_ref_ptr.
 
    Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
-   location accessed in the loop by STMT, along with the def-use update
+   location accessed in the loop by STMT_INFO, along with the def-use update
    chain to appropriately advance the pointer through the loop iterations.
    Also set aliasing information for the pointer.  This pointer is used by
    the callers to this function to create a memory reference expression for
    vector load/store access.
 
    Input:
-   1. STMT: a stmt that references memory. Expected to be of the form
+   1. STMT_INFO: a stmt that references memory. Expected to be of the form
          GIMPLE_ASSIGN <name, data-ref> or
 	 GIMPLE_ASSIGN <data-ref, name>.
    2. AGGR_TYPE: the type of the reference, which should be either a vector
         or an array.
    3. AT_LOOP: the loop where the vector memref is to be created.
    4. OFFSET (optional): an offset to be added to the initial address accessed
-        by the data-ref in STMT.
+	by the data-ref in STMT_INFO.
    5. BSI: location where the new stmts are to be placed if there is no loop
    6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
         pointing to the initial address.
    7. BYTE_OFFSET (optional, defaults to NULL): a byte offset to be added
-	to the initial address accessed by the data-ref in STMT.  This is
+	to the initial address accessed by the data-ref in STMT_INFO.  This is
 	similar to OFFSET, but OFFSET is counted in elements, while BYTE_OFFSET
 	in bytes.
+   8. IV_STEP (optional, defaults to NULL): the amount that should be added
+	to the IV during each iteration of the loop.  NULL says to move
+	by one copy of AGGR_TYPE up or down, depending on the step of the
+	data reference.
 
    Output:
    1. Declare a new ptr to vector_type, and have it point to the base of the
@@ -4175,19 +4527,16 @@
 
       Return the increment stmt that updates the pointer in PTR_INCR.
 
-   3. Set INV_P to true if the access pattern of the data reference in the
-      vectorized loop is invariant.  Set it to false otherwise.
-
-   4. Return the pointer.  */
+   3. Return the pointer.  */
 
 tree
-vect_create_data_ref_ptr (gimple *stmt, tree aggr_type, struct loop *at_loop,
-			  tree offset, tree *initial_address,
-			  gimple_stmt_iterator *gsi, gimple **ptr_incr,
-			  bool only_init, bool *inv_p, tree byte_offset)
+vect_create_data_ref_ptr (stmt_vec_info stmt_info, tree aggr_type,
+			  struct loop *at_loop, tree offset,
+			  tree *initial_address, gimple_stmt_iterator *gsi,
+			  gimple **ptr_incr, bool only_init,
+			  tree byte_offset, tree iv_step)
 {
   const char *base_name;
-  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
   struct loop *loop = NULL;
   bool nested_in_vect_loop = false;
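
As a sketch of the reworked interface (the variable names here are illustrative, not taken from this patch): a caller that wants the default per-iteration bump now passes NULL_TREE for the trailing IV_STEP argument instead of receiving an INV_P flag back:

    gimple *ptr_incr = NULL;
    tree init_addr;
    tree dataref_ptr
      = vect_create_data_ref_ptr (stmt_info, vectype, loop, NULL_TREE,
                                  &init_addr, gsi, &ptr_incr,
                                  false /* only_init */,
                                  NULL_TREE /* byte_offset */,
                                  NULL_TREE /* iv_step: default bump */);
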
@@ -4199,23 +4548,24 @@
   edge pe = NULL;
   basic_block new_bb;
   tree aggr_ptr_init;
-  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
+  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
+  struct data_reference *dr = dr_info->dr;
   tree aptr;
   gimple_stmt_iterator incr_gsi;
   bool insert_after;
   tree indx_before_incr, indx_after_incr;
   gimple *incr;
-  tree step;
   bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
 
-  gcc_assert (TREE_CODE (aggr_type) == ARRAY_TYPE
+  gcc_assert (iv_step != NULL_TREE
+	      || TREE_CODE (aggr_type) == ARRAY_TYPE
 	      || TREE_CODE (aggr_type) == VECTOR_TYPE);
 
   if (loop_vinfo)
     {
       loop = LOOP_VINFO_LOOP (loop_vinfo);
-      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
-      containing_loop = (gimple_bb (stmt))->loop_father;
+      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
+      containing_loop = (gimple_bb (stmt_info->stmt))->loop_father;
       pe = loop_preheader_edge (loop);
     }
   else
@@ -4225,14 +4575,6 @@
       *ptr_incr = NULL;
     }
 
-  /* Check the step (evolution) of the load in LOOP, and record
-     whether it's invariant.  */
-  step = vect_dr_behavior (dr)->step;
-  if (integer_zerop (step))
-    *inv_p = true;
-  else
-    *inv_p = false;
-
   /* Create an expression for the first address accessed by this load
      in LOOP.  */
   base_name = get_name (DR_BASE_ADDRESS (dr));
@@ -4241,9 +4583,9 @@
     {
       tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
       dump_printf_loc (MSG_NOTE, vect_location,
-                       "create %s-pointer variable to type: ",
-		       get_tree_code_name (TREE_CODE (aggr_type)));
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, aggr_type);
+                       "create %s-pointer variable to type: %T",
+		       get_tree_code_name (TREE_CODE (aggr_type)),
+		       aggr_type);
       if (TREE_CODE (dr_base_type) == ARRAY_TYPE)
         dump_printf (MSG_NOTE, "  vectorizing an array ref: ");
       else if (TREE_CODE (dr_base_type) == VECTOR_TYPE)
@@ -4252,8 +4594,7 @@
         dump_printf (MSG_NOTE, "  vectorizing a record based array ref: ");
       else
         dump_printf (MSG_NOTE, "  vectorizing a pointer ref: ");
-      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_BASE_OBJECT (dr));
-      dump_printf (MSG_NOTE, "\n");
+      dump_printf (MSG_NOTE, "%T\n", DR_BASE_OBJECT (dr));
     }
 
   /* (1) Create the new aggregate-pointer variable.
@@ -4266,12 +4607,11 @@
 			      get_alias_set (DR_REF (dr))))
     need_ref_all = true;
   /* Likewise for any of the data references in the stmt group.  */
-  else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1)
+  else if (DR_GROUP_SIZE (stmt_info) > 1)
     {
-      gimple *orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info);
+      stmt_vec_info sinfo = DR_GROUP_FIRST_ELEMENT (stmt_info);
       do
 	{
-	  stmt_vec_info sinfo = vinfo_for_stmt (orig_stmt);
 	  struct data_reference *sdr = STMT_VINFO_DATA_REF (sinfo);
 	  if (!alias_sets_conflict_p (get_alias_set (aggr_type),
 				      get_alias_set (DR_REF (sdr))))
@@ -4279,9 +4619,9 @@
 	      need_ref_all = true;
 	      break;
 	    }
-	  orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (sinfo);
+	  sinfo = DR_GROUP_NEXT_ELEMENT (sinfo);
 	}
-      while (orig_stmt);
+      while (sinfo);
     }
   aggr_ptr_type = build_pointer_type_for_mode (aggr_type, ptr_mode,
 					       need_ref_all);
@@ -4324,7 +4664,7 @@
 
   /* Create: (&(base[init_val+offset]+byte_offset) in the loop preheader.  */
 
-  new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
+  new_temp = vect_create_addr_base_for_vector_ref (stmt_info, &new_stmt_list,
 						   offset, byte_offset);
   if (new_stmt_list)
     {
@@ -4349,14 +4689,19 @@
     aptr = aggr_ptr_init;
   else
     {
-      /* The step of the aggregate pointer is the type size.  */
-      tree iv_step = TYPE_SIZE_UNIT (aggr_type);
-      /* One exception to the above is when the scalar step of the load in
-	 LOOP is zero. In this case the step here is also zero.  */
-      if (*inv_p)
-	iv_step = size_zero_node;
-      else if (tree_int_cst_sgn (step) == -1)
-	iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
+      /* Accesses to invariant addresses should be handled specially
+	 by the caller.  */
+      tree step = vect_dr_behavior (dr_info)->step;
+      gcc_assert (!integer_zerop (step));
+
+      if (iv_step == NULL_TREE)
+	{
+	  /* The step of the aggregate pointer is the type size,
+	     negated for downward accesses.  */
+	  iv_step = TYPE_SIZE_UNIT (aggr_type);
+	  if (tree_int_cst_sgn (step) == -1)
+	    iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
+	}
 
       standard_iv_increment_position (loop, &incr_gsi, &insert_after);
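
For orientation only (an illustration, not code from this patch): with the defaulted IV_STEP of one AGGR_TYPE, say a 16-byte vector, the induction variable built just below by create_iv amounts to the usual pointer IV, with the sign flipped for a downward DR_STEP:

    # ap    = PHI <aggr_ptr_init (preheader), ap_next (latch)>
    ...vector load/store through ap...
    ap_next = ap + 16;    /* or ap + -16 for a negative step */
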
 
@@ -4365,13 +4710,13 @@
 		 aggr_ptr, loop, &incr_gsi, insert_after,
 		 &indx_before_incr, &indx_after_incr);
       incr = gsi_stmt (incr_gsi);
-      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
+      loop_vinfo->add_stmt (incr);
 
       /* Copy the points-to information if it exists. */
       if (DR_PTR_INFO (dr))
 	{
-	  vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr);
-	  vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr);
+	  vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr_info);
+	  vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr_info);
 	}
       if (ptr_incr)
 	*ptr_incr = incr;
@@ -4395,13 +4740,13 @@
 		 containing_loop, &incr_gsi, insert_after, &indx_before_incr,
 		 &indx_after_incr);
       incr = gsi_stmt (incr_gsi);
-      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
+      loop_vinfo->add_stmt (incr);
 
       /* Copy the points-to information if it exists. */
       if (DR_PTR_INFO (dr))
 	{
-	  vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr);
-	  vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr);
+	  vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr_info);
+	  vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr_info);
 	}
       if (ptr_incr)
 	*ptr_incr = incr;
@@ -4439,7 +4784,7 @@
 	      the loop.  The increment amount across iterations is expected
 	      to be vector_size.
    BSI - location where the new update stmt is to be placed.
-   STMT - the original scalar memory-access stmt that is being vectorized.
+   STMT_INFO - the original scalar memory-access stmt that is being vectorized.
    BUMP - optional. The offset by which to bump the pointer. If not given,
 	  the offset is assumed to be vector_size.
 
@@ -4449,9 +4794,8 @@
 
 tree
 bump_vector_ptr (tree dataref_ptr, gimple *ptr_incr, gimple_stmt_iterator *gsi,
-		 gimple *stmt, tree bump)
+		 stmt_vec_info stmt_info, tree bump)
 {
-  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
   struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
   tree update = TYPE_SIZE_UNIT (vectype);
@@ -4469,7 +4813,7 @@
     new_dataref_ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
   incr_stmt = gimple_build_assign (new_dataref_ptr, POINTER_PLUS_EXPR,
 				   dataref_ptr, update);
-  vect_finish_stmt_generation (stmt, incr_stmt, gsi);
+  vect_finish_stmt_generation (stmt_info, incr_stmt, gsi);
 
   /* Copy the points-to information if it exists. */
   if (DR_PTR_INFO (dr))
@@ -4489,13 +4833,34 @@
       if (use == dataref_ptr)
         SET_USE (use_p, new_dataref_ptr);
       else
-        gcc_assert (tree_int_cst_compare (use, update) == 0);
+        gcc_assert (operand_equal_p (use, update, 0));
     }
 
   return new_dataref_ptr;
 }
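
The relaxed assertion above (operand_equal_p instead of tree_int_cst_compare) presumably allows BUMP to be any tree rather than only an INTEGER_CST.  A hedged usage sketch with the new stmt_vec_info parameter, where elem_size and group_size stand for values the caller would already have computed:

    tree bump = size_binop (MULT_EXPR, elem_size, size_int (group_size));
    dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                   stmt_info, bump);
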
 
 
+/* Copy memory reference info such as base/clique from the SRC reference
+   to the DEST MEM_REF.  */
+
+void
+vect_copy_ref_info (tree dest, tree src)
+{
+  if (TREE_CODE (dest) != MEM_REF)
+    return;
+
+  tree src_base = src;
+  while (handled_component_p (src_base))
+    src_base = TREE_OPERAND (src_base, 0);
+  if (TREE_CODE (src_base) != MEM_REF
+      && TREE_CODE (src_base) != TARGET_MEM_REF)
+    return;
+
+  MR_DEPENDENCE_CLIQUE (dest) = MR_DEPENDENCE_CLIQUE (src_base);
+  MR_DEPENDENCE_BASE (dest) = MR_DEPENDENCE_BASE (src_base);
+}
+
+
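
Later hunks in this patch call the new helper immediately after building each vector MEM_REF; the pattern, as used in vect_setup_realignment below, is:

    data_ref
      = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
                build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
    vect_copy_ref_info (data_ref, DR_REF (dr));

so the dependence clique/base recorded on the scalar reference carries over to the vectorized access.
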
 /* Function vect_create_destination_var.
 
    Create a new temporary of type VECTYPE.  */
@@ -4553,15 +4918,25 @@
   /* Check that the permutation is supported.  */
   if (VECTOR_MODE_P (mode))
     {
-      unsigned int i, nelt = GET_MODE_NUNITS (mode);
-      auto_vec_perm_indices sel (nelt);
-      sel.quick_grow (nelt);
-
+      unsigned int i;
       if (count == 3)
 	{
 	  unsigned int j0 = 0, j1 = 0, j2 = 0;
 	  unsigned int i, j;
 
+	  unsigned int nelt;
+	  if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
+	    {
+	      if (dump_enabled_p ())
+		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+				 "cannot handle groups of 3 stores for"
+				 " variable-length vectors\n");
+	      return false;
+	    }
+
+	  vec_perm_builder sel (nelt, nelt, 1);
+	  sel.quick_grow (nelt);
+	  vec_perm_indices indices;
 	  for (j = 0; j < 3; j++)
 	    {
 	      int nelt0 = ((3 - j) * nelt) % 3;
@@ -4576,11 +4951,12 @@
 		  if (3 * i + nelt2 < nelt)
 		    sel[3 * i + nelt2] = 0;
 		}
-	      if (!can_vec_perm_p (mode, false, &sel))
+	      indices.new_vector (sel, 2, nelt);
+	      if (!can_vec_perm_const_p (mode, indices))
 		{
 		  if (dump_enabled_p ())
 		    dump_printf (MSG_MISSED_OPTIMIZATION,
-				 "permutaion op not supported by target.\n");
+				 "permutation op not supported by target.\n");
 		  return false;
 		}
 
@@ -4593,11 +4969,12 @@
 		  if (3 * i + nelt2 < nelt)
 		    sel[3 * i + nelt2] = nelt + j2++;
 		}
-	      if (!can_vec_perm_p (mode, false, &sel))
+	      indices.new_vector (sel, 2, nelt);
+	      if (!can_vec_perm_const_p (mode, indices))
 		{
 		  if (dump_enabled_p ())
 		    dump_printf (MSG_MISSED_OPTIMIZATION,
-				 "permutaion op not supported by target.\n");
+				 "permutation op not supported by target.\n");
 		  return false;
 		}
 	    }
@@ -4607,17 +4984,23 @@
 	{
 	  /* If length is not equal to 3 then only power of 2 is supported.  */
 	  gcc_assert (pow2p_hwi (count));
-
-	  for (i = 0; i < nelt / 2; i++)
+	  poly_uint64 nelt = GET_MODE_NUNITS (mode);
+
+	  /* The encoding has 2 interleaved stepped patterns.  */
+	  vec_perm_builder sel (nelt, 2, 3);
+	  sel.quick_grow (6);
+	  for (i = 0; i < 3; i++)
 	    {
 	      sel[i * 2] = i;
 	      sel[i * 2 + 1] = i + nelt;
 	    }
-	  if (can_vec_perm_p (mode, false, &sel))
+	  vec_perm_indices indices (sel, 2, nelt);
+	  if (can_vec_perm_const_p (mode, indices))
 	    {
-	      for (i = 0; i < nelt; i++)
-		sel[i] += nelt / 2;
-	      if (can_vec_perm_p (mode, false, &sel))
+	      for (i = 0; i < 6; i++)
+		sel[i] += exact_div (nelt, 2);
+	      indices.new_vector (sel, 2, nelt);
+	      if (can_vec_perm_const_p (mode, indices))
 		return true;
 	    }
 	}
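
The switch from a fully-populated auto_vec_perm_indices to vec_perm_builder (nelt, 2, 3) is what lets this test work for variable-length vectors: the selector is described by two interleaved patterns of three elements each and the rest is extrapolated.  A worked sketch with my own constant nelt of 8:

    vec_perm_builder sel (8, 2, 3);    /* 2 patterns, 3 elements per pattern */
    sel.quick_grow (6);
    for (unsigned int i = 0; i < 3; i++)
      {
        sel[i * 2] = i;                /* 0, 1, 2  */
        sel[i * 2 + 1] = i + 8;        /* 8, 9, 10 */
      }
    vec_perm_indices indices (sel, 2, 8);
    /* indices expands to {0, 8, 1, 9, 2, 10, 3, 11}; adding
       exact_div (nelt, 2) == 4 to each encoded element gives
       {4, 12, 5, 13, 6, 14, 7, 15}, the second interleave selector.  */
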
@@ -4625,20 +5008,26 @@
 
   if (dump_enabled_p ())
     dump_printf (MSG_MISSED_OPTIMIZATION,
-		 "permutaion op not supported by target.\n");
+		 "permutation op not supported by target.\n");
   return false;
 }
 
 
-/* Return TRUE if vec_store_lanes is available for COUNT vectors of
-   type VECTYPE.  */
+/* Return TRUE if vec_{mask_}store_lanes is available for COUNT vectors of
+   type VECTYPE.  MASKED_P says whether the masked form is needed.  */
 
 bool
-vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
+vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count,
+			    bool masked_p)
 {
-  return vect_lanes_optab_supported_p ("vec_store_lanes",
-				       vec_store_lanes_optab,
-				       vectype, count);
+  if (masked_p)
+    return vect_lanes_optab_supported_p ("vec_mask_store_lanes",
+					 vec_mask_store_lanes_optab,
+					 vectype, count);
+  else
+    return vect_lanes_optab_supported_p ("vec_store_lanes",
+					 vec_store_lanes_optab,
+					 vectype, count);
 }
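
A sketch of how the new MASKED_P argument is meant to be used (the surrounding names are illustrative, not from this patch): a masked group store may only choose the store-lanes strategy when the masked optab exists:

    bool masked_p = (mask != NULL_TREE);    /* hypothetical mask operand */
    if (vect_store_lanes_supported (vectype, group_size, masked_p))
      memory_access_type = VMAT_LOAD_STORE_LANES;
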
 
 
@@ -4706,21 +5095,17 @@
 void
 vect_permute_store_chain (vec<tree> dr_chain,
 			  unsigned int length,
-			  gimple *stmt,
+			  stmt_vec_info stmt_info,
 			  gimple_stmt_iterator *gsi,
 			  vec<tree> *result_chain)
 {
   tree vect1, vect2, high, low;
   gimple *perm_stmt;
-  tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
+  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
   tree perm_mask_low, perm_mask_high;
   tree data_ref;
   tree perm3_mask_low, perm3_mask_high;
-  unsigned int i, n, log_length = exact_log2 (length);
-  unsigned int j, nelt = TYPE_VECTOR_SUBPARTS (vectype);
-
-  auto_vec_perm_indices sel (nelt);
-  sel.quick_grow (nelt);
+  unsigned int i, j, n, log_length = exact_log2 (length);
 
   result_chain->quick_grow (length);
   memcpy (result_chain->address (), dr_chain.address (),
@@ -4728,8 +5113,13 @@
 
   if (length == 3)
     {
+      /* vect_grouped_store_supported ensures that this is constant.  */
+      unsigned int nelt = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
       unsigned int j0 = 0, j1 = 0, j2 = 0;
 
+      vec_perm_builder sel (nelt, nelt, 1);
+      sel.quick_grow (nelt);
+      vec_perm_indices indices;
       for (j = 0; j < 3; j++)
         {
 	  int nelt0 = ((3 - j) * nelt) % 3;
@@ -4745,7 +5135,8 @@
 	      if (3 * i + nelt2 < nelt)
 		sel[3 * i + nelt2] = 0;
 	    }
-	  perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);
+	  indices.new_vector (sel, 2, nelt);
+	  perm3_mask_low = vect_gen_perm_mask_checked (vectype, indices);
 
 	  for (i = 0; i < nelt; i++)
 	    {
@@ -4756,7 +5147,8 @@
 	      if (3 * i + nelt2 < nelt)
 		sel[3 * i + nelt2] = nelt + j2++;
 	    }
-	  perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);
+	  indices.new_vector (sel, 2, nelt);
+	  perm3_mask_high = vect_gen_perm_mask_checked (vectype, indices);
 
 	  vect1 = dr_chain[0];
 	  vect2 = dr_chain[1];
@@ -4768,7 +5160,7 @@
 	  data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
 	  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
 					   vect2, perm3_mask_low);
-	  vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+	  vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
 
 	  vect1 = data_ref;
 	  vect2 = dr_chain[2];
@@ -4779,7 +5171,7 @@
 	  data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
 	  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
 					   vect2, perm3_mask_high);
-	  vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+	  vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
 	  (*result_chain)[j] = data_ref;
 	}
     }
@@ -4788,16 +5180,22 @@
       /* If length is not equal to 3 then only power of 2 is supported.  */
       gcc_assert (pow2p_hwi (length));
 
-      for (i = 0, n = nelt / 2; i < n; i++)
+      /* The encoding has 2 interleaved stepped patterns.  */
+      poly_uint64 nelt = TYPE_VECTOR_SUBPARTS (vectype);
+      vec_perm_builder sel (nelt, 2, 3);
+      sel.quick_grow (6);
+      for (i = 0; i < 3; i++)
 	{
 	  sel[i * 2] = i;
 	  sel[i * 2 + 1] = i + nelt;
 	}
-	perm_mask_high = vect_gen_perm_mask_checked (vectype, sel);
-
-	for (i = 0; i < nelt; i++)
-	  sel[i] += nelt / 2;
-	perm_mask_low = vect_gen_perm_mask_checked (vectype, sel);
+	vec_perm_indices indices (sel, 2, nelt);
+	perm_mask_high = vect_gen_perm_mask_checked (vectype, indices);
+
+	for (i = 0; i < 6; i++)
+	  sel[i] += exact_div (nelt, 2);
+	indices.new_vector (sel, 2, nelt);
+	perm_mask_low = vect_gen_perm_mask_checked (vectype, indices);
 
 	for (i = 0, n = log_length; i < n; i++)
 	  {
@@ -4812,7 +5210,7 @@
 		high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
 		perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1,
 						 vect2, perm_mask_high);
-		vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+		vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
 		(*result_chain)[2*j] = high;
 
 		/* Create interleaving stmt:
@@ -4822,7 +5220,7 @@
 		low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
 		perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1,
 						 vect2, perm_mask_low);
-		vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+		vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
 		(*result_chain)[2*j+1] = low;
 	      }
 	    memcpy (dr_chain.address (), result_chain->address (),
@@ -4847,12 +5245,12 @@
    dr_explicit_realign_optimized.
 
    The code above sets up a new (vector) pointer, pointing to the first
-   location accessed by STMT, and a "floor-aligned" load using that pointer.
-   It also generates code to compute the "realignment-token" (if the relevant
-   target hook was defined), and creates a phi-node at the loop-header bb
-   whose arguments are the result of the prolog-load (created by this
-   function) and the result of a load that takes place in the loop (to be
-   created by the caller to this function).
+   location accessed by STMT_INFO, and a "floor-aligned" load using that
+   pointer.  It also generates code to compute the "realignment-token"
+   (if the relevant target hook was defined), and creates a phi-node at the
+   loop-header bb whose arguments are the result of the prolog-load (created
+   by this function) and the result of a load that takes place in the loop
+   (to be created by the caller to this function).
 
    For the case of dr_explicit_realign_optimized:
    The caller to this function uses the phi-result (msq) to create the
@@ -4871,8 +5269,8 @@
       result = realign_load (msq, lsq, realignment_token);
 
    Input:
-   STMT - (scalar) load stmt to be vectorized. This load accesses
-          a memory location that may be unaligned.
+   STMT_INFO - (scalar) load stmt to be vectorized. This load accesses
+	       a memory location that may be unaligned.
    BSI - place where new code is to be inserted.
    ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
 			      is used.
@@ -4883,19 +5281,19 @@
    Return value - the result of the loop-header phi node.  */
 
 tree
-vect_setup_realignment (gimple *stmt, gimple_stmt_iterator *gsi,
+vect_setup_realignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
                         tree *realignment_token,
 			enum dr_alignment_support alignment_support_scheme,
 			tree init_addr,
 			struct loop **at_loop)
 {
-  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
-  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
+  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
+  struct data_reference *dr = dr_info->dr;
   struct loop *loop = NULL;
   edge pe = NULL;
-  tree scalar_dest = gimple_assign_lhs (stmt);
+  tree scalar_dest = gimple_assign_lhs (stmt_info->stmt);
   tree vec_dest;
   gimple *inc;
   tree ptr;
@@ -4906,16 +5304,15 @@
   gphi *phi_stmt;
   tree msq = NULL_TREE;
   gimple_seq stmts = NULL;
-  bool inv_p;
   bool compute_in_loop = false;
   bool nested_in_vect_loop = false;
-  struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
+  struct loop *containing_loop = (gimple_bb (stmt_info->stmt))->loop_father;
   struct loop *loop_for_initial_load = NULL;
 
   if (loop_vinfo)
     {
       loop = LOOP_VINFO_LOOP (loop_vinfo);
-      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
+      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
     }
 
   gcc_assert (alignment_support_scheme == dr_explicit_realign
@@ -4998,14 +5395,14 @@
 
       gcc_assert (!compute_in_loop);
       vec_dest = vect_create_destination_var (scalar_dest, vectype);
-      ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
-				      NULL_TREE, &init_addr, NULL, &inc,
-				      true, &inv_p);
+      ptr = vect_create_data_ref_ptr (stmt_info, vectype,
+				      loop_for_initial_load, NULL_TREE,
+				      &init_addr, NULL, &inc, true);
       if (TREE_CODE (ptr) == SSA_NAME)
 	new_temp = copy_ssa_name (ptr);
       else
 	new_temp = make_ssa_name (TREE_TYPE (ptr));
-      unsigned int align = DR_TARGET_ALIGNMENT (dr);
+      unsigned int align = DR_TARGET_ALIGNMENT (dr_info);
       new_stmt = gimple_build_assign
 		   (new_temp, BIT_AND_EXPR, ptr,
 		    build_int_cst (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
@@ -5014,6 +5411,7 @@
       data_ref
 	= build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
 		  build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
+      vect_copy_ref_info (data_ref, DR_REF (dr));
       new_stmt = gimple_build_assign (vec_dest, data_ref);
       new_temp = make_ssa_name (vec_dest, new_stmt);
       gimple_assign_set_lhs (new_stmt, new_temp);
@@ -5041,7 +5439,7 @@
       if (!init_addr)
 	{
 	  /* Generate the INIT_ADDR computation outside LOOP.  */
-	  init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
+	  init_addr = vect_create_addr_base_for_vector_ref (stmt_info, &stmts,
 							    NULL_TREE);
           if (loop)
             {
@@ -5118,7 +5516,7 @@
      that leaves unused vector loads around punt - we at least create
      very sub-optimal code in that case (and blow up memory,
      see PR65518).  */
-  if (single_element_p && count > TYPE_VECTOR_SUBPARTS (vectype))
+  if (single_element_p && maybe_gt (count, TYPE_VECTOR_SUBPARTS (vectype)))
     {
       if (dump_enabled_p ())
 	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -5141,12 +5539,22 @@
   /* Check that the permutation is supported.  */
   if (VECTOR_MODE_P (mode))
     {
-      unsigned int i, j, nelt = GET_MODE_NUNITS (mode);
-      auto_vec_perm_indices sel (nelt);
-      sel.quick_grow (nelt);
-
+      unsigned int i, j;
       if (count == 3)
 	{
+	  unsigned int nelt;
+	  if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
+	    {
+	      if (dump_enabled_p ())
+		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+				 "cannot handle groups of 3 loads for"
+				 " variable-length vectors\n");
+	      return false;
+	    }
+
+	  vec_perm_builder sel (nelt, nelt, 1);
+	  sel.quick_grow (nelt);
+	  vec_perm_indices indices;
 	  unsigned int k;
 	  for (k = 0; k < 3; k++)
 	    {
@@ -5155,7 +5563,8 @@
 		  sel[i] = 3 * i + k;
 		else
 		  sel[i] = 0;
-	      if (!can_vec_perm_p (mode, false, &sel))
+	      indices.new_vector (sel, 2, nelt);
+	      if (!can_vec_perm_const_p (mode, indices))
 		{
 		  if (dump_enabled_p ())
 		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -5168,7 +5577,8 @@
 		  sel[i] = i;
 		else
 		  sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
-	      if (!can_vec_perm_p (mode, false, &sel))
+	      indices.new_vector (sel, 2, nelt);
+	      if (!can_vec_perm_const_p (mode, indices))
 		{
 		  if (dump_enabled_p ())
 		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -5183,13 +5593,20 @@
 	{
 	  /* If length is not equal to 3 then only power of 2 is supported.  */
 	  gcc_assert (pow2p_hwi (count));
-	  for (i = 0; i < nelt; i++)
+	  poly_uint64 nelt = GET_MODE_NUNITS (mode);
+
+	  /* The encoding has a single stepped pattern.  */
+	  vec_perm_builder sel (nelt, 1, 3);
+	  sel.quick_grow (3);
+	  for (i = 0; i < 3; i++)
 	    sel[i] = i * 2;
-	  if (can_vec_perm_p (mode, false, &sel))
+	  vec_perm_indices indices (sel, 2, nelt);
+	  if (can_vec_perm_const_p (mode, indices))
 	    {
-	      for (i = 0; i < nelt; i++)
+	      for (i = 0; i < 3; i++)
 		sel[i] = i * 2 + 1;
-	      if (can_vec_perm_p (mode, false, &sel))
+	      indices.new_vector (sel, 2, nelt);
+	      if (can_vec_perm_const_p (mode, indices))
 		return true;
 	    }
         }
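
Worked illustration of the single stepped pattern used here (my numbers, nelt == 8): the three encoded elements {0, 2, 4} extrapolate to the even-element selector {0, 2, 4, 6, 8, 10, 12, 14}, and {1, 3, 5} to the odd selector {1, 3, 5, 7, 9, 11, 13, 15}.  Because only three elements need to be spelled out, the same can_vec_perm_const_p query also works when the number of vector units is not a compile-time constant.
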
@@ -5201,15 +5618,21 @@
   return false;
 }
 
-/* Return TRUE if vec_load_lanes is available for COUNT vectors of
-   type VECTYPE.  */
+/* Return TRUE if vec_{mask_}load_lanes is available for COUNT vectors of
+   type VECTYPE.  MASKED_P says whether the masked form is needed.  */
 
 bool
-vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
+vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count,
+			   bool masked_p)
 {
-  return vect_lanes_optab_supported_p ("vec_load_lanes",
-				       vec_load_lanes_optab,
-				       vectype, count);
+  if (masked_p)
+    return vect_lanes_optab_supported_p ("vec_mask_load_lanes",
+					 vec_mask_load_lanes_optab,
+					 vectype, count);
+  else
+    return vect_lanes_optab_supported_p ("vec_load_lanes",
+					 vec_load_lanes_optab,
+					 vectype, count);
 }
 
 /* Function vect_permute_load_chain.
@@ -5292,7 +5715,7 @@
 static void
 vect_permute_load_chain (vec<tree> dr_chain,
 			 unsigned int length,
-			 gimple *stmt,
+			 stmt_vec_info stmt_info,
 			 gimple_stmt_iterator *gsi,
 			 vec<tree> *result_chain)
 {
@@ -5300,12 +5723,8 @@
   tree perm_mask_even, perm_mask_odd;
   tree perm3_mask_low, perm3_mask_high;
   gimple *perm_stmt;
-  tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
+  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
   unsigned int i, j, log_length = exact_log2 (length);
-  unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
-
-  auto_vec_perm_indices sel (nelt);
-  sel.quick_grow (nelt);
 
   result_chain->quick_grow (length);
   memcpy (result_chain->address (), dr_chain.address (),
@@ -5313,8 +5732,13 @@
 
   if (length == 3)
     {
+      /* vect_grouped_load_supported ensures that this is constant.  */
+      unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
       unsigned int k;
 
+      vec_perm_builder sel (nelt, nelt, 1);
+      sel.quick_grow (nelt);
+      vec_perm_indices indices;
       for (k = 0; k < 3; k++)
 	{
 	  for (i = 0; i < nelt; i++)
@@ -5322,15 +5746,16 @@
 	      sel[i] = 3 * i + k;
 	    else
 	      sel[i] = 0;
-	  perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);
+	  indices.new_vector (sel, 2, nelt);
+	  perm3_mask_low = vect_gen_perm_mask_checked (vectype, indices);
 
 	  for (i = 0, j = 0; i < nelt; i++)
 	    if (3 * i + k < 2 * nelt)
 	      sel[i] = i;
 	    else
 	      sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
-
-	  perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);
+	  indices.new_vector (sel, 2, nelt);
+	  perm3_mask_high = vect_gen_perm_mask_checked (vectype, indices);
 
 	  first_vect = dr_chain[0];
 	  second_vect = dr_chain[1];
@@ -5341,7 +5766,7 @@
 	  data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
 	  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
 					   second_vect, perm3_mask_low);
-	  vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+	  vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
 
 	  /* Create interleaving stmt (high part of):
 	     high = VEC_PERM_EXPR <first_vect, second_vect2, {k, 3 + k, 6 + k,
@@ -5351,7 +5776,7 @@
 	  data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
 	  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
 					   second_vect, perm3_mask_high);
-	  vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+	  vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
 	  (*result_chain)[k] = data_ref;
 	}
     }
@@ -5360,13 +5785,19 @@
       /* If length is not equal to 3 then only power of 2 is supported.  */
       gcc_assert (pow2p_hwi (length));
 
-      for (i = 0; i < nelt; ++i)
+      /* The encoding has a single stepped pattern.  */
+      poly_uint64 nelt = TYPE_VECTOR_SUBPARTS (vectype);
+      vec_perm_builder sel (nelt, 1, 3);
+      sel.quick_grow (3);
+      for (i = 0; i < 3; ++i)
 	sel[i] = i * 2;
-      perm_mask_even = vect_gen_perm_mask_checked (vectype, sel);
-
-      for (i = 0; i < nelt; ++i)
+      vec_perm_indices indices (sel, 2, nelt);
+      perm_mask_even = vect_gen_perm_mask_checked (vectype, indices);
+
+      for (i = 0; i < 3; ++i)
 	sel[i] = i * 2 + 1;
-      perm_mask_odd = vect_gen_perm_mask_checked (vectype, sel);
+      indices.new_vector (sel, 2, nelt);
+      perm_mask_odd = vect_gen_perm_mask_checked (vectype, indices);
 
       for (i = 0; i < log_length; i++)
 	{
@@ -5380,7 +5811,7 @@
 	      perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
 					       first_vect, second_vect,
 					       perm_mask_even);
-	      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+	      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
 	      (*result_chain)[j/2] = data_ref;
 
 	      /* data_ref = permute_odd (first_data_ref, second_data_ref);  */
@@ -5388,7 +5819,7 @@
 	      perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
 					       first_vect, second_vect,
 					       perm_mask_odd);
-	      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+	      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
 	      (*result_chain)[j/2+length/2] = data_ref;
 	    }
 	  memcpy (dr_chain.address (), result_chain->address (),
@@ -5487,7 +5918,7 @@
 static bool
 vect_shift_permute_load_chain (vec<tree> dr_chain,
 			       unsigned int length,
-			       gimple *stmt,
+			       stmt_vec_info stmt_info,
 			       gimple_stmt_iterator *gsi,
 			       vec<tree> *result_chain)
 {
@@ -5496,27 +5927,32 @@
   tree select_mask, shift1_mask, shift2_mask, shift3_mask, shift4_mask;
   gimple *perm_stmt;
 
-  tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
+  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
   unsigned int i;
-  unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
-  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
 
-  auto_vec_perm_indices sel (nelt);
+  unsigned HOST_WIDE_INT nelt, vf;
+  if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nelt)
+      || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
+    /* Not supported for variable-length vectors.  */
+    return false;
+
+  vec_perm_builder sel (nelt, nelt, 1);
   sel.quick_grow (nelt);
 
   result_chain->quick_grow (length);
   memcpy (result_chain->address (), dr_chain.address (),
 	  length * sizeof (tree));
 
-  if (pow2p_hwi (length) && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4)
+  if (pow2p_hwi (length) && vf > 4)
     {
       unsigned int j, log_length = exact_log2 (length);
       for (i = 0; i < nelt / 2; ++i)
 	sel[i] = i * 2;
       for (i = 0; i < nelt / 2; ++i)
 	sel[nelt / 2 + i] = i * 2 + 1;
-      if (!can_vec_perm_p (TYPE_MODE (vectype), false, &sel))
+      vec_perm_indices indices (sel, 2, nelt);
+      if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
 	{
 	  if (dump_enabled_p ())
 	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -5524,13 +5960,14 @@
 			      supported by target\n");
 	  return false;
 	}
-      perm2_mask1 = vect_gen_perm_mask_checked (vectype, sel);
+      perm2_mask1 = vect_gen_perm_mask_checked (vectype, indices);
 
       for (i = 0; i < nelt / 2; ++i)
 	sel[i] = i * 2 + 1;
       for (i = 0; i < nelt / 2; ++i)
 	sel[nelt / 2 + i] = i * 2;
-      if (!can_vec_perm_p (TYPE_MODE (vectype), false, &sel))
+      indices.new_vector (sel, 2, nelt);
+      if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
 	{
 	  if (dump_enabled_p ())
 	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -5538,20 +5975,21 @@
 			      supported by target\n");
 	  return false;
 	}
-      perm2_mask2 = vect_gen_perm_mask_checked (vectype, sel);
+      perm2_mask2 = vect_gen_perm_mask_checked (vectype, indices);
 
       /* Generating permutation constant to shift all elements.
 	 For vector length 8 it is {4 5 6 7 8 9 10 11}.  */
       for (i = 0; i < nelt; i++)
 	sel[i] = nelt / 2 + i;
-      if (!can_vec_perm_p (TYPE_MODE (vectype), false, &sel))
+      indices.new_vector (sel, 2, nelt);
+      if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
 	{
 	  if (dump_enabled_p ())
 	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
 			     "shift permutation is not supported by target\n");
 	  return false;
 	}
-      shift1_mask = vect_gen_perm_mask_checked (vectype, sel);
+      shift1_mask = vect_gen_perm_mask_checked (vectype, indices);
 
       /* Generating permutation constant to select vector from 2.
 	 For vector length 8 it is {0 1 2 3 12 13 14 15}.  */
@@ -5559,14 +5997,15 @@
 	sel[i] = i;
       for (i = nelt / 2; i < nelt; i++)
 	sel[i] = nelt + i;
-      if (!can_vec_perm_p (TYPE_MODE (vectype), false, &sel))
+      indices.new_vector (sel, 2, nelt);
+      if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
 	{
 	  if (dump_enabled_p ())
 	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
 			     "select is not supported by target\n");
 	  return false;
 	}
-      select_mask = vect_gen_perm_mask_checked (vectype, sel);
+      select_mask = vect_gen_perm_mask_checked (vectype, indices);
 
       for (i = 0; i < log_length; i++)
 	{
@@ -5579,26 +6018,26 @@
 	      perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
 					       first_vect, first_vect,
 					       perm2_mask1);
-	      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+	      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
 	      vect[0] = data_ref;
 
 	      data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
 	      perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
 					       second_vect, second_vect,
 					       perm2_mask2);
-	      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+	      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
 	      vect[1] = data_ref;
 
 	      data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift");
 	      perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
 					       vect[0], vect[1], shift1_mask);
-	      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+	      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
 	      (*result_chain)[j/2 + length/2] = data_ref;
 
 	      data_ref = make_temp_ssa_name (vectype, NULL, "vect_select");
 	      perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
 					       vect[0], vect[1], select_mask);
-	      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+	      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
 	      (*result_chain)[j/2] = data_ref;
 	    }
 	  memcpy (dr_chain.address (), result_chain->address (),
@@ -5606,7 +6045,7 @@
 	}
       return true;
     }
-  if (length == 3 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 2)
+  if (length == 3 && vf > 2)
     {
       unsigned int k = 0, l = 0;
 
@@ -5622,7 +6061,8 @@
 	  sel[i] = 3 * k + (l % 3);
 	  k++;
 	}
-      if (!can_vec_perm_p (TYPE_MODE (vectype), false, &sel))
+      vec_perm_indices indices (sel, 2, nelt);
+      if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
 	{
 	  if (dump_enabled_p ())
 	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -5630,59 +6070,63 @@
 			      supported by target\n");
 	  return false;
 	}
-      perm3_mask = vect_gen_perm_mask_checked (vectype, sel);
+      perm3_mask = vect_gen_perm_mask_checked (vectype, indices);
 
       /* Generating permutation constant to shift all elements.
 	 For vector length 8 it is {6 7 8 9 10 11 12 13}.  */
       for (i = 0; i < nelt; i++)
 	sel[i] = 2 * (nelt / 3) + (nelt % 3) + i;
-      if (!can_vec_perm_p (TYPE_MODE (vectype), false, &sel))
+      indices.new_vector (sel, 2, nelt);
+      if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
 	{
 	  if (dump_enabled_p ())
 	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
 			     "shift permutation is not supported by target\n");
 	  return false;
 	}
-      shift1_mask = vect_gen_perm_mask_checked (vectype, sel);
+      shift1_mask = vect_gen_perm_mask_checked (vectype, indices);
 
       /* Generating permutation constant to shift all elements.
 	 For vector length 8 it is {5 6 7 8 9 10 11 12}.  */
       for (i = 0; i < nelt; i++)
 	sel[i] = 2 * (nelt / 3) + 1 + i;
-      if (!can_vec_perm_p (TYPE_MODE (vectype), false, &sel))
+      indices.new_vector (sel, 2, nelt);
+      if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
 	{
 	  if (dump_enabled_p ())
 	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
 			     "shift permutation is not supported by target\n");
 	  return false;
 	}
-      shift2_mask = vect_gen_perm_mask_checked (vectype, sel);
+      shift2_mask = vect_gen_perm_mask_checked (vectype, indices);
 
       /* Generating permutation constant to shift all elements.
 	 For vector length 8 it is {3 4 5 6 7 8 9 10}.  */
       for (i = 0; i < nelt; i++)
 	sel[i] = (nelt / 3) + (nelt % 3) / 2 + i;
-      if (!can_vec_perm_p (TYPE_MODE (vectype), false, &sel))
+      indices.new_vector (sel, 2, nelt);
+      if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
 	{
 	  if (dump_enabled_p ())
 	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
 			     "shift permutation is not supported by target\n");
 	  return false;
 	}
-      shift3_mask = vect_gen_perm_mask_checked (vectype, sel);
+      shift3_mask = vect_gen_perm_mask_checked (vectype, indices);
 
       /* Generating permutation constant to shift all elements.
 	 For vector length 8 it is {5 6 7 8 9 10 11 12}.  */
       for (i = 0; i < nelt; i++)
 	sel[i] = 2 * (nelt / 3) + (nelt % 3) / 2 + i;
-      if (!can_vec_perm_p (TYPE_MODE (vectype), false, &sel))
+      indices.new_vector (sel, 2, nelt);
+      if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
 	{
 	  if (dump_enabled_p ())
 	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
 			     "shift permutation is not supported by target\n");
 	  return false;
 	}
-      shift4_mask = vect_gen_perm_mask_checked (vectype, sel);
+      shift4_mask = vect_gen_perm_mask_checked (vectype, indices);
 
       for (k = 0; k < 3; k++)
 	{
@@ -5690,7 +6134,7 @@
 	  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
 					   dr_chain[k], dr_chain[k],
 					   perm3_mask);
-	  vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+	  vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
 	  vect[k] = data_ref;
 	}
 
@@ -5700,7 +6144,7 @@
 	  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
 					   vect[k % 3], vect[(k + 1) % 3],
 					   shift1_mask);
-	  vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+	  vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
 	  vect_shift[k] = data_ref;
 	}
 
@@ -5711,7 +6155,7 @@
 					   vect_shift[(4 - k) % 3],
 					   vect_shift[(3 - k) % 3],
 					   shift2_mask);
-	  vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+	  vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
 	  vect[k] = data_ref;
 	}
 
@@ -5720,13 +6164,13 @@
       data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift3");
       perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[0],
 				       vect[0], shift3_mask);
-      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
       (*result_chain)[nelt % 3] = data_ref;
 
       data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift4");
       perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[1],
 				       vect[1], shift4_mask);
-      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
       (*result_chain)[0] = data_ref;
       return true;
     }
@@ -5741,8 +6185,8 @@
 */
 
 void
-vect_transform_grouped_load (gimple *stmt, vec<tree> dr_chain, int size,
-			     gimple_stmt_iterator *gsi)
+vect_transform_grouped_load (stmt_vec_info stmt_info, vec<tree> dr_chain,
+			     int size, gimple_stmt_iterator *gsi)
 {
   machine_mode mode;
   vec<tree> result_chain = vNULL;
@@ -5755,85 +6199,84 @@
   /* If the reassociation width for the vector type is 2 or greater, the
      target machine can execute 2 or more vector instructions in parallel.
      Otherwise try to get the chain for the loads group using
      vect_shift_permute_load_chain.  */
-  mode = TYPE_MODE (STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)));
+  mode = TYPE_MODE (STMT_VINFO_VECTYPE (stmt_info));
   if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1
       || pow2p_hwi (size)
-      || !vect_shift_permute_load_chain (dr_chain, size, stmt,
+      || !vect_shift_permute_load_chain (dr_chain, size, stmt_info,
 					 gsi, &result_chain))
-    vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
-  vect_record_grouped_load_vectors (stmt, result_chain);
+    vect_permute_load_chain (dr_chain, size, stmt_info, gsi, &result_chain);
+  vect_record_grouped_load_vectors (stmt_info, result_chain);
   result_chain.release ();
 }
 
 /* RESULT_CHAIN contains the output of a group of grouped loads that were
-   generated as part of the vectorization of STMT.  Assign the statement
+   generated as part of the vectorization of STMT_INFO.  Assign the statement
    for each vector to the associated scalar statement.  */
 
 void
-vect_record_grouped_load_vectors (gimple *stmt, vec<tree> result_chain)
+vect_record_grouped_load_vectors (stmt_vec_info stmt_info,
+				  vec<tree> result_chain)
 {
-  gimple *first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
-  gimple *next_stmt, *new_stmt;
+  vec_info *vinfo = stmt_info->vinfo;
+  stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
   unsigned int i, gap_count;
   tree tmp_data_ref;
 
   /* Put a permuted data-ref in the VECTORIZED_STMT field.
      Since we scan the chain starting from its first node, their order
      corresponds to the order of data-refs in RESULT_CHAIN.  */
-  next_stmt = first_stmt;
+  stmt_vec_info next_stmt_info = first_stmt_info;
   gap_count = 1;
   FOR_EACH_VEC_ELT (result_chain, i, tmp_data_ref)
     {
-      if (!next_stmt)
+      if (!next_stmt_info)
 	break;
 
       /* Skip the gaps.  Loads created for the gaps will be removed by dead
        code elimination pass later.  No need to check for the first stmt in
        the group, since it always exists.
-       GROUP_GAP is the number of steps in elements from the previous
-       access (if there is no gap GROUP_GAP is 1).  We skip loads that
+       DR_GROUP_GAP is the number of steps in elements from the previous
+       access (if there is no gap DR_GROUP_GAP is 1).  We skip loads that
        correspond to the gaps.  */
-      if (next_stmt != first_stmt
-          && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
+      if (next_stmt_info != first_stmt_info
+	  && gap_count < DR_GROUP_GAP (next_stmt_info))
       {
         gap_count++;
         continue;
       }
 
-      while (next_stmt)
+      while (next_stmt_info)
         {
-	  new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
+	  stmt_vec_info new_stmt_info = vinfo->lookup_def (tmp_data_ref);
 	  /* We assume that if VEC_STMT is not NULL, this is a case of multiple
 	     copies, and we put the new vector statement in the first available
 	     RELATED_STMT.  */
-	  if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
-	    STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
+	  if (!STMT_VINFO_VEC_STMT (next_stmt_info))
+	    STMT_VINFO_VEC_STMT (next_stmt_info) = new_stmt_info;
 	  else
             {
-              if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
+	      if (!DR_GROUP_SAME_DR_STMT (next_stmt_info))
                 {
-		  gimple *prev_stmt =
-		    STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
-		  gimple *rel_stmt =
-		    STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
-	          while (rel_stmt)
+		  stmt_vec_info prev_stmt_info
+		    = STMT_VINFO_VEC_STMT (next_stmt_info);
+		  stmt_vec_info rel_stmt_info
+		    = STMT_VINFO_RELATED_STMT (prev_stmt_info);
+		  while (rel_stmt_info)
 		    {
-		      prev_stmt = rel_stmt;
-		      rel_stmt =
-                        STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
+		      prev_stmt_info = rel_stmt_info;
+		      rel_stmt_info = STMT_VINFO_RELATED_STMT (rel_stmt_info);
 		    }
 
-  	          STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
-                    new_stmt;
+		  STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
                 }
             }
 
-	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+	  next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
 	  gap_count = 1;
-	  /* If NEXT_STMT accesses the same DR as the previous statement,
+	  /* If NEXT_STMT_INFO accesses the same DR as the previous statement,
 	     put the same TMP_DATA_REF as its vectorized statement; otherwise
 	     get the next data-ref from RESULT_CHAIN.  */
-	  if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
+	  if (!next_stmt_info || !DR_GROUP_SAME_DR_STMT (next_stmt_info))
 	    break;
         }
     }
@@ -5861,39 +6304,39 @@
 }
 
 
-/* Return whether the data reference DR is supported with respect to its
+/* Return whether the data reference DR_INFO is supported with respect to its
    alignment.
    If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported
    even if it is aligned, i.e., check if it is possible to vectorize it
    with different alignment.  */
 
 enum dr_alignment_support
-vect_supportable_dr_alignment (struct data_reference *dr,
+vect_supportable_dr_alignment (dr_vec_info *dr_info,
                                bool check_aligned_accesses)
 {
-  gimple *stmt = DR_STMT (dr);
-  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+  data_reference *dr = dr_info->dr;
+  stmt_vec_info stmt_info = dr_info->stmt;
   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
   machine_mode mode = TYPE_MODE (vectype);
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
   struct loop *vect_loop = NULL;
   bool nested_in_vect_loop = false;
 
-  if (aligned_access_p (dr) && !check_aligned_accesses)
+  if (aligned_access_p (dr_info) && !check_aligned_accesses)
     return dr_aligned;
 
   /* For now assume all conditional loads/stores support unaligned
      access without any special code.  */
-  if (is_gimple_call (stmt)
-      && gimple_call_internal_p (stmt)
-      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
-	  || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
-    return dr_unaligned_supported;
+  if (gcall *stmt = dyn_cast <gcall *> (stmt_info->stmt))
+    if (gimple_call_internal_p (stmt)
+	&& (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
+	    || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
+      return dr_unaligned_supported;
 
   if (loop_vinfo)
     {
       vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
-      nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
+      nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt_info);
     }
 
   /* Possibly unaligned access.  */
@@ -5974,23 +6417,24 @@
 	     same alignment, instead it depends on the SLP group size.  */
 	  if (loop_vinfo
 	      && STMT_SLP_TYPE (stmt_info)
-	      && (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
-		  * GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)))
-		  % TYPE_VECTOR_SUBPARTS (vectype) != 0))
+	      && !multiple_p (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
+			      * (DR_GROUP_SIZE
+				 (DR_GROUP_FIRST_ELEMENT (stmt_info))),
+			      TYPE_VECTOR_SUBPARTS (vectype)))
 	    ;
 	  else if (!loop_vinfo
 		   || (nested_in_vect_loop
-		       && (TREE_INT_CST_LOW (DR_STEP (dr))
-			   != GET_MODE_SIZE (TYPE_MODE (vectype)))))
+		       && maybe_ne (TREE_INT_CST_LOW (DR_STEP (dr)),
+				    GET_MODE_SIZE (TYPE_MODE (vectype)))))
 	    return dr_explicit_realign;
 	  else
 	    return dr_explicit_realign_optimized;
 	}
-      if (!known_alignment_for_access_p (dr))
+      if (!known_alignment_for_access_p (dr_info))
 	is_packed = not_size_aligned (DR_REF (dr));
 
       if (targetm.vectorize.support_vector_misalignment
-	    (mode, type, DR_MISALIGNMENT (dr), is_packed))
+	    (mode, type, DR_MISALIGNMENT (dr_info), is_packed))
 	/* Can't software pipeline the loads, but can at least do them.  */
 	return dr_unaligned_supported;
     }
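
A quick constant-value check that the multiple_p form above preserves the old "% != 0" test (my numbers): a vectorization factor of 4 with DR_GROUP_SIZE 2 and 8 vector subparts gives multiple_p (8, 8), which holds, so the explicit realignment schemes remain available; a factor of 2 gives multiple_p (4, 8), which fails, and control falls through to the generic support_vector_misalignment check, because the misalignment then differs between SLP group instances.
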
@@ -5999,11 +6443,11 @@
       bool is_packed = false;
       tree type = (TREE_TYPE (DR_REF (dr)));
 
-      if (!known_alignment_for_access_p (dr))
+      if (!known_alignment_for_access_p (dr_info))
 	is_packed = not_size_aligned (DR_REF (dr));
 
      if (targetm.vectorize.support_vector_misalignment
-	   (mode, type, DR_MISALIGNMENT (dr), is_packed))
+	   (mode, type, DR_MISALIGNMENT (dr_info), is_packed))
        return dr_unaligned_supported;
     }