diff gcc/convert.c @ 111:04ced10e8804

gcc 7
author kono
date Fri, 27 Oct 2017 22:46:09 +0900
parents f6334be47118
children 84e7813d76e9
--- a/gcc/convert.c	Sun Aug 21 07:07:55 2011 +0900
+++ b/gcc/convert.c	Fri Oct 27 22:46:09 2017 +0900
@@ -1,7 +1,5 @@
 /* Utility routines for data type conversion for GCC.
-   Copyright (C) 1987, 1988, 1991, 1992, 1993, 1994, 1995, 1997, 1998,
-   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
-   Free Software Foundation, Inc.
+   Copyright (C) 1987-2017 Free Software Foundation, Inc.
 
 This file is part of GCC.
 
@@ -26,29 +24,38 @@
 #include "config.h"
 #include "system.h"
 #include "coretypes.h"
-#include "tm.h"
+#include "target.h"
 #include "tree.h"
-#include "flags.h"
+#include "diagnostic-core.h"
+#include "fold-const.h"
+#include "stor-layout.h"
 #include "convert.h"
-#include "diagnostic-core.h"
 #include "langhooks.h"
+#include "builtins.h"
+#include "ubsan.h"
+#include "stringpool.h"
+#include "attribs.h"
+#include "asan.h"
+
+#define maybe_fold_build1_loc(FOLD_P, LOC, CODE, TYPE, EXPR) \
+  ((FOLD_P) ? fold_build1_loc (LOC, CODE, TYPE, EXPR)	     \
+   : build1_loc (LOC, CODE, TYPE, EXPR))
+#define maybe_fold_build2_loc(FOLD_P, LOC, CODE, TYPE, EXPR1, EXPR2) \
+  ((FOLD_P) ? fold_build2_loc (LOC, CODE, TYPE, EXPR1, EXPR2)	     \
+   : build2_loc (LOC, CODE, TYPE, EXPR1, EXPR2))
 
 /* Convert EXPR to some pointer or reference type TYPE.
    EXPR must be pointer, reference, integer, enumeral, or literal zero;
-   in other cases error is called.  */
+   in other cases error is called.  If FOLD_P is true, try to fold the
+   expression.  */
 
-tree
-convert_to_pointer (tree type, tree expr)
+static tree
+convert_to_pointer_1 (tree type, tree expr, bool fold_p)
 {
   location_t loc = EXPR_LOCATION (expr);
   if (TREE_TYPE (expr) == type)
     return expr;
 
-  /* Propagate overflow to the NULL pointer.  */
-  if (integer_zerop (expr))
-    return force_fit_type_double (type, double_int_zero, 0,
-				  TREE_OVERFLOW (expr));
-
   switch (TREE_CODE (TREE_TYPE (expr)))
     {
     case POINTER_TYPE:
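
The hunk above splits each public convert_to_* entry point into a *_1
worker that threads a fold_p flag, and the new maybe_fold_build1_loc /
maybe_fold_build2_loc macros choose between fold_buildN_loc (build and
simplify) and buildN_loc (build verbatim) based on that flag.  The
following self-contained sketch in plain C, using a made-up toy "node"
type and PLUS example rather than GCC's tree machinery, shows the same
conditional-fold dispatch pattern in miniature.

/* Self-contained sketch (plain C, not GCC internals) of the conditional-fold
   dispatch that the maybe_fold_build*_loc macros implement: when FOLD_P is
   set, try to simplify while building the node; otherwise build it verbatim.
   The toy "node" type and the PLUS example are made up for illustration.  */
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>

enum code { CST, PLUS };

struct node
{
  enum code code;
  long value;               /* valid when code == CST  */
  struct node *op0, *op1;   /* valid when code == PLUS */
};

static struct node *build_cst (long v)
{
  struct node *n = calloc (1, sizeof *n);
  n->code = CST;
  n->value = v;
  return n;
}

/* Plain build: always emit a PLUS node.  */
static struct node *build2_plus (struct node *a, struct node *b)
{
  struct node *n = calloc (1, sizeof *n);
  n->code = PLUS;
  n->op0 = a;
  n->op1 = b;
  return n;
}

/* Folding build: collapse constant operands instead of emitting PLUS.  */
static struct node *fold_build2_plus (struct node *a, struct node *b)
{
  if (a->code == CST && b->code == CST)
    return build_cst (a->value + b->value);
  return build2_plus (a, b);
}

#define MAYBE_FOLD_BUILD2_PLUS(FOLD_P, A, B) \
  ((FOLD_P) ? fold_build2_plus (A, B) : build2_plus (A, B))

int main (void)
{
  struct node *folded = MAYBE_FOLD_BUILD2_PLUS (true, build_cst (2), build_cst (3));
  struct node *raw = MAYBE_FOLD_BUILD2_PLUS (false, build_cst (2), build_cst (3));

  printf ("folded: %s, unfolded: %s\n",
          folded->code == CST ? "CST 5" : "PLUS",
          raw->code == CST ? "CST" : "PLUS");
  return 0;
}
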
@@ -60,9 +67,10 @@
 	addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (expr)));
 
 	if (to_as == from_as)
-	  return fold_build1_loc (loc, NOP_EXPR, type, expr);
+	  return maybe_fold_build1_loc (fold_p, loc, NOP_EXPR, type, expr);
 	else
-	  return fold_build1_loc (loc, ADDR_SPACE_CONVERT_EXPR, type, expr);
+	  return maybe_fold_build1_loc (fold_p, loc, ADDR_SPACE_CONVERT_EXPR,
+					type, expr);
       }
 
     case INTEGER_TYPE:
@@ -76,77 +84,59 @@
 	unsigned int pprec = TYPE_PRECISION (type);
 	unsigned int eprec = TYPE_PRECISION (TREE_TYPE (expr));
 
- 	if (eprec != pprec)
-	  expr = fold_build1_loc (loc, NOP_EXPR,
-			      lang_hooks.types.type_for_size (pprec, 0),
-			      expr);
+	if (eprec != pprec)
+	  expr
+	    = maybe_fold_build1_loc (fold_p, loc, NOP_EXPR,
+				     lang_hooks.types.type_for_size (pprec, 0),
+				     expr);
       }
-
-      return fold_build1_loc (loc, CONVERT_EXPR, type, expr);
+      return maybe_fold_build1_loc (fold_p, loc, CONVERT_EXPR, type, expr);
 
     default:
       error ("cannot convert to a pointer type");
-      return convert_to_pointer (type, integer_zero_node);
+      return convert_to_pointer_1 (type, integer_zero_node, fold_p);
     }
 }
 
-/* Avoid any floating point extensions from EXP.  */
-tree
-strip_float_extensions (tree exp)
-{
-  tree sub, expt, subt;
-
-  /*  For floating point constant look up the narrowest type that can hold
-      it properly and handle it like (type)(narrowest_type)constant.
-      This way we can optimize for instance a=a*2.0 where "a" is float
-      but 2.0 is double constant.  */
-  if (TREE_CODE (exp) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (TREE_TYPE (exp)))
-    {
-      REAL_VALUE_TYPE orig;
-      tree type = NULL;
+/* A wrapper around convert_to_pointer_1 that always folds the
+   expression.  */
 
-      orig = TREE_REAL_CST (exp);
-      if (TYPE_PRECISION (TREE_TYPE (exp)) > TYPE_PRECISION (float_type_node)
-	  && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
-	type = float_type_node;
-      else if (TYPE_PRECISION (TREE_TYPE (exp))
-	       > TYPE_PRECISION (double_type_node)
-	       && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
-	type = double_type_node;
-      if (type)
-	return build_real (type, real_value_truncate (TYPE_MODE (type), orig));
-    }
-
-  if (!CONVERT_EXPR_P (exp))
-    return exp;
-
-  sub = TREE_OPERAND (exp, 0);
-  subt = TREE_TYPE (sub);
-  expt = TREE_TYPE (exp);
-
-  if (!FLOAT_TYPE_P (subt))
-    return exp;
-
-  if (DECIMAL_FLOAT_TYPE_P (expt) != DECIMAL_FLOAT_TYPE_P (subt))
-    return exp;
-
-  if (TYPE_PRECISION (subt) > TYPE_PRECISION (expt))
-    return exp;
-
-  return strip_float_extensions (sub);
+tree
+convert_to_pointer (tree type, tree expr)
+{
+  return convert_to_pointer_1 (type, expr, true);
 }
 
+/* A wrapper around convert_to_pointer_1 that only folds the
+   expression if DOFOLD, or if it is CONSTANT_CLASS_P.  */
+
+tree
+convert_to_pointer_maybe_fold (tree type, tree expr, bool dofold)
+{
+  return convert_to_pointer_1 (type, expr, dofold || CONSTANT_CLASS_P (expr));
+}
 
 /* Convert EXPR to some floating-point type TYPE.
 
    EXPR must be float, fixed-point, integer, or enumeral;
-   in other cases error is called.  */
+   in other cases error is called.  If FOLD_P is true, try to fold
+   the expression.  */
 
-tree
-convert_to_real (tree type, tree expr)
+static tree
+convert_to_real_1 (tree type, tree expr, bool fold_p)
 {
   enum built_in_function fcode = builtin_mathfn_code (expr);
   tree itype = TREE_TYPE (expr);
+  location_t loc = EXPR_LOCATION (expr);
+
+  if (TREE_CODE (expr) == COMPOUND_EXPR)
+    {
+      tree t = convert_to_real_1 (type, TREE_OPERAND (expr, 1), fold_p);
+      if (t == TREE_OPERAND (expr, 1))
+	return expr;
+      return build2_loc (EXPR_LOCATION (expr), COMPOUND_EXPR, TREE_TYPE (t),
+			 TREE_OPERAND (expr, 0), t);
+    }
 
   /* Disable until we figure out how to decide whether the functions are
      present in runtime.  */
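
The new COMPOUND_EXPR case in the hunk above converts only the second
operand of a comma expression and rebuilds the COMPOUND_EXPR around it.
That matches C semantics: the value of (e1, e2) is the value of e2, so a
cast of the whole expression is equivalent to a cast of e2 alone, while
e1 is kept only for its side effects.  A small standalone illustration
(plain C, assumed example):

/* Standalone illustration (plain C, assumed example) of why the COMPOUND_EXPR
   case only converts the second operand: the value of (e1, e2) is the value
   of e2, so casting the whole comma expression is the same as casting e2,
   while e1 is kept purely for its side effects.  */
#include <stdio.h>

static int side_effect (void)
{
  puts ("side effect still runs");
  return 0;
}

int main (void)
{
  int i = 7;
  double a = (double) (side_effect (), i);   /* cast the whole comma expr   */
  double b = (side_effect (), (double) i);   /* cast only the last operand  */
  printf ("%f %f\n", a, b);                  /* both print 7.000000         */
  return 0;
}
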
@@ -177,6 +167,7 @@
 	       -fmath-errno.  */
 	    if (flag_errno_math)
 	      break;
+	    gcc_fallthrough ();
 	  CASE_MATHFN (ACOS)
 	  CASE_MATHFN (ACOSH)
 	  CASE_MATHFN (ASIN)
@@ -187,16 +178,20 @@
  	  CASE_MATHFN (COS)
  	  CASE_MATHFN (ERF)
  	  CASE_MATHFN (ERFC)
- 	  CASE_MATHFN (FABS)
 	  CASE_MATHFN (LOG)
 	  CASE_MATHFN (LOG10)
 	  CASE_MATHFN (LOG2)
  	  CASE_MATHFN (LOG1P)
- 	  CASE_MATHFN (LOGB)
  	  CASE_MATHFN (SIN)
-	  CASE_MATHFN (SQRT)
  	  CASE_MATHFN (TAN)
  	  CASE_MATHFN (TANH)
+	    /* This conversion is not safe for the functions above.  */
+	    if (!flag_unsafe_math_optimizations)
+	      break;
+	    gcc_fallthrough ();
+	  CASE_MATHFN (SQRT)
+	  CASE_MATHFN (FABS)
+	  CASE_MATHFN (LOGB)
 #undef CASE_MATHFN
 	    {
 	      tree arg0 = strip_float_extensions (CALL_EXPR_ARG (expr, 0));
@@ -207,73 +202,75 @@
 	      if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (type))
 		newtype = TREE_TYPE (arg0);
 
+	      /* We consider converting
+
+		     (T1) sqrtT2 ((T2) exprT3)
+		 to
+		     (T1) sqrtT4 ((T4) exprT3),
+
+		 where T1 is TYPE, T2 is ITYPE, T3 is TREE_TYPE (ARG0), and
+		 T4 is NEWTYPE.  All of those types are floating point types.
+		 T4 (NEWTYPE) should be narrower than T2 (ITYPE).  This
+		 conversion is safe only if P1 >= P2 * 2 + 2, where P1 and P2
+		 are the precisions of T2 and T4.  See the following URL for
+		 a reference:
+		 http://stackoverflow.com/questions/9235456/determining-
+                 floating-point-square-root
+		 */
+	      if ((fcode == BUILT_IN_SQRT || fcode == BUILT_IN_SQRTL)
+		  && !flag_unsafe_math_optimizations)
+		{
+		  /* The following conversion is unsafe even if the precision
+		     condition below is satisfied:
+
+		     (float) sqrtl ((long double) double_val) -> (float) sqrt (double_val)
+		    */
+		  if (TYPE_MODE (type) != TYPE_MODE (newtype))
+		    break;
+
+		  int p1 = REAL_MODE_FORMAT (TYPE_MODE (itype))->p;
+		  int p2 = REAL_MODE_FORMAT (TYPE_MODE (newtype))->p;
+		  if (p1 < p2 * 2 + 2)
+		    break;
+		}
+
 	      /* Be careful about integer to fp conversions.
 		 These may overflow still.  */
 	      if (FLOAT_TYPE_P (TREE_TYPE (arg0))
 		  && TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
 		  && (TYPE_MODE (newtype) == TYPE_MODE (double_type_node)
 		      || TYPE_MODE (newtype) == TYPE_MODE (float_type_node)))
-	        {
+		{
 		  tree fn = mathfn_built_in (newtype, fcode);
-
 		  if (fn)
-		  {
-		    tree arg = fold (convert_to_real (newtype, arg0));
-		    expr = build_call_expr (fn, 1, arg);
-		    if (newtype == type)
-		      return expr;
-		  }
+		    {
+		      tree arg = convert_to_real_1 (newtype, arg0, fold_p);
+		      expr = build_call_expr (fn, 1, arg);
+		      if (newtype == type)
+			return expr;
+		    }
 		}
 	    }
 	default:
 	  break;
 	}
     }
-  if (optimize
-      && (((fcode == BUILT_IN_FLOORL
-	   || fcode == BUILT_IN_CEILL
-	   || fcode == BUILT_IN_ROUNDL
-	   || fcode == BUILT_IN_RINTL
-	   || fcode == BUILT_IN_TRUNCL
-	   || fcode == BUILT_IN_NEARBYINTL)
-	  && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
-	      || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
-	  || ((fcode == BUILT_IN_FLOOR
-	       || fcode == BUILT_IN_CEIL
-	       || fcode == BUILT_IN_ROUND
-	       || fcode == BUILT_IN_RINT
-	       || fcode == BUILT_IN_TRUNC
-	       || fcode == BUILT_IN_NEARBYINT)
-	      && (TYPE_MODE (type) == TYPE_MODE (float_type_node)))))
-    {
-      tree fn = mathfn_built_in (type, fcode);
-
-      if (fn)
-	{
-	  tree arg = strip_float_extensions (CALL_EXPR_ARG (expr, 0));
-
-	  /* Make sure (type)arg0 is an extension, otherwise we could end up
-	     changing (float)floor(double d) into floorf((float)d), which is
-	     incorrect because (float)d uses round-to-nearest and can round
-	     up to the next integer.  */
-	  if (TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (arg)))
-	    return build_call_expr (fn, 1, fold (convert_to_real (type, arg)));
-	}
-    }
 
   /* Propagate the cast into the operation.  */
   if (itype != type && FLOAT_TYPE_P (type))
     switch (TREE_CODE (expr))
       {
 	/* Convert (float)-x into -(float)x.  This is safe for
-	   round-to-nearest rounding mode.  */
+	   round-to-nearest rounding mode when the inner type is float.  */
 	case ABS_EXPR:
 	case NEGATE_EXPR:
 	  if (!flag_rounding_math
-	      && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (expr)))
-	    return build1 (TREE_CODE (expr), type,
-			   fold (convert_to_real (type,
-						  TREE_OPERAND (expr, 0))));
+	      && FLOAT_TYPE_P (itype)
+	      && TYPE_PRECISION (type) < TYPE_PRECISION (itype))
+	    {
+	      tree arg = convert_to_real_1 (type, TREE_OPERAND (expr, 0),
+					    fold_p);
+	      return build1 (TREE_CODE (expr), type, arg);
+	    }
 	  break;
 	/* Convert (outertype)((innertype0)a+(innertype1)b)
 	   into ((newtype)a+(newtype)b) where newtype
@@ -309,8 +306,10 @@
 		      || newtype == dfloat128_type_node)
 		    {
 		      expr = build2 (TREE_CODE (expr), newtype,
-				     fold (convert_to_real (newtype, arg0)),
-				     fold (convert_to_real (newtype, arg1)));
+				     convert_to_real_1 (newtype, arg0,
+							fold_p),
+				     convert_to_real_1 (newtype, arg1,
+							fold_p));
 		      if (newtype == type)
 			return expr;
 		      break;
@@ -349,8 +348,10 @@
 			      && !excess_precision_type (newtype))))
 		    {
 		      expr = build2 (TREE_CODE (expr), newtype,
-				     fold (convert_to_real (newtype, arg0)),
-				     fold (convert_to_real (newtype, arg1)));
+				     convert_to_real_1 (newtype, arg0,
+							fold_p),
+				     convert_to_real_1 (newtype, arg1,
+							fold_p));
 		      if (newtype == type)
 			return expr;
 		    }
@@ -366,10 +367,11 @@
     case REAL_TYPE:
       /* Ignore the conversion if we don't need to store intermediate
 	 results and neither type is a decimal float.  */
-      return build1 ((flag_float_store
-		     || DECIMAL_FLOAT_TYPE_P (type)
-		     || DECIMAL_FLOAT_TYPE_P (itype))
-		     ? CONVERT_EXPR : NOP_EXPR, type, expr);
+      return build1_loc (loc,
+			 (flag_float_store
+			  || DECIMAL_FLOAT_TYPE_P (type)
+			  || DECIMAL_FLOAT_TYPE_P (itype))
+			 ? CONVERT_EXPR : NOP_EXPR, type, expr);
 
     case INTEGER_TYPE:
     case ENUMERAL_TYPE:
@@ -381,35 +383,141 @@
 
     case COMPLEX_TYPE:
       return convert (type,
-		      fold_build1 (REALPART_EXPR,
-				   TREE_TYPE (TREE_TYPE (expr)), expr));
+		      maybe_fold_build1_loc (fold_p, loc, REALPART_EXPR,
+					     TREE_TYPE (TREE_TYPE (expr)),
+					     expr));
 
     case POINTER_TYPE:
     case REFERENCE_TYPE:
       error ("pointer value used where a floating point value was expected");
-      return convert_to_real (type, integer_zero_node);
+      return convert_to_real_1 (type, integer_zero_node, fold_p);
 
     default:
       error ("aggregate value used where a float was expected");
-      return convert_to_real (type, integer_zero_node);
+      return convert_to_real_1 (type, integer_zero_node, fold_p);
     }
 }
 
+/* A wrapper around convert_to_real_1 that always folds the
+   expression.  */
+
+tree
+convert_to_real (tree type, tree expr)
+{
+  return convert_to_real_1 (type, expr, true);
+}
+
+/* A wrapper around convert_to_real_1 that only folds the
+   expression if DOFOLD, or if it is CONSTANT_CLASS_P.  */
+
+tree
+convert_to_real_maybe_fold (tree type, tree expr, bool dofold)
+{
+  return convert_to_real_1 (type, expr, dofold || CONSTANT_CLASS_P (expr));
+}
+
+/* Try to narrow EX_FORM ARG0 ARG1 in narrowed arg types producing a
+   result in TYPE.  */
+
+static tree
+do_narrow (location_t loc,
+	   enum tree_code ex_form, tree type, tree arg0, tree arg1,
+	   tree expr, unsigned inprec, unsigned outprec, bool dofold)
+{
+  /* Do the arithmetic in type TYPEX,
+     then convert result to TYPE.  */
+  tree typex = type;
+
+  /* Can't do arithmetic in enumeral types
+     so use an integer type that will hold the values.  */
+  if (TREE_CODE (typex) == ENUMERAL_TYPE)
+    typex = lang_hooks.types.type_for_size (TYPE_PRECISION (typex),
+					    TYPE_UNSIGNED (typex));
+
+  /* The type demotion below might cause doing unsigned arithmetic
+     instead of signed, and thus hide overflow bugs.  */
+  if ((ex_form == PLUS_EXPR || ex_form == MINUS_EXPR)
+      && !TYPE_UNSIGNED (typex)
+      && sanitize_flags_p (SANITIZE_SI_OVERFLOW))
+    return NULL_TREE;
+
+  /* But now perhaps TYPEX is as wide as INPREC.
+     In that case, do nothing special here.
+     (Otherwise we would recurse infinitely in convert.)  */
+  if (TYPE_PRECISION (typex) != inprec)
+    {
+      /* Don't do unsigned arithmetic where signed was wanted,
+	 or vice versa.
+	 Exception: if both of the original operands were
+	 unsigned then we can safely do the work as unsigned.
+	 Exception: shift operations take their type solely
+	 from the first argument.
+	 Exception: the LSHIFT_EXPR case above requires that
+	 we perform this operation unsigned lest we produce
+	 signed-overflow undefinedness.
+	 And we may need to do it as unsigned
+	 if we truncate to the original size.  */
+      if (TYPE_UNSIGNED (TREE_TYPE (expr))
+	  || (TYPE_UNSIGNED (TREE_TYPE (arg0))
+	      && (TYPE_UNSIGNED (TREE_TYPE (arg1))
+		  || ex_form == LSHIFT_EXPR
+		  || ex_form == RSHIFT_EXPR
+		  || ex_form == LROTATE_EXPR
+		  || ex_form == RROTATE_EXPR))
+	  || ex_form == LSHIFT_EXPR
+	  /* If we have !flag_wrapv, and either ARG0 or
+	     ARG1 is of a signed type, we have to do
+	     PLUS_EXPR, MINUS_EXPR or MULT_EXPR in an unsigned
+	     type in case the operation in outprec precision
+	     could overflow.  Otherwise, we would introduce
+	     signed-overflow undefinedness.  */
+	  || ((!TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0))
+	       || !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1)))
+	      && ((TYPE_PRECISION (TREE_TYPE (arg0)) * 2u
+		   > outprec)
+		  || (TYPE_PRECISION (TREE_TYPE (arg1)) * 2u
+		      > outprec))
+	      && (ex_form == PLUS_EXPR
+		  || ex_form == MINUS_EXPR
+		  || ex_form == MULT_EXPR)))
+	{
+	  if (!TYPE_UNSIGNED (typex))
+	    typex = unsigned_type_for (typex);
+	}
+      else
+	{
+	  if (TYPE_UNSIGNED (typex))
+	    typex = signed_type_for (typex);
+	}
+      /* We should do away with all this once we have a proper
+	 type promotion/demotion pass, see PR45397.  */
+      expr = maybe_fold_build2_loc (dofold, loc, ex_form, typex,
+				    convert (typex, arg0),
+				    convert (typex, arg1));
+      return convert (type, expr);
+    }
+
+  return NULL_TREE;
+}
+
 /* Convert EXPR to some integer (or enum) type TYPE.
 
    EXPR must be pointer, integer, discrete (enum, char, or bool), float,
    fixed-point or vector; in other cases error is called.
 
+   If DOFOLD is TRUE, we try to simplify newly-created patterns by folding.
+
    The result of this is always supposed to be a newly created tree node
    not in use in any existing structure.  */
 
-tree
-convert_to_integer (tree type, tree expr)
+static tree
+convert_to_integer_1 (tree type, tree expr, bool dofold)
 {
   enum tree_code ex_form = TREE_CODE (expr);
   tree intype = TREE_TYPE (expr);
-  unsigned int inprec = TYPE_PRECISION (intype);
-  unsigned int outprec = TYPE_PRECISION (type);
+  unsigned int inprec = element_precision (intype);
+  unsigned int outprec = element_precision (type);
+  location_t loc = EXPR_LOCATION (expr);
 
   /* An INTEGER_TYPE cannot be incomplete, but an ENUMERAL_TYPE can
      be.  Consider `enum E = { a, b = (enum E) 3 };'.  */
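
do_narrow, factored out in the hunk above, performs the truncated
operation in a narrower (and, in the listed cases, unsigned) type and
then converts the result.  It relies on the fact that for addition,
subtraction, multiplication and the bitwise operations, the low-order
bits of the result depend only on the low-order bits of the operands;
the unsigned variant is used so the narrow operation cannot introduce
signed-overflow undefinedness.  A standalone demonstration with assumed
values:

/* Standalone demonstration (assumed values, plain C) that the low-order bits
   of a sum depend only on the low-order bits of its operands, which is what
   lets the truncation be distributed into the operands; doing the narrow
   operation in an unsigned type keeps it free of signed overflow.  */
#include <stdio.h>
#include <stdint.h>

int main (void)
{
  int32_t a = 70000, b = 80000;

  uint16_t wide_then_truncate = (uint16_t) (a + b);
  uint16_t narrow_then_add = (uint16_t) ((uint16_t) a + (uint16_t) b);

  /* Both print 18928.  */
  printf ("%u %u\n", (unsigned) wide_then_truncate, (unsigned) narrow_then_add);
  return 0;
}
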
@@ -419,6 +527,15 @@
       return error_mark_node;
     }
 
+  if (ex_form == COMPOUND_EXPR)
+    {
+      tree t = convert_to_integer_1 (type, TREE_OPERAND (expr, 1), dofold);
+      if (t == TREE_OPERAND (expr, 1))
+	return expr;
+      return build2_loc (EXPR_LOCATION (expr), COMPOUND_EXPR, TREE_TYPE (t),
+			 TREE_OPERAND (expr, 0), t);
+    }    
+
   /* Convert e.g. (long)round(d) -> lround(d).  */
   /* If we're converting to char, we may encounter differing behavior
      between converting from double->char vs double->long->char.
@@ -438,11 +555,14 @@
         {
 	CASE_FLT_FN (BUILT_IN_CEIL):
 	  /* Only convert in ISO C99 mode.  */
-	  if (!TARGET_C99_FUNCTIONS)
+	  if (!targetm.libc_has_function (function_c99_misc))
 	    break;
-	  if (outprec < TYPE_PRECISION (long_integer_type_node)
-	      || (outprec == TYPE_PRECISION (long_integer_type_node)
+	  if (outprec < TYPE_PRECISION (integer_type_node)
+	      || (outprec == TYPE_PRECISION (integer_type_node)
 		  && !TYPE_UNSIGNED (type)))
+	    fn = mathfn_built_in (s_intype, BUILT_IN_ICEIL);
+	  else if (outprec == TYPE_PRECISION (long_integer_type_node)
+		   && !TYPE_UNSIGNED (type))
 	    fn = mathfn_built_in (s_intype, BUILT_IN_LCEIL);
 	  else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
 		   && !TYPE_UNSIGNED (type))
@@ -451,11 +571,14 @@
 
 	CASE_FLT_FN (BUILT_IN_FLOOR):
 	  /* Only convert in ISO C99 mode.  */
-	  if (!TARGET_C99_FUNCTIONS)
+	  if (!targetm.libc_has_function (function_c99_misc))
 	    break;
-	  if (outprec < TYPE_PRECISION (long_integer_type_node)
-	      || (outprec == TYPE_PRECISION (long_integer_type_node)
+	  if (outprec < TYPE_PRECISION (integer_type_node)
+	      || (outprec == TYPE_PRECISION (integer_type_node)
 		  && !TYPE_UNSIGNED (type)))
+	    fn = mathfn_built_in (s_intype, BUILT_IN_IFLOOR);
+	  else if (outprec == TYPE_PRECISION (long_integer_type_node)
+		   && !TYPE_UNSIGNED (type))
 	    fn = mathfn_built_in (s_intype, BUILT_IN_LFLOOR);
 	  else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
 		   && !TYPE_UNSIGNED (type))
@@ -463,9 +586,15 @@
 	  break;
 
 	CASE_FLT_FN (BUILT_IN_ROUND):
-	  if (outprec < TYPE_PRECISION (long_integer_type_node)
-	      || (outprec == TYPE_PRECISION (long_integer_type_node)
+	  /* Only convert in ISO C99 mode and with -fno-math-errno.  */
+	  if (!targetm.libc_has_function (function_c99_misc) || flag_errno_math)
+	    break;
+	  if (outprec < TYPE_PRECISION (integer_type_node)
+	      || (outprec == TYPE_PRECISION (integer_type_node)
 		  && !TYPE_UNSIGNED (type)))
+	    fn = mathfn_built_in (s_intype, BUILT_IN_IROUND);
+	  else if (outprec == TYPE_PRECISION (long_integer_type_node)
+		   && !TYPE_UNSIGNED (type))
 	    fn = mathfn_built_in (s_intype, BUILT_IN_LROUND);
 	  else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
 		   && !TYPE_UNSIGNED (type))
@@ -476,11 +605,17 @@
 	  /* Only convert nearbyint* if we can ignore math exceptions.  */
 	  if (flag_trapping_math)
 	    break;
-	  /* ... Fall through ...  */
+	  gcc_fallthrough ();
 	CASE_FLT_FN (BUILT_IN_RINT):
-	  if (outprec < TYPE_PRECISION (long_integer_type_node)
-	      || (outprec == TYPE_PRECISION (long_integer_type_node)
+	  /* Only convert in ISO C99 mode and with -fno-math-errno.  */
+	  if (!targetm.libc_has_function (function_c99_misc) || flag_errno_math)
+	    break;
+	  if (outprec < TYPE_PRECISION (integer_type_node)
+	      || (outprec == TYPE_PRECISION (integer_type_node)
 		  && !TYPE_UNSIGNED (type)))
+	    fn = mathfn_built_in (s_intype, BUILT_IN_IRINT);
+	  else if (outprec == TYPE_PRECISION (long_integer_type_node)
+		   && !TYPE_UNSIGNED (type))
 	    fn = mathfn_built_in (s_intype, BUILT_IN_LRINT);
 	  else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
 		   && !TYPE_UNSIGNED (type))
@@ -488,7 +623,7 @@
 	  break;
 
 	CASE_FLT_FN (BUILT_IN_TRUNC):
-	  return convert_to_integer (type, CALL_EXPR_ARG (s_expr, 0));
+	  return convert_to_integer_1 (type, CALL_EXPR_ARG (s_expr, 0), dofold);
 
 	default:
 	  break;
@@ -497,7 +632,7 @@
       if (fn)
         {
 	  tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
-	  return convert_to_integer (type, newexpr);
+	  return convert_to_integer_1 (type, newexpr, dofold);
 	}
     }
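
The hunks above turn (int)/(long)/(long long) casts of round, rint,
ceil, floor and nearbyint calls into the corresponding i*/l*/ll*
built-ins when the C99 runtime functions are available (and, for round
and rint, only with -fno-math-errno).  For the functions that exist in
the C99 library the equivalence is easy to observe directly; a quick
illustrative check for a value that fits the target integer type:

/* Quick illustrative check (plain C) of the equivalences these rewrites rely
   on, using the C99 functions that exist in libm and a value that fits the
   target integer type.  */
#include <stdio.h>
#include <math.h>

int main (void)
{
  double d = 3.7;

  printf ("(long) round (d) = %ld, lround (d) = %ld\n",
          (long) round (d), lround (d));
  printf ("(long) rint (d)  = %ld, lrint (d)  = %ld\n",
          (long) rint (d), lrint (d));
  return 0;
}
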
 
@@ -528,7 +663,7 @@
       if (fn)
         {
 	  tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
-	  return convert_to_integer (type, newexpr);
+	  return convert_to_integer_1 (type, newexpr, dofold);
 	}
     }
 
@@ -543,6 +678,8 @@
 	 there widen/truncate to the required type.  Some targets support the
 	 coexistence of multiple valid pointer sizes, so fetch the one we need
 	 from the type.  */
+      if (!dofold)
+	return build1 (CONVERT_EXPR, type, expr);
       expr = fold_build1 (CONVERT_EXPR,
 			  lang_hooks.types.type_for_size
 			    (TYPE_PRECISION (intype), 0),
@@ -570,7 +707,6 @@
       else if (outprec >= inprec)
 	{
 	  enum tree_code code;
-	  tree tem;
 
 	  /* If the precision of the EXPR's type is K bits and the
 	     destination mode has more bits, and the sign is changing,
@@ -582,19 +718,12 @@
 	     the signed-to-unsigned case the high-order bits have to
 	     be cleared.  */
 	  if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr))
-	      && (TYPE_PRECISION (TREE_TYPE (expr))
-		  != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (expr)))))
+	      && !type_has_mode_precision_p (TREE_TYPE (expr)))
 	    code = CONVERT_EXPR;
 	  else
 	    code = NOP_EXPR;
 
-	  tem = fold_unary (code, type, expr);
-	  if (tem)
-	    return tem;
-
-	  tem = build1 (code, type, expr);
-	  TREE_NO_WARNING (tem) = 1;
-	  return tem;
+	  return maybe_fold_build1_loc (dofold, loc, code, type, expr);
 	}
 
       /* If TYPE is an enumeral type or a type with a precision less
@@ -602,11 +731,12 @@
 	 type corresponding to its mode, then do a nop conversion
 	 to TYPE.  */
       else if (TREE_CODE (type) == ENUMERAL_TYPE
-	       || outprec != GET_MODE_BITSIZE (TYPE_MODE (type)))
-	return build1 (NOP_EXPR, type,
-		       convert (lang_hooks.types.type_for_mode
-				(TYPE_MODE (type), TYPE_UNSIGNED (type)),
-				expr));
+	       || outprec != GET_MODE_PRECISION (TYPE_MODE (type)))
+	{
+	  expr = convert (lang_hooks.types.type_for_mode
+			  (TYPE_MODE (type), TYPE_UNSIGNED (type)), expr);
+	  return maybe_fold_build1_loc (dofold, loc, NOP_EXPR, type, expr);
+	}
 
       /* Here detect when we can distribute the truncation down past some
 	 arithmetic.  For example, if adding two longs and converting to an
@@ -628,228 +758,228 @@
 	 two narrow values can be combined in their narrow type even to
 	 make a wider result--are handled by "shorten" in build_binary_op.  */
 
-      switch (ex_form)
-	{
-	case RSHIFT_EXPR:
-	  /* We can pass truncation down through right shifting
-	     when the shift count is a nonpositive constant.  */
-	  if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
-	      && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) <= 0)
-	    goto trunc1;
-	  break;
+      if (dofold)
+	switch (ex_form)
+	  {
+	  case RSHIFT_EXPR:
+	    /* We can pass truncation down through right shifting
+	       when the shift count is a nonpositive constant.  */
+	    if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
+		&& tree_int_cst_sgn (TREE_OPERAND (expr, 1)) <= 0)
+	      goto trunc1;
+	    break;
 
-	case LSHIFT_EXPR:
-	  /* We can pass truncation down through left shifting
-	     when the shift count is a nonnegative constant and
-	     the target type is unsigned.  */
-	  if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
-	      && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0
-	      && TYPE_UNSIGNED (type)
-	      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
+	  case LSHIFT_EXPR:
+	    /* We can pass truncation down through left shifting
+	       when the shift count is a nonnegative constant and
+	       the target type is unsigned.  */
+	    if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
+		&& tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0
+		&& TYPE_UNSIGNED (type)
+		&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
+	      {
+		/* If shift count is less than the width of the truncated type,
+		   really shift.  */
+		if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type)))
+		  /* In this case, shifting is like multiplication.  */
+		  goto trunc1;
+		else
+		  {
+		    /* If it is >= that width, result is zero.
+		       Handling this with trunc1 would give the wrong result:
+		       (int) ((long long) a << 32) is well defined (as 0)
+		       but (int) a << 32 is undefined and would get a
+		       warning.  */
+
+		    tree t = build_int_cst (type, 0);
+
+		    /* If the original expression had side-effects, we must
+		       preserve it.  */
+		    if (TREE_SIDE_EFFECTS (expr))
+		      return build2 (COMPOUND_EXPR, type, expr, t);
+		    else
+		      return t;
+		  }
+	      }
+	    break;
+
+	  case TRUNC_DIV_EXPR:
 	    {
-	      /* If shift count is less than the width of the truncated type,
-		 really shift.  */
-	      if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type)))
-		/* In this case, shifting is like multiplication.  */
-		goto trunc1;
-	      else
+	      tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), NULL_TREE);
+	      tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), NULL_TREE);
+
+	      /* Don't distribute unless the output precision is at least as
+		 big as the actual inputs and it has the same signedness.  */
+	      if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
+		  && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
+		  /* If signedness of arg0 and arg1 don't match,
+		     we can't necessarily find a type to compare them in.  */
+		  && (TYPE_UNSIGNED (TREE_TYPE (arg0))
+		      == TYPE_UNSIGNED (TREE_TYPE (arg1)))
+		  /* Do not change the sign of the division.  */
+		  && (TYPE_UNSIGNED (TREE_TYPE (expr))
+		      == TYPE_UNSIGNED (TREE_TYPE (arg0)))
+		  /* Either require unsigned division or a division by
+		     a constant that is not -1.  */
+		  && (TYPE_UNSIGNED (TREE_TYPE (arg0))
+		      || (TREE_CODE (arg1) == INTEGER_CST
+			  && !integer_all_onesp (arg1))))
 		{
-		  /* If it is >= that width, result is zero.
-		     Handling this with trunc1 would give the wrong result:
-		     (int) ((long long) a << 32) is well defined (as 0)
-		     but (int) a << 32 is undefined and would get a
-		     warning.  */
+		  tree tem = do_narrow (loc, ex_form, type, arg0, arg1,
+					expr, inprec, outprec, dofold);
+		  if (tem)
+		    return tem;
+		}
+	      break;
+	    }
+
+	  case MAX_EXPR:
+	  case MIN_EXPR:
+	  case MULT_EXPR:
+	    {
+	      tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
+	      tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
+
+	      /* Don't distribute unless the output precision is at least as
+		 big as the actual inputs.  Otherwise, the comparison of the
+		 truncated values will be wrong.  */
+	      if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
+		  && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
+		  /* If signedness of arg0 and arg1 don't match,
+		     we can't necessarily find a type to compare them in.  */
+		  && (TYPE_UNSIGNED (TREE_TYPE (arg0))
+		      == TYPE_UNSIGNED (TREE_TYPE (arg1))))
+		goto trunc1;
+	      break;
+	    }
 
-		  tree t = build_int_cst (type, 0);
+	  case PLUS_EXPR:
+	  case MINUS_EXPR:
+	  case BIT_AND_EXPR:
+	  case BIT_IOR_EXPR:
+	  case BIT_XOR_EXPR:
+	  trunc1:
+	    {
+	      tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
+	      tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
 
-		  /* If the original expression had side-effects, we must
-		     preserve it.  */
-		  if (TREE_SIDE_EFFECTS (expr))
-		    return build2 (COMPOUND_EXPR, type, expr, t);
-		  else
-		    return t;
+	      /* Do not try to narrow operands of pointer subtraction;
+		 that will interfere with other folding.  */
+	      if (ex_form == MINUS_EXPR
+		  && CONVERT_EXPR_P (arg0)
+		  && CONVERT_EXPR_P (arg1)
+		  && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
+		  && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg1, 0))))
+		break;
+
+	      if (outprec >= BITS_PER_WORD
+		  || targetm.truly_noop_truncation (outprec, inprec)
+		  || inprec > TYPE_PRECISION (TREE_TYPE (arg0))
+		  || inprec > TYPE_PRECISION (TREE_TYPE (arg1)))
+		{
+		  tree tem = do_narrow (loc, ex_form, type, arg0, arg1,
+					expr, inprec, outprec, dofold);
+		  if (tem)
+		    return tem;
 		}
 	    }
-	  break;
+	    break;
 
-	case TRUNC_DIV_EXPR:
-	  {
-	    tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
-	    tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
+	  case NEGATE_EXPR:
+	    /* Using unsigned arithmetic for signed types may hide overflow
+	       bugs.  */
+	    if (!TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (expr, 0)))
+		&& sanitize_flags_p (SANITIZE_SI_OVERFLOW))
+	      break;
+	    /* Fall through.  */
+	  case BIT_NOT_EXPR:
+	    /* This is not correct for ABS_EXPR,
+	       since we must test the sign before truncation.  */
+	    {
+	      /* Do the arithmetic in type TYPEX,
+		 then convert result to TYPE.  */
+	      tree typex = type;
+
+	      /* Can't do arithmetic in enumeral types
+		 so use an integer type that will hold the values.  */
+	      if (TREE_CODE (typex) == ENUMERAL_TYPE)
+		typex
+		  = lang_hooks.types.type_for_size (TYPE_PRECISION (typex),
+						    TYPE_UNSIGNED (typex));
 
-	    /* Don't distribute unless the output precision is at least as big
-	       as the actual inputs and it has the same signedness.  */
-	    if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
-		&& outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
-		/* If signedness of arg0 and arg1 don't match,
-		   we can't necessarily find a type to compare them in.  */
-		&& (TYPE_UNSIGNED (TREE_TYPE (arg0))
-		    == TYPE_UNSIGNED (TREE_TYPE (arg1)))
-		/* Do not change the sign of the division.  */
-		&& (TYPE_UNSIGNED (TREE_TYPE (expr))
-		    == TYPE_UNSIGNED (TREE_TYPE (arg0)))
-		/* Either require unsigned division or a division by
-		   a constant that is not -1.  */
-		&& (TYPE_UNSIGNED (TREE_TYPE (arg0))
-		    || (TREE_CODE (arg1) == INTEGER_CST
-			&& !integer_all_onesp (arg1))))
-	      goto trunc1;
-	    break;
-	  }
+	      if (!TYPE_UNSIGNED (typex))
+		typex = unsigned_type_for (typex);
+	      return convert (type,
+			      fold_build1 (ex_form, typex,
+					   convert (typex,
+						    TREE_OPERAND (expr, 0))));
+	    }
 
-	case MAX_EXPR:
-	case MIN_EXPR:
-	case MULT_EXPR:
-	  {
-	    tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
-	    tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
+	  CASE_CONVERT:
+	    /* Don't introduce a "can't convert between vector values of
+	       different size" error.  */
+	    if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == VECTOR_TYPE
+		&& (GET_MODE_SIZE (TYPE_MODE
+				   (TREE_TYPE (TREE_OPERAND (expr, 0))))
+		    != GET_MODE_SIZE (TYPE_MODE (type))))
+	      break;
+	    /* If truncating after truncating, might as well do all at once.
+	       If truncating after extending, we may get rid of wasted work.  */
+	    return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type));
 
-	    /* Don't distribute unless the output precision is at least as big
-	       as the actual inputs.  Otherwise, the comparison of the
-	       truncated values will be wrong.  */
-	    if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
-		&& outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
-		/* If signedness of arg0 and arg1 don't match,
-		   we can't necessarily find a type to compare them in.  */
-		&& (TYPE_UNSIGNED (TREE_TYPE (arg0))
-		    == TYPE_UNSIGNED (TREE_TYPE (arg1))))
-	      goto trunc1;
+	  case COND_EXPR:
+	    /* It is sometimes worthwhile to push the narrowing down through
+	       the conditional, and doing so never loses.  A COND_EXPR may have a throw
+	       as one operand, which then has void type.  Just leave void
+	       operands as they are.  */
+	    return
+	      fold_build3 (COND_EXPR, type, TREE_OPERAND (expr, 0),
+			   VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 1)))
+			   ? TREE_OPERAND (expr, 1)
+			   : convert (type, TREE_OPERAND (expr, 1)),
+			   VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 2)))
+			   ? TREE_OPERAND (expr, 2)
+			   : convert (type, TREE_OPERAND (expr, 2)));
+
+	  default:
 	    break;
 	  }
 
-	case PLUS_EXPR:
-	case MINUS_EXPR:
-	case BIT_AND_EXPR:
-	case BIT_IOR_EXPR:
-	case BIT_XOR_EXPR:
-	trunc1:
-	  {
-	    tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
-	    tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
-
-	    if (outprec >= BITS_PER_WORD
-		|| TRULY_NOOP_TRUNCATION (outprec, inprec)
-		|| inprec > TYPE_PRECISION (TREE_TYPE (arg0))
-		|| inprec > TYPE_PRECISION (TREE_TYPE (arg1)))
-	      {
-		/* Do the arithmetic in type TYPEX,
-		   then convert result to TYPE.  */
-		tree typex = type;
-
-		/* Can't do arithmetic in enumeral types
-		   so use an integer type that will hold the values.  */
-		if (TREE_CODE (typex) == ENUMERAL_TYPE)
-		  typex = lang_hooks.types.type_for_size
-		    (TYPE_PRECISION (typex), TYPE_UNSIGNED (typex));
-
-		/* But now perhaps TYPEX is as wide as INPREC.
-		   In that case, do nothing special here.
-		   (Otherwise would recurse infinitely in convert.  */
-		if (TYPE_PRECISION (typex) != inprec)
-		  {
-		    /* Don't do unsigned arithmetic where signed was wanted,
-		       or vice versa.
-		       Exception: if both of the original operands were
-		       unsigned then we can safely do the work as unsigned.
-		       Exception: shift operations take their type solely
-		       from the first argument.
-		       Exception: the LSHIFT_EXPR case above requires that
-		       we perform this operation unsigned lest we produce
-		       signed-overflow undefinedness.
-		       And we may need to do it as unsigned
-		       if we truncate to the original size.  */
-		    if (TYPE_UNSIGNED (TREE_TYPE (expr))
-			|| (TYPE_UNSIGNED (TREE_TYPE (arg0))
-			    && (TYPE_UNSIGNED (TREE_TYPE (arg1))
-				|| ex_form == LSHIFT_EXPR
-				|| ex_form == RSHIFT_EXPR
-				|| ex_form == LROTATE_EXPR
-				|| ex_form == RROTATE_EXPR))
-			|| ex_form == LSHIFT_EXPR
-			/* If we have !flag_wrapv, and either ARG0 or
-			   ARG1 is of a signed type, we have to do
-			   PLUS_EXPR, MINUS_EXPR or MULT_EXPR in an unsigned
-			   type in case the operation in outprec precision
-			   could overflow.  Otherwise, we would introduce
-			   signed-overflow undefinedness.  */
-			|| ((!TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0))
-			     || !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1)))
-			    && ((TYPE_PRECISION (TREE_TYPE (arg0)) * 2u
-				 > outprec)
-				|| (TYPE_PRECISION (TREE_TYPE (arg1)) * 2u
-				    > outprec))
-			    && (ex_form == PLUS_EXPR
-				|| ex_form == MINUS_EXPR
-				|| ex_form == MULT_EXPR)))
-		      typex = unsigned_type_for (typex);
-		    else
-		      typex = signed_type_for (typex);
-		    return convert (type,
-				    fold_build2 (ex_form, typex,
-						 convert (typex, arg0),
-						 convert (typex, arg1)));
-		  }
-	      }
-	  }
-	  break;
-
-	case NEGATE_EXPR:
-	case BIT_NOT_EXPR:
-	  /* This is not correct for ABS_EXPR,
-	     since we must test the sign before truncation.  */
-	  {
-	    tree typex = unsigned_type_for (type);
-	    return convert (type,
-			    fold_build1 (ex_form, typex,
-					 convert (typex,
-						  TREE_OPERAND (expr, 0))));
-	  }
-
-	case NOP_EXPR:
-	  /* Don't introduce a
-	     "can't convert between vector values of different size" error.  */
-	  if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == VECTOR_TYPE
-	      && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (expr, 0))))
-		  != GET_MODE_SIZE (TYPE_MODE (type))))
-	    break;
-	  /* If truncating after truncating, might as well do all at once.
-	     If truncating after extending, we may get rid of wasted work.  */
-	  return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type));
-
-	case COND_EXPR:
-	  /* It is sometimes worthwhile to push the narrowing down through
-	     the conditional and never loses.  A COND_EXPR may have a throw
-	     as one operand, which then has void type.  Just leave void
-	     operands as they are.  */
-	  return fold_build3 (COND_EXPR, type, TREE_OPERAND (expr, 0),
-			      VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 1)))
-			      ? TREE_OPERAND (expr, 1)
-			      : convert (type, TREE_OPERAND (expr, 1)),
-			      VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 2)))
-			      ? TREE_OPERAND (expr, 2)
-			      : convert (type, TREE_OPERAND (expr, 2)));
-
-	default:
-	  break;
-	}
-
+      /* When parsing long initializers, we might end up with a lot of casts.
+	 Shortcut this.  */
+      if (TREE_CODE (expr) == INTEGER_CST)
+	return fold_convert (type, expr);
       return build1 (CONVERT_EXPR, type, expr);
 
     case REAL_TYPE:
-      return build1 (FIX_TRUNC_EXPR, type, expr);
+      if (sanitize_flags_p (SANITIZE_FLOAT_CAST)
+	  && current_function_decl != NULL_TREE)
+	{
+	  expr = save_expr (expr);
+	  tree check = ubsan_instrument_float_cast (loc, type, expr);
+	  expr = build1 (FIX_TRUNC_EXPR, type, expr);
+	  if (check == NULL_TREE)
+	    return expr;
+	  return maybe_fold_build2_loc (dofold, loc, COMPOUND_EXPR,
+					TREE_TYPE (expr), check, expr);
+	}
+      else
+	return build1 (FIX_TRUNC_EXPR, type, expr);
 
     case FIXED_POINT_TYPE:
       return build1 (FIXED_CONVERT_EXPR, type, expr);
 
     case COMPLEX_TYPE:
-      return convert (type,
-		      fold_build1 (REALPART_EXPR,
-				   TREE_TYPE (TREE_TYPE (expr)), expr));
+      expr = maybe_fold_build1_loc (dofold, loc, REALPART_EXPR,
+				    TREE_TYPE (TREE_TYPE (expr)), expr);
+      return convert (type, expr);
 
     case VECTOR_TYPE:
       if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
 	{
-	  error ("can%'t convert between vector values of different size");
+	  error ("can%'t convert a vector of type %qT"
+		 " to type %qT which has different size",
+		 TREE_TYPE (expr), type);
 	  return error_mark_node;
 	}
       return build1 (VIEW_CONVERT_EXPR, type, expr);
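
For -fsanitize=float-cast-overflow, the REAL_TYPE case earlier in this
hunk now wraps the FIX_TRUNC_EXPR in a COMPOUND_EXPR together with a
check built by ubsan_instrument_float_cast, which diagnoses at run time
a floating-point value whose truncation does not fit the target integer
type.  Below is a rough standalone sketch of that kind of range check;
it is illustrative only, not the ubsan implementation, and it assumes a
32-bit int so that the bounds shown are exactly representable as
doubles.

/* Rough standalone sketch (illustrative only, not the ubsan implementation)
   of the kind of range check that runs before the truncating conversion:
   diagnose a floating-point value whose truncation cannot be represented in
   the target integer type.  */
#include <stdio.h>
#include <limits.h>

static int checked_double_to_int (double d)
{
  if (!(d > (double) INT_MIN - 1.0 && d < (double) INT_MAX + 1.0))
    {
      fprintf (stderr, "runtime error: %g is outside the range of int\n", d);
      return 0;   /* the real instrumentation still performs the cast */
    }
  return (int) d;
}

int main (void)
{
  printf ("%d\n", checked_double_to_int (3.9));    /* prints 3 */
  printf ("%d\n", checked_double_to_int (1e30));   /* triggers the report */
  return 0;
}
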
@@ -860,11 +990,36 @@
     }
 }
 
-/* Convert EXPR to the complex type TYPE in the usual ways.  */
+/* Convert EXPR to some integer (or enum) type TYPE.
+
+   EXPR must be pointer, integer, discrete (enum, char, or bool), float,
+   fixed-point or vector; in other cases error is called.
+
+   The result of this is always supposed to be a newly created tree node
+   not in use in any existing structure.  */
 
 tree
-convert_to_complex (tree type, tree expr)
+convert_to_integer (tree type, tree expr)
+{
+  return convert_to_integer_1 (type, expr, true);
+}
+
+/* A wrapper around convert_to_integer_1 that only folds the
+   expression if DOFOLD, or if it is CONSTANT_CLASS_P.  */
+
+tree
+convert_to_integer_maybe_fold (tree type, tree expr, bool dofold)
 {
+  return convert_to_integer_1 (type, expr, dofold || CONSTANT_CLASS_P (expr));
+}
+
+/* Convert EXPR to the complex type TYPE in the usual ways.  If FOLD_P is
+   true, try to fold the expression.  */
+
+static tree
+convert_to_complex_1 (tree type, tree expr, bool fold_p)
+{
+  location_t loc = EXPR_LOCATION (expr);
   tree subtype = TREE_TYPE (type);
 
   switch (TREE_CODE (TREE_TYPE (expr)))
@@ -883,37 +1038,65 @@
 
 	if (TYPE_MAIN_VARIANT (elt_type) == TYPE_MAIN_VARIANT (subtype))
 	  return expr;
+	else if (TREE_CODE (expr) == COMPOUND_EXPR)
+	  {
+	    tree t = convert_to_complex_1 (type, TREE_OPERAND (expr, 1),
+					   fold_p);
+	    if (t == TREE_OPERAND (expr, 1))
+	      return expr;
+	    return build2_loc (EXPR_LOCATION (expr), COMPOUND_EXPR,
+			       TREE_TYPE (t), TREE_OPERAND (expr, 0), t);
+	  }
 	else if (TREE_CODE (expr) == COMPLEX_EXPR)
-	  return fold_build2 (COMPLEX_EXPR, type,
-			      convert (subtype, TREE_OPERAND (expr, 0)),
-			      convert (subtype, TREE_OPERAND (expr, 1)));
+	  return maybe_fold_build2_loc (fold_p, loc, COMPLEX_EXPR, type,
+					convert (subtype,
+						 TREE_OPERAND (expr, 0)),
+					convert (subtype,
+						 TREE_OPERAND (expr, 1)));
 	else
 	  {
 	    expr = save_expr (expr);
-	    return
-	      fold_build2 (COMPLEX_EXPR, type,
-			   convert (subtype,
-				    fold_build1 (REALPART_EXPR,
-						 TREE_TYPE (TREE_TYPE (expr)),
-						 expr)),
-			   convert (subtype,
-				    fold_build1 (IMAGPART_EXPR,
-						 TREE_TYPE (TREE_TYPE (expr)),
-						 expr)));
+	    tree realp = maybe_fold_build1_loc (fold_p, loc, REALPART_EXPR,
+						TREE_TYPE (TREE_TYPE (expr)),
+						expr);
+	    tree imagp = maybe_fold_build1_loc (fold_p, loc, IMAGPART_EXPR,
+						TREE_TYPE (TREE_TYPE (expr)),
+						expr);
+	    return maybe_fold_build2_loc (fold_p, loc, COMPLEX_EXPR, type,
+					  convert (subtype, realp),
+					  convert (subtype, imagp));
 	  }
       }
 
     case POINTER_TYPE:
     case REFERENCE_TYPE:
       error ("pointer value used where a complex was expected");
-      return convert_to_complex (type, integer_zero_node);
+      return convert_to_complex_1 (type, integer_zero_node, fold_p);
 
     default:
       error ("aggregate value used where a complex was expected");
-      return convert_to_complex (type, integer_zero_node);
+      return convert_to_complex_1 (type, integer_zero_node, fold_p);
     }
 }
 
+/* A wrapper around convert_to_complex_1 that always folds the
+   expression.  */
+
+tree
+convert_to_complex (tree type, tree expr)
+{
+  return convert_to_complex_1 (type, expr, true);
+}
+
+/* A wrapper around convert_to_complex_1 that only folds the
+   expression if DOFOLD, or if it is CONSTANT_CLASS_P.  */
+
+tree
+convert_to_complex_maybe_fold (tree type, tree expr, bool dofold)
+{
+  return convert_to_complex_1 (type, expr, dofold || CONSTANT_CLASS_P (expr));
+}
+
 /* Convert EXPR to the vector type TYPE in the usual ways.  */
 
 tree
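
convert_to_complex_1, reworked in the hunk above, handles a
complex-to-complex conversion by converting the real and imaginary
parts to the new element type (going through save_expr when the operand
has to be evaluated twice) and rebuilding a COMPLEX_EXPR.  In source
terms the two forms in the small plain-C example below are equivalent:

/* Small illustration (plain C99, assumed example) of what the COMPLEX_TYPE
   case builds: a complex-to-complex conversion is an element-wise conversion
   of the real and imaginary parts to the new element type.  */
#include <stdio.h>
#include <complex.h>

int main (void)
{
  double complex z = 1.25 + 2.5 * I;

  float complex via_cast  = (float complex) z;
  float complex via_parts = (float) creal (z) + (float) cimag (z) * I;

  printf ("%f%+fi\n", crealf (via_cast), cimagf (via_cast));
  printf ("%f%+fi\n", crealf (via_parts), cimagf (via_parts));
  return 0;
}
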
@@ -925,7 +1108,9 @@
     case VECTOR_TYPE:
       if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
 	{
-	  error ("can%'t convert between vector values of different size");
+	  error ("can%'t convert a value of type %qT"
+		 " to vector type %qT which has different size",
+		 TREE_TYPE (expr), type);
 	  return error_mark_node;
 	}
       return build1 (VIEW_CONVERT_EXPR, type, expr);