diff gcc/config/s390/vx-builtins.md @ 111:04ced10e8804

gcc 7
author kono
date Fri, 27 Oct 2017 22:46:09 +0900
parents
children 84e7813d76e9
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/gcc/config/s390/vx-builtins.md	Fri Oct 27 22:46:09 2017 +0900
@@ -0,0 +1,2064 @@
+;;- Instruction patterns for the System z vector facility builtins.
+;;  Copyright (C) 2015-2017 Free Software Foundation, Inc.
+;;  Contributed by Andreas Krebbel (Andreas.Krebbel@de.ibm.com)
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it under
+;; the terms of the GNU General Public License as published by the Free
+;; Software Foundation; either version 3, or (at your option) any later
+;; version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+;; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+;; for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3.  If not see
+;; <http://www.gnu.org/licenses/>.
+
+; The patterns in this file are enabled with -mzvector
+
+; Vector modes with 32- and 64-bit elements; single precision float
+; additionally requires the vector enhancements facility (TARGET_VXE).
+(define_mode_iterator V_HW_32_64 [V4SI V2DI V2DF (V4SF "TARGET_VXE")])
+; Integer vector modes with word and doubleword elements.
+(define_mode_iterator VI_HW_SD [V4SI V2DI])
+; Vector modes with halfword, word, and doubleword elements.
+(define_mode_iterator V_HW_HSD [V8HI V4SI V2DI V2DF])
+; Four-element vector modes.
+(define_mode_iterator V_HW_4 [V4SI V4SF])
+; Full size vector modes with more than one element which are directly supported in vector registers by the hardware.
+(define_mode_iterator VEC_HW  [V16QI V8HI V4SI V2DI V2DF (V4SF "TARGET_VXE")])
+; Floating point vector modes directly supported by the hardware.
+(define_mode_iterator VECF_HW [(V4SF "TARGET_VXE") V2DF])
+
+; The element type of the vector with floating point modes translated
+; to int modes of the same size.
+(define_mode_attr non_vec_int[(V1QI "QI") (V2QI "QI") (V4QI "QI") (V8QI "QI") (V16QI "QI")
+			      (V1HI "HI") (V2HI "HI") (V4HI "HI") (V8HI "HI")
+			      (V1SI "SI") (V2SI "SI") (V4SI "SI")
+			      (V1DI "DI") (V2DI "DI")
+			      (V1SF "SI") (V2SF "SI") (V4SF "SI")
+			      (V1DF "DI") (V2DF "DI")])
+
+; Condition code modes generated by int comparisons
+(define_mode_iterator VICMP [CCVEQ CCVIH CCVIHU])
+
+; Comparisons supported by the vec_cmp* builtins
+(define_code_iterator intcmp [eq gt gtu ge geu lt ltu le leu])
+(define_code_iterator fpcmp  [eq gt ge lt le])
+
+; Comparisons supported by the vec_all/any* builtins
+(define_code_iterator intcmpcc [eq ne gt ge lt le gtu geu ltu leu])
+(define_code_iterator fpcmpcc  [eq ne gt ge unle unlt lt le])
+
+; Flags for vector string instructions (vfae all 4, vfee only ZS and CS, vstrc all 4)
+; These bits are combined into the immediate flag operand of the
+; respective string builtins.
+(define_constants
+  [(VSTRING_FLAG_IN         8)   ; invert result
+   (VSTRING_FLAG_RT         4)   ; result type
+   (VSTRING_FLAG_ZS         2)   ; zero search
+   (VSTRING_FLAG_CS         1)]) ; condition code set
+
+; Rounding modes as being used for e.g. VFI
+(define_constants
+  [(VEC_RND_CURRENT                0)
+   (VEC_RND_NEAREST_AWAY_FROM_ZERO 1)
+   (VEC_RND_SHORT_PREC             3)
+   (VEC_RND_NEAREST_TO_EVEN        4)
+   (VEC_RND_TO_ZERO                5)
+   (VEC_RND_TO_INF                 6)
+   (VEC_RND_TO_MINF                7)])
+
+; Inexact suppression facility flag as being used for e.g. VFI
+(define_constants
+  [(VEC_INEXACT                0)
+   (VEC_NOINEXACT              4)])
+
+
+; Vector gather element
+
+; vgef, vgeg
+; Load one element from memory into the lane of operand 0 selected by
+; the constant mask operand 4.  The address is formed from the
+; corresponding element of index vector operand 2 plus the base and
+; displacement of memory operand 3 (see the %O3(%v2,%R3) template).
+; All other lanes come unmodified from operand 1 (tied to op 0 by "0").
+(define_insn "vec_gather_element<mode>"
+  [(set (match_operand:V_HW_32_64                     0 "register_operand"  "=v")
+	(unspec:V_HW_32_64 [(match_operand:V_HW_32_64 1 "register_operand"   "0")
+			    (match_operand:<tointvec> 2 "register_operand"   "v")
+			    (match_operand:BLK        3 "memory_operand"     "R")
+			    (match_operand:QI         4 "const_mask_operand" "C")]
+			   UNSPEC_VEC_GATHER))]
+  "TARGET_VX && UINTVAL (operands[4]) < GET_MODE_NUNITS (<V_HW_32_64:MODE>mode)"
+  "vge<bhfgq>\t%0,%O3(%v2,%R3),%b4"
+  [(set_attr "op_type" "VRV")])
+
+; Expand a constant vector in which every element has the bit range
+; [operand 1, operand 2] set (IBM bit numbering: bit 0 is the MSB).
+; A start index greater than the end index denotes a wrap-around mask,
+; which is built by computing the complementary range and inverting.
+(define_expand "vec_genmask<mode>"
+  [(match_operand:VI_HW 0 "register_operand" "=v")
+   (match_operand:QI    1 "const_int_operand" "C")
+   (match_operand:QI    2 "const_int_operand" "C")]
+  "TARGET_VX"
+{
+  int nunits = GET_MODE_NUNITS (<VI_HW:MODE>mode);
+  int bitlen = GET_MODE_UNIT_BITSIZE (<VI_HW:MODE>mode);
+  /* To bit little endian style.  */
+  int end = bitlen - 1 - INTVAL (operands[1]);
+  int start = bitlen - 1 - INTVAL (operands[2]);
+  rtx const_vec[16];
+  int i;
+  unsigned HOST_WIDE_INT mask;
+  bool swapped_p = false;
+
+  if (start > end)
+    {
+      /* Wrap-around range: build the complementary range and invert
+	 the resulting mask below.  */
+      i = start - 1; start = end + 1; end = i;
+      swapped_p = true;
+    }
+  /* end == 63 would make the shift below undefined, so handle the
+     all-ones case explicitly.  */
+  if (end == 63)
+    mask = HOST_WIDE_INT_M1U;
+  else
+    mask = (HOST_WIDE_INT_1U << (end + 1)) - 1;
+
+  mask &= ~((HOST_WIDE_INT_1U << start) - 1);
+
+  if (swapped_p)
+    mask = ~mask;
+
+  /* Replicate the mask into every element of the constant vector.  */
+  for (i = 0; i < nunits; i++)
+    const_vec[i] = GEN_INT (trunc_int_for_mode (mask,
+			      GET_MODE_INNER (<VI_HW:MODE>mode)));
+
+  emit_insn (gen_rtx_SET (operands[0],
+			  gen_rtx_CONST_VECTOR (<VI_HW:MODE>mode,
+						gen_rtvec_v (nunits, const_vec))));
+  DONE;
+})
+
+; Expand a constant V16QI in which byte i is 0xff iff bit i (MSB
+; first) of the HImode immediate operand 1 is set, 0x00 otherwise.
+(define_expand "vec_genbytemaskv16qi"
+  [(match_operand:V16QI 0 "register_operand"  "")
+   (match_operand:HI    1 "const_int_operand" "")]
+  "TARGET_VX"
+{
+  int i;
+  unsigned mask = 0x8000;  /* Start with the most significant bit.  */
+  rtx const_vec[16];
+  unsigned HOST_WIDE_INT byte_mask = UINTVAL (operands[1]);
+
+  for (i = 0; i < 16; i++)
+    {
+      if (mask & byte_mask)
+	const_vec[i] = constm1_rtx;
+      else
+	const_vec[i] = const0_rtx;
+      mask = mask >> 1;
+    }
+  emit_insn (gen_rtx_SET (operands[0],
+			  gen_rtx_CONST_VECTOR (V16QImode,
+						gen_rtvec_v (16, const_vec))));
+  DONE;
+})
+
+; Replicate a scalar into every element of the destination vector.
+(define_expand "vec_splats<mode>"
+  [(set (match_operand:VEC_HW                          0 "register_operand" "")
+	(vec_duplicate:VEC_HW (match_operand:<non_vec> 1 "general_operand"  "")))]
+  "TARGET_VX")
+
+; Set the element selected by operand 3 of vector operand 1 to scalar
+; operand 2 and return the result in operand 0.
+(define_expand "vec_insert<mode>"
+  [(set (match_operand:VEC_HW                    0 "register_operand" "")
+	(unspec:VEC_HW [(match_operand:<non_vec> 2 "register_operand" "")
+			(match_operand:SI        3 "nonmemory_operand" "")
+			(match_operand:VEC_HW    1 "register_operand" "")]
+		       UNSPEC_VEC_SET))]
+  "TARGET_VX"
+  "")
+
+; This is vec_set + modulo arithmetic on the element selector (op 2)
+; The (match_dup 0) input means all elements other than the inserted
+; one are taken from the uninitialized destination, i.e. undefined.
+(define_expand "vec_promote<mode>"
+  [(set (match_operand:VEC_HW                    0 "register_operand" "")
+	(unspec:VEC_HW [(match_operand:<non_vec> 1 "register_operand" "")
+			(match_operand:SI        2 "nonmemory_operand" "")
+			(match_dup 0)]
+		       UNSPEC_VEC_SET))]
+  "TARGET_VX"
+  "")
+
+; vec_extract is also an RTL standard name -> vector.md
+
+; vllezb, vllezh, vllezf, vllezg
+; Load a scalar from memory into one element of operand 0 and zero
+; all remaining elements.
+(define_insn "vec_insert_and_zero<mode>"
+  [(set (match_operand:VEC_HW                    0 "register_operand" "=v")
+	(unspec:VEC_HW [(match_operand:<non_vec> 1 "memory_operand"    "R")]
+		       UNSPEC_VEC_INSERT_AND_ZERO))]
+  "TARGET_VX"
+  "vllez<bhfgq>\t%v0,%1"
+  [(set_attr "op_type" "VRX")])
+
+; Vector load to block boundary.  The mask operand 2 selects the
+; boundary (valid values 0..6, enforced by the insn condition).
+(define_insn "vlbb"
+  [(set (match_operand:V16QI              0 "register_operand"   "=v")
+	(unspec:V16QI [(match_operand:BLK 1 "memory_operand"      "R")
+		       (match_operand:QI  2 "const_mask_operand"  "C")]
+		      UNSPEC_VEC_LOAD_BNDRY))]
+  "TARGET_VX && UINTVAL (operands[2]) < 7"
+  "vlbb\t%v0,%1,%2"
+  [(set_attr "op_type" "VRX")])
+
+; Vector load rightmost with length.  Two alternatives: register
+; length (vlrlr) and immediate length (vlrl) - note the swapped
+; operand order in the two asm templates.
+(define_insn "vlrlrv16qi"
+  [(set (match_operand:V16QI              0 "register_operand"  "=v,v")
+	(unspec:V16QI [(match_operand:BLK 2 "memory_operand"     "Q,Q")
+		       (match_operand:SI  1 "nonmemory_operand"  "d,C")]
+		      UNSPEC_VEC_LOAD_LEN_R))]
+  "TARGET_VXE"
+  "@
+   vlrlr\t%v0,%1,%2
+   vlrl\t%v0,%2,%1"
+  [(set_attr "op_type" "VRS,VSI")])
+
+
+; FIXME: The following two patterns might be expressed using vec_merge.
+; But what is the canonical form: (vec_select (vec_merge op0 op1)) or
+; (vec_merge (vec_select op0) (vec_select op1))?
+; vmrhb, vmrhh, vmrhf, vmrhg
+; Interleave the high halves of operands 1 and 2.
+(define_insn "vec_mergeh<mode>"
+  [(set (match_operand:V_128_NOSINGLE                         0 "register_operand" "=v")
+	(unspec:V_128_NOSINGLE [(match_operand:V_128_NOSINGLE 1 "register_operand"  "v")
+			(match_operand:V_128_NOSINGLE         2 "register_operand"  "v")]
+		       UNSPEC_VEC_MERGEH))]
+  "TARGET_VX"
+  "vmrh<bhfgq>\t%v0,%1,%2"
+  [(set_attr "op_type" "VRR")])
+
+; vmrlb, vmrlh, vmrlf, vmrlg
+; Interleave the low halves of operands 1 and 2.
+(define_insn "vec_mergel<mode>"
+  [(set (match_operand:V_128_NOSINGLE                         0 "register_operand" "=v")
+	(unspec:V_128_NOSINGLE [(match_operand:V_128_NOSINGLE 1 "register_operand"  "v")
+			(match_operand:V_128_NOSINGLE         2 "register_operand"  "v")]
+		     UNSPEC_VEC_MERGEL))]
+  "TARGET_VX"
+  "vmrl<bhfgq>\t%v0,%1,%2"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector pack
+
+; vpkh, vpkf, vpkg
+; Truncate the elements of the two wide inputs and pack them into one
+; narrow result vector (modulo truncation, no saturation).
+(define_insn "vec_pack<mode>"
+  [(set (match_operand:<vec_half>                    0 "register_operand" "=v")
+	(unspec:<vec_half> [(match_operand:VI_HW_HSD 1 "register_operand"  "v")
+			    (match_operand:VI_HW_HSD 2 "register_operand"  "v")]
+			   UNSPEC_VEC_PACK))]
+  "TARGET_VX"
+  "vpk<bhfgq>\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector pack saturate
+
+; vpksh, vpksf, vpksg
+; Same as vec_pack but with signed saturation.
+(define_insn "vec_packs<mode>"
+  [(set (match_operand:<vec_half>                    0 "register_operand" "=v")
+	(unspec:<vec_half> [(match_operand:VI_HW_HSD 1 "register_operand"  "v")
+			    (match_operand:VI_HW_HSD 2 "register_operand"  "v")]
+			   UNSPEC_VEC_PACK_SATURATE))]
+  "TARGET_VX"
+  "vpks<bhfgq>\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+
+; This is vec_packs_cc + loading cc into a caller specified memory location.
+; Operand 3 is the memory location receiving the condition code as an
+; SImode integer; operand 4 is a scratch SImode register created here.
+(define_expand "vec_packs_cc<mode>"
+  [(parallel
+    [(set (reg:CCRAW CC_REGNUM)
+	  (unspec:CCRAW [(match_operand:VI_HW_HSD 1 "register_operand" "")
+			 (match_operand:VI_HW_HSD 2 "register_operand" "")]
+			UNSPEC_VEC_PACK_SATURATE_GENCC))
+     (set (match_operand:<vec_half> 0 "register_operand" "")
+	  (unspec:<vec_half> [(match_dup 1) (match_dup 2)]
+			     UNSPEC_VEC_PACK_SATURATE_CC))])
+   (set (match_dup 4)
+	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))
+   (set (match_operand:SI 3 "memory_operand" "")
+	(match_dup 4))]
+  "TARGET_VX"
+{
+  operands[4] = gen_reg_rtx (SImode);
+})
+
+; vpksh, vpksf, vpksg
+; CC-setting variant of the signed saturating pack (vpks...s).
+(define_insn "*vec_packs_cc<mode>"
+  [(set (reg:CCRAW CC_REGNUM)
+	(unspec:CCRAW [(match_operand:VI_HW_HSD 1 "register_operand" "v")
+		       (match_operand:VI_HW_HSD 2 "register_operand" "v")]
+		      UNSPEC_VEC_PACK_SATURATE_GENCC))
+   (set (match_operand:<vec_half> 0 "register_operand" "=v")
+	(unspec:<vec_half> [(match_dup 1) (match_dup 2)]
+			   UNSPEC_VEC_PACK_SATURATE_CC))]
+  "TARGET_VX"
+  "vpks<bhfgq>s\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector pack logical saturate
+
+; vpklsh, vpklsf, vpklsg
+; Pack with unsigned saturation.
+(define_insn "vec_packsu<mode>"
+  [(set (match_operand:<vec_half>                    0 "register_operand" "=v")
+	(unspec:<vec_half> [(match_operand:VI_HW_HSD 1 "register_operand"  "v")
+			    (match_operand:VI_HW_HSD 2 "register_operand"  "v")]
+			   UNSPEC_VEC_PACK_UNSIGNED_SATURATE))]
+  "TARGET_VX"
+  "vpkls<bhfgq>\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+; Emulate saturate unsigned pack on signed operands.
+; Zero out negative elements and continue with the unsigned saturating pack.
+; NOTE: the zeroing is written back into the input registers
+; operands[1]/operands[2] (s390_expand_vcond targets them directly).
+(define_expand "vec_packsu_u<mode>"
+  [(set (match_operand:<vec_half>                    0 "register_operand" "=v")
+	(unspec:<vec_half> [(match_operand:VI_HW_HSD 1 "register_operand"  "v")
+			    (match_operand:VI_HW_HSD 2 "register_operand"  "v")]
+			   UNSPEC_VEC_PACK_UNSIGNED_SATURATE))]
+  "TARGET_VX"
+{
+   rtx null_vec = CONST0_RTX(<MODE>mode);
+   machine_mode half_mode;
+   switch (<MODE>mode)
+   {
+     case E_V8HImode: half_mode = V16QImode; break;
+     case E_V4SImode: half_mode = V8HImode; break;
+     case E_V2DImode: half_mode = V4SImode; break;
+     default: gcc_unreachable ();
+   }
+   /* operands[i] = operands[i] >= 0 ? operands[i] : 0  */
+   s390_expand_vcond (operands[1], operands[1], null_vec,
+		      GE, operands[1], null_vec);
+   s390_expand_vcond (operands[2], operands[2], null_vec,
+		      GE, operands[2], null_vec);
+   emit_insn (gen_rtx_SET (operands[0],
+			   gen_rtx_UNSPEC (half_mode,
+					   gen_rtvec (2, operands[1], operands[2]),
+					   UNSPEC_VEC_PACK_UNSIGNED_SATURATE)));
+   DONE;
+})
+
+; This is vec_packsu_cc + loading cc into a caller specified memory location.
+; FIXME: The reg to target mem copy should be issued by reload?!
+; Operand 3 receives the condition code as an SImode integer via the
+; scratch register created as operand 4.
+(define_expand "vec_packsu_cc<mode>"
+  [(parallel
+    [(set (reg:CCRAW CC_REGNUM)
+	  (unspec:CCRAW [(match_operand:VI_HW_HSD 1 "register_operand" "")
+			 (match_operand:VI_HW_HSD 2 "register_operand" "")]
+			UNSPEC_VEC_PACK_UNSIGNED_SATURATE_GENCC))
+     (set (match_operand:<vec_half> 0 "register_operand" "")
+	  (unspec:<vec_half> [(match_dup 1) (match_dup 2)]
+			     UNSPEC_VEC_PACK_UNSIGNED_SATURATE_CC))])
+   (set (match_dup 4)
+	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))
+   (set (match_operand:SI 3 "memory_operand" "")
+	(match_dup 4))]
+  "TARGET_VX"
+{
+  operands[4] = gen_reg_rtx (SImode);
+})
+
+; vpklsh, vpklsf, vpklsg
+; CC-setting variant of the unsigned saturating pack (vpkls...s).
+(define_insn "*vec_packsu_cc<mode>"
+  [(set (reg:CCRAW CC_REGNUM)
+	(unspec:CCRAW [(match_operand:VI_HW_HSD 1 "register_operand" "v")
+		       (match_operand:VI_HW_HSD 2 "register_operand" "v")]
+		      UNSPEC_VEC_PACK_UNSIGNED_SATURATE_GENCC))
+   (set (match_operand:<vec_half> 0 "register_operand" "=v")
+	(unspec:<vec_half> [(match_dup 1) (match_dup 2)]
+			   UNSPEC_VEC_PACK_UNSIGNED_SATURATE_CC))]
+  "TARGET_VX"
+  "vpkls<bhfgq>s\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector permute
+
+; vec_perm is also RTL standard name, but we can only use it for V16QI
+
+; Byte-wise permute of operands 1 and 2 under control of the byte
+; indices in operand 3 (vperm).
+(define_insn "vec_zperm<mode>"
+  [(set (match_operand:V_HW_HSD                   0 "register_operand" "=v")
+	(unspec:V_HW_HSD [(match_operand:V_HW_HSD 1 "register_operand"  "v")
+			  (match_operand:V_HW_HSD 2 "register_operand"  "v")
+			  (match_operand:V16QI    3 "register_operand"  "v")]
+			 UNSPEC_VEC_PERM))]
+  "TARGET_VX"
+  "vperm\t%v0,%v1,%v2,%v3"
+  [(set_attr "op_type" "VRR")])
+
+; Doubleword permute immediate.  The builtin encodes the two element
+; selectors in bits 0 and 1; vpdi expects them in bits 0 and 2, so the
+; expander remaps the immediate: 0,1,2,3 -> 0,1,4,5.
+(define_expand "vec_permi<mode>"
+  [(set (match_operand:V_HW_64                  0 "register_operand"   "")
+	(unspec:V_HW_64 [(match_operand:V_HW_64 1 "register_operand"   "")
+			 (match_operand:V_HW_64 2 "register_operand"   "")
+			 (match_operand:QI      3 "const_mask_operand" "")]
+			UNSPEC_VEC_PERMI))]
+  "TARGET_VX"
+{
+  HOST_WIDE_INT val = INTVAL (operands[3]);
+  operands[3] = GEN_INT ((val & 1) | (val & 2) << 1);
+})
+
+; The condition checks that bits 1 and 3 of the remapped immediate are
+; clear, i.e. only the vpdi-relevant bits 0 and 2 may be set.
+(define_insn "*vec_permi<mode>"
+  [(set (match_operand:V_HW_64                  0 "register_operand"  "=v")
+	(unspec:V_HW_64 [(match_operand:V_HW_64 1 "register_operand"   "v")
+			 (match_operand:V_HW_64 2 "register_operand"   "v")
+			 (match_operand:QI      3 "const_mask_operand" "C")]
+			UNSPEC_VEC_PERMI))]
+  "TARGET_VX && (UINTVAL (operands[3]) & 10) == 0"
+  "vpdi\t%v0,%v1,%v2,%b3"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector replicate
+
+
+; Replicate from vector element
+; Broadcast the element of operand 1 selected by the constant mask
+; operand 2 into every element of operand 0.
+(define_expand "vec_splat<mode>"
+  [(set (match_operand:V_HW                      0 "register_operand"  "")
+	(vec_duplicate:V_HW (vec_select:<non_vec>
+			     (match_operand:V_HW 1 "register_operand"  "")
+			     (parallel
+			      [(match_operand:QI 2 "const_mask_operand" "")]))))]
+  "TARGET_VX")
+
+; Vector scatter element
+
+; vscef, vsceg
+
+; A 64 bit target address generated from 32 bit elements
+; The 32-bit index element selected by operand 3 is zero-extended and
+; added to the base address operand 2.  Note: the SImode of operand 2
+; is rewritten to DImode by the vec_scatter_element expander below.
+(define_insn "vec_scatter_element<V_HW_4:mode>_DI"
+  [(set (mem:<non_vec>
+	 (plus:DI (zero_extend:DI
+		   (unspec:SI [(match_operand:V4SI 1 "register_operand"   "v")
+			       (match_operand:QI   3 "const_mask_operand" "C")]
+			      UNSPEC_VEC_EXTRACT))
+		  (match_operand:SI                2 "address_operand"   "ZQ")))
+	(unspec:<non_vec> [(match_operand:V_HW_4          0 "register_operand"   "v")
+			   (match_dup 3)] UNSPEC_VEC_EXTRACT))]
+  "TARGET_VX && TARGET_64BIT && UINTVAL (operands[3]) < 4"
+  "vscef\t%v0,%O2(%v1,%R2),%3"
+  [(set_attr "op_type" "VRV")])
+
+; A 31 bit target address is generated from 64 bit elements
+; vsceg
+; The subreg with byte offset 4 picks the low SImode word of the
+; extracted big-endian DImode index element.
+(define_insn "vec_scatter_element<V_HW_64:mode>_SI"
+  [(set (mem:<non_vec>
+	 (plus:SI (subreg:SI
+		   (unspec:<non_vec_int> [(match_operand:V_HW_64 1 "register_operand"   "v")
+					  (match_operand:QI      3 "const_mask_operand" "C")]
+					 UNSPEC_VEC_EXTRACT) 4)
+		  (match_operand:SI                              2 "address_operand"   "ZQ")))
+	(unspec:<non_vec> [(match_operand:V_HW_64                0 "register_operand"   "v")
+			   (match_dup 3)] UNSPEC_VEC_EXTRACT))]
+  "TARGET_VX && !TARGET_64BIT && UINTVAL (operands[3]) < GET_MODE_NUNITS (<V_HW_64:MODE>mode)"
+  "vsce<V_HW_64:bhfgq>\t%v0,%O2(%v1,%R2),%3"
+  [(set_attr "op_type" "VRV")])
+
+; Element size and target address size is the same
+; vscef, vsceg
+(define_insn "vec_scatter_element<mode>_<non_vec_int>"
+  [(set (mem:<non_vec>
+	 (plus:<non_vec_int> (unspec:<non_vec_int>
+			      [(match_operand:<tointvec> 1 "register_operand"   "v")
+			       (match_operand:QI         3 "const_mask_operand" "C")]
+			      UNSPEC_VEC_EXTRACT)
+			     (match_operand:DI           2 "address_operand"   "ZQ")))
+	(unspec:<non_vec> [(match_operand:V_HW_32_64     0 "register_operand"   "v")
+			   (match_dup 3)] UNSPEC_VEC_EXTRACT))]
+  "TARGET_VX && UINTVAL (operands[3]) < GET_MODE_NUNITS (<V_HW_32_64:MODE>mode)"
+  "vsce<bhfgq>\t%v0,%O2(%v1,%R2),%3"
+  [(set_attr "op_type" "VRV")])
+
+; Depending on the address size we have to expand a different pattern.
+; This however cannot be represented in s390-builtins.def so we do the
+; multiplexing here in the expander.
+; The address operand comes in without a fixed mode, so PUT_MODE
+; forces it to the addressing mode matching the selected pattern.
+(define_expand "vec_scatter_element<V_HW_32_64:mode>"
+  [(match_operand:V_HW_32_64 0 "register_operand" "")
+   (match_operand:<tointvec> 1 "register_operand" "")
+   (match_operand 2 "address_operand" "")
+   (match_operand:QI 3 "const_mask_operand" "")]
+  "TARGET_VX"
+{
+  if (TARGET_64BIT)
+    {
+      PUT_MODE (operands[2], DImode);
+      emit_insn (
+	gen_vec_scatter_element<V_HW_32_64:mode>_DI (operands[0], operands[1],
+						     operands[2], operands[3]));
+    }
+  else
+    {
+      PUT_MODE (operands[2], SImode);
+      emit_insn (
+	gen_vec_scatter_element<V_HW_32_64:mode>_SI (operands[0], operands[1],
+						     operands[2], operands[3]));
+    }
+  DONE;
+})
+
+
+; Vector select
+
+; Operand 3 selects bits from either OP1 (0) or OP2 (1)
+
+; The choice of comparison operator is arbitrary as long as the same
+; one is used consistently; eq against zero is used here.
+
+; Operands 1 and 2 are swapped in order to match the altivec builtin.
+; If operand 3 is a const_int bitmask this would be vec_merge
+(define_expand "vec_sel<mode>"
+  [(set (match_operand:V_HW 0 "register_operand" "")
+	(if_then_else:V_HW
+	 (eq (match_operand:<tointvec> 3 "register_operand"  "")
+	     (match_dup 4))
+	 (match_operand:V_HW 2 "register_operand"  "")
+	 (match_operand:V_HW 1 "register_operand"  "")))]
+  "TARGET_VX"
+{
+  operands[4] = CONST0_RTX (<tointvec>mode);
+})
+
+
+; Vector sign extend to doubleword
+
+; Sign extend of right most vector element to respective double-word
+; vsegb, vsegh, vsegf
+(define_insn "vec_extend<mode>"
+  [(set (match_operand:VI_HW_QHS                    0 "register_operand" "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand"  "v")]
+			  UNSPEC_VEC_EXTEND))]
+  "TARGET_VX"
+  "vseg<bhfgq>\t%v0,%1"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector store with length
+
+; Store bytes in OP1 from OP0 with the highest indexed byte to be
+; stored from OP0 given by OP2
+(define_insn "vstl<mode>"
+  [(set (match_operand:BLK             2 "memory_operand"   "=Q")
+	(unspec:BLK [(match_operand:V  0 "register_operand"  "v")
+		     (match_operand:SI 1 "register_operand"  "d")]
+		    UNSPEC_VEC_STORE_LEN))]
+  "TARGET_VX"
+  "vstl\t%v0,%1,%2"
+  [(set_attr "op_type" "VRS")])
+
+; Vector store rightmost with length
+
+; Two alternatives: register length (vstrlr) and immediate length
+; (vstrl) - note the swapped operand order in the two asm templates.
+(define_insn "vstrlrv16qi"
+  [(set (match_operand:BLK                2 "memory_operand"    "=Q,Q")
+	(unspec:BLK [(match_operand:V16QI 0 "register_operand"   "v,v")
+		     (match_operand:SI    1 "nonmemory_operand"  "d,C")]
+		    UNSPEC_VEC_STORE_LEN_R))]
+  "TARGET_VXE"
+  "@
+   vstrlr\t%v0,%2,%1
+   vstrl\t%v0,%1,%2"
+  [(set_attr "op_type" "VRS,VSI")])
+
+
+
+; vector bit permute
+
+; Gather bits of operand 1 under control of the bit indices in
+; operand 2 into a V2DI result (vbperm, requires TARGET_VXE).
+(define_insn "vbpermv16qi"
+  [(set (match_operand:V2DI                0 "register_operand" "=v")
+	(unspec:V2DI [(match_operand:V16QI 1 "register_operand"  "v")
+		      (match_operand:V16QI 2 "register_operand"  "v")]
+		     UNSPEC_VEC_VBPERM))]
+  "TARGET_VXE"
+  "vbperm\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+; Vector unpack high
+
+; vuphb, vuphh, vuphf
+; Sign-extend the high half of operand 1 to double-width elements.
+(define_insn "vec_unpackh<mode>"
+  [(set (match_operand:<vec_double>                    0 "register_operand" "=v")
+	(unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand"  "v")]
+			     UNSPEC_VEC_UNPACKH))]
+  "TARGET_VX"
+  "vuph<bhfgq>\t%v0,%v1"
+  [(set_attr "op_type" "VRR")])
+
+; vuplhb, vuplhh, vuplhf
+; Zero-extend (logical) the high half of operand 1.
+(define_insn "vec_unpackh_l<mode>"
+  [(set (match_operand:<vec_double>                    0 "register_operand" "=v")
+	(unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand"  "v")]
+			     UNSPEC_VEC_UNPACKH_L))]
+  "TARGET_VX"
+  "vuplh<bhfgq>\t%v0,%v1"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector unpack low
+
+; vuplb, vuplhw, vuplf
+; Sign-extend the low half of operand 1 to double-width elements.
+(define_insn "vec_unpackl<mode>"
+  [(set (match_operand:<vec_double>                    0 "register_operand" "=v")
+	(unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand"  "v")]
+			     UNSPEC_VEC_UNPACKL))]
+  "TARGET_VX"
+  "vupl<bhfgq><w>\t%v0,%v1"
+  [(set_attr "op_type" "VRR")])
+
+; vupllb, vupllh, vupllf
+; Zero-extend (logical) the low half of operand 1.
+(define_insn "vec_unpackl_l<mode>"
+  [(set (match_operand:<vec_double>                    0 "register_operand" "=v")
+	(unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand"  "v")]
+			     UNSPEC_VEC_UNPACKL_L))]
+  "TARGET_VX"
+  "vupll<bhfgq>\t%v0,%v1"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector add
+
+; Vector add compute carry
+
+; vaccb, vacch, vaccf, vaccg, vaccq
+; Element-wise carry-out of op1 + op2.  The "%" on operand 1 marks the
+; operands as commutative.
+(define_insn "vacc<bhfgq>_<mode>"
+  [(set (match_operand:VIT_HW                 0 "register_operand" "=v")
+	(unspec:VIT_HW [(match_operand:VIT_HW 1 "register_operand" "%v")
+			(match_operand:VIT_HW 2 "register_operand"  "v")]
+		       UNSPEC_VEC_ADDC))]
+  "TARGET_VX"
+  "vacc<bhfgq>\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+; Vector add with carry
+
+; 128-bit add with carry-in operand 3.
+(define_insn "vacq"
+  [(set (match_operand:TI             0 "register_operand" "=v")
+	(unspec:TI [(match_operand:TI 1 "register_operand" "%v")
+		    (match_operand:TI 2 "register_operand"  "v")
+		    (match_operand:TI 3 "register_operand"  "v")]
+		   UNSPEC_VEC_ADDE_U128))]
+  "TARGET_VX"
+  "vacq\t%v0,%v1,%v2,%v3"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector add with carry compute carry
+
+; Carry-out of the 128-bit add-with-carry.
+(define_insn "vacccq"
+  [(set (match_operand:TI             0 "register_operand" "=v")
+	(unspec:TI [(match_operand:TI 1 "register_operand" "%v")
+		    (match_operand:TI 2 "register_operand"  "v")
+		    (match_operand:TI 3 "register_operand"  "v")]
+		   UNSPEC_VEC_ADDEC_U128))]
+  "TARGET_VX"
+  "vacccq\t%v0,%v1,%v2,%v3"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector and
+
+; Vector and with complement
+
+; vnc
+; op0 = op1 & ~op2 - note that operand 2 is the complemented one in
+; the RTL while the asm template keeps the architectural order.
+(define_insn "vec_andc<mode>3"
+  [(set (match_operand:VT_HW                       0 "register_operand" "=v")
+	(and:VT_HW (not:VT_HW (match_operand:VT_HW 2 "register_operand"  "v"))
+		  (match_operand:VT_HW             1 "register_operand"  "v")))]
+  "TARGET_VX"
+  "vnc\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector average
+
+; vavgb, vavgh, vavgf, vavgg
+; Signed element-wise average; operands commutative ("%").
+(define_insn "vec_avg<mode>"
+  [(set (match_operand:VI_HW                0 "register_operand" "=v")
+	(unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "%v")
+		       (match_operand:VI_HW 2 "register_operand"  "v")]
+		      UNSPEC_VEC_AVG))]
+  "TARGET_VX"
+  "vavg<bhfgq>\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+; Vector average logical
+
+; vavglb, vavglh, vavglf, vavglg
+; Unsigned element-wise average; operands commutative ("%").
+(define_insn "vec_avgu<mode>"
+  [(set (match_operand:VI_HW                0 "register_operand" "=v")
+	(unspec:VI_HW [(match_operand:VI_HW 1 "register_operand" "%v")
+		       (match_operand:VI_HW 2 "register_operand"  "v")]
+		      UNSPEC_VEC_AVGU))]
+  "TARGET_VX"
+  "vavgl<bhfgq>\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector checksum
+
+; Compute a 32-bit checksum of operand 1 with initial value taken
+; from operand 2 (vcksm).
+(define_insn "vec_checksum"
+  [(set (match_operand:V4SI               0 "register_operand" "=v")
+	(unspec:V4SI [(match_operand:V4SI 1 "register_operand"  "v")
+		      (match_operand:V4SI 2 "register_operand"  "v")]
+		     UNSPEC_VEC_CHECKSUM))]
+  "TARGET_VX"
+  "vcksm\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+;;
+;; Vector compare
+;;
+
+; vec_all/any int compares
+
+; Operand 0 receives the scalar boolean result ("all elements
+; compare true").  The final 'true' argument selects the all variant
+; in s390_expand_vec_compare_cc.
+(define_expand "vec_all_<intcmpcc:code><VI_HW:mode>"
+  [(match_operand:SI                0 "register_operand" "")
+   (intcmpcc (match_operand:VI_HW 1 "register_operand" "")
+	     (match_operand:VI_HW 2 "register_operand" ""))]
+  "TARGET_VX"
+{
+  s390_expand_vec_compare_cc (operands[0],
+			      <intcmpcc:CODE>,
+			      operands[1],
+			      operands[2],
+			      true);
+  DONE;
+})
+
+; As above, but "any element compares true" (final argument false).
+(define_expand "vec_any_<intcmpcc:code><VI_HW:mode>"
+  [(match_operand:SI                0 "register_operand" "")
+   (intcmpcc (match_operand:VI_HW 1 "register_operand" "")
+	     (match_operand:VI_HW 2 "register_operand" ""))]
+  "TARGET_VX"
+{
+  s390_expand_vec_compare_cc (operands[0],
+			      <intcmpcc:CODE>,
+			      operands[1],
+			      operands[2],
+			      false);
+  DONE;
+})
+
+; vec_all/any fp compares
+
+; Floating point variant of vec_all (all elements compare true).
+(define_expand "vec_all_<fpcmpcc:code><mode>"
+  [(match_operand:SI               0 "register_operand" "")
+   (fpcmpcc (match_operand:VECF_HW 1 "register_operand" "")
+	    (match_operand:VECF_HW 2 "register_operand" ""))]
+  "TARGET_VX"
+{
+  s390_expand_vec_compare_cc (operands[0],
+			      <fpcmpcc:CODE>,
+			      operands[1],
+			      operands[2],
+			      true);
+  DONE;
+})
+
+; Floating point variant of vec_any (any element compares true).
+(define_expand "vec_any_<fpcmpcc:code><mode>"
+  [(match_operand:SI               0 "register_operand" "")
+   (fpcmpcc (match_operand:VECF_HW 1 "register_operand" "")
+	    (match_operand:VECF_HW 2 "register_operand" ""))]
+  "TARGET_VX"
+{
+  s390_expand_vec_compare_cc (operands[0],
+			      <fpcmpcc:CODE>,
+			      operands[1],
+			      operands[2],
+			      false);
+  DONE;
+})
+
+
+; Compare without generating CC
+
+; Element-wise integer compare producing an all-ones/all-zeros mask
+; per element in operand 0.
+(define_expand "vec_cmp<intcmp:code><VI_HW:mode>"
+  [(set (match_operand:VI_HW               0 "register_operand" "=v")
+	(intcmp:VI_HW (match_operand:VI_HW 1 "register_operand"  "v")
+		      (match_operand:VI_HW 2 "register_operand"  "v")))]
+  "TARGET_VX"
+{
+  s390_expand_vec_compare (operands[0], <intcmp:CODE>, operands[1], operands[2]);
+  DONE;
+})
+
+; Element-wise fp compare; the mask result uses the integer vector
+; mode of the same element width (<tointvec>).
+(define_expand "vec_cmp<fpcmp:code><mode>"
+  [(set (match_operand:<tointvec>              0 "register_operand" "=v")
+	(fpcmp:<tointvec> (match_operand:VF_HW 1 "register_operand"  "v")
+		       (match_operand:VF_HW 2 "register_operand"  "v")))]
+  "TARGET_VX"
+{
+  s390_expand_vec_compare (operands[0], <fpcmp:CODE>, operands[1], operands[2]);
+  DONE;
+})
+
+
+; Vector count leading zeros
+
+; vec_cntlz -> clz
+; vec_cnttz -> ctz
+
+; Vector xor
+
+; vec_xor -> xor
+
+; Vector Galois field multiply sum
+
+; vgfmb, vgfmh, vgfmf
+; Carry-less (Galois field) multiply with sum of even/odd products.
+(define_insn "vec_gfmsum<mode>"
+  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
+			   (match_operand:VI_HW_QHS 2 "register_operand" "v")]
+			  UNSPEC_VEC_GFMSUM))]
+  "TARGET_VX"
+  "vgfm<bhfgq>\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+; Doubleword variant producing a 128-bit result (vgfmg).
+(define_insn "vec_gfmsum_128"
+  [(set (match_operand:V16QI 0 "register_operand" "=v")
+	(unspec:V16QI [(match_operand:V2DI 1 "register_operand" "v")
+		       (match_operand:V2DI 2 "register_operand" "v")]
+		      UNSPEC_VEC_GFMSUM_128))]
+  "TARGET_VX"
+  "vgfmg\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+; vgfmab, vgfmah, vgfmaf
+; As vec_gfmsum but additionally accumulating operand 3.
+(define_insn "vec_gfmsum_accum<mode>"
+  [(set (match_operand:<vec_double> 0 "register_operand" "=v")
+	(unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "v")
+			      (match_operand:VI_HW_QHS 2 "register_operand" "v")
+			      (match_operand:<vec_double> 3 "register_operand" "v")]
+			     UNSPEC_VEC_GFMSUM_ACCUM))]
+  "TARGET_VX"
+  "vgfma<bhfgq>\t%v0,%v1,%v2,%v3"
+  [(set_attr "op_type" "VRR")])
+
+; Doubleword accumulate variant (vgfmag).
+(define_insn "vec_gfmsum_accum_128"
+  [(set (match_operand:V16QI 0 "register_operand" "=v")
+	(unspec:V16QI [(match_operand:V2DI 1 "register_operand" "v")
+		       (match_operand:V2DI 2 "register_operand" "v")
+		       (match_operand:V16QI 3 "register_operand" "v")]
+		      UNSPEC_VEC_GFMSUM_ACCUM_128))]
+  "TARGET_VX"
+  "vgfmag\t%v0,%v1,%v2,%v3"
+  [(set_attr "op_type" "VRR")])
+
+
+; FIXME: vec_neg ?
+
+; Vector load positive: vec_abs -> abs
+; Vector maximum vec_max -> smax, logical vec_max -> umax
+; Vector maximum vec_min -> smin, logical vec_min -> umin
+
+
+; Vector multiply and add high
+
+; vec_mladd -> vec_vmal
+; vmalb, vmalh, vmalf, vmalg
+; op0 = low half of op1 * op2 + op3.  The "%" on operand 1 marks
+; operands 1 and 2 as commutative.
+(define_insn "vec_vmal<mode>"
+  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "%v")
+			   (match_operand:VI_HW_QHS 2 "register_operand"  "v")
+			   (match_operand:VI_HW_QHS 3 "register_operand"  "v")]
+			  UNSPEC_VEC_VMAL))]
+  "TARGET_VX"
+  "vmal<bhfgq><w>\t%v0,%v1,%v2,%v3"
+  [(set_attr "op_type" "VRR")])
+
+; vec_mhadd -> vec_vmah/vec_vmalh
+
+; vmahb; vmahh, vmahf, vmahg
+; Signed multiply-and-add returning the high half.
+(define_insn "vec_vmah<mode>"
+  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "%v")
+			   (match_operand:VI_HW_QHS 2 "register_operand"  "v")
+			   (match_operand:VI_HW_QHS 3 "register_operand"  "v")]
+			  UNSPEC_VEC_VMAH))]
+  "TARGET_VX"
+  "vmah<bhfgq>\t%v0,%v1,%v2,%v3"
+  [(set_attr "op_type" "VRR")])
+
+; vmalhb; vmalhh, vmalhf, vmalhg
+; Unsigned (logical) multiply-and-add returning the high half.
+(define_insn "vec_vmalh<mode>"
+  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "%v")
+			   (match_operand:VI_HW_QHS 2 "register_operand"  "v")
+			   (match_operand:VI_HW_QHS 3 "register_operand"  "v")]
+			  UNSPEC_VEC_VMALH))]
+  "TARGET_VX"
+  "vmalh<bhfgq>\t%v0,%v1,%v2,%v3"
+  [(set_attr "op_type" "VRR")])
+
+; vec_meadd -> vec_vmae/vec_vmale
+
+; vmaeb; vmaeh, vmaef, vmaeg
+; Signed widening multiply of even elements plus accumulator op3.
+(define_insn "vec_vmae<mode>"
+  [(set (match_operand:<vec_double> 0 "register_operand" "=v")
+	(unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand"   "%v")
+			      (match_operand:VI_HW_QHS 2 "register_operand"    "v")
+			      (match_operand:<vec_double> 3 "register_operand" "v")]
+			     UNSPEC_VEC_VMAE))]
+  "TARGET_VX"
+  "vmae<bhfgq>\t%v0,%v1,%v2,%v3"
+  [(set_attr "op_type" "VRR")])
+
+; vmaleb; vmaleh, vmalef, vmaleg
+; Unsigned widening multiply of even elements plus accumulator op3.
+(define_insn "vec_vmale<mode>"
+  [(set (match_operand:<vec_double> 0 "register_operand" "=v")
+	(unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "%v")
+			      (match_operand:VI_HW_QHS 2 "register_operand" "v")
+			      (match_operand:<vec_double> 3 "register_operand" "v")]
+			     UNSPEC_VEC_VMALE))]
+  "TARGET_VX"
+  "vmale<bhfgq>\t%v0,%v1,%v2,%v3"
+  [(set_attr "op_type" "VRR")])
+
+; vec_moadd -> vec_vmao/vec_vmalo
+
+; vmaob; vmaoh, vmaof, vmaog
+; Signed widening multiply of odd elements plus accumulator op3.
+(define_insn "vec_vmao<mode>"
+  [(set (match_operand:<vec_double> 0 "register_operand" "=v")
+	(unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "%v")
+			      (match_operand:VI_HW_QHS 2 "register_operand" "v")
+			      (match_operand:<vec_double> 3 "register_operand" "v")]
+			     UNSPEC_VEC_VMAO))]
+  "TARGET_VX"
+  "vmao<bhfgq>\t%v0,%v1,%v2,%v3"
+  [(set_attr "op_type" "VRR")])
+
+; vmalob; vmaloh, vmalof, vmalog
+; Unsigned widening multiply of odd elements plus accumulator op3.
+; Operand 1 carries the "%" commutativity marker like every other
+; multiply-and-add pattern above (vec_vmao/vec_vmae/vec_vmale):
+; the element-wise multiply is commutative in operands 1 and 2.
+(define_insn "vec_vmalo<mode>"
+  [(set (match_operand:<vec_double> 0 "register_operand" "=v")
+	(unspec:<vec_double> [(match_operand:VI_HW_QHS 1 "register_operand" "%v")
+			      (match_operand:VI_HW_QHS 2 "register_operand" "v")
+			      (match_operand:<vec_double> 3 "register_operand" "v")]
+			     UNSPEC_VEC_VMALO))]
+  "TARGET_VX"
+  "vmalo<bhfgq>\t%v0,%v1,%v2,%v3"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector multiply high
+
+; vec_mulh -> vec_smulh/vec_umulh
+
+; Signed multiply, returning the high half of each product.
+; vmhb, vmhh, vmhf
+(define_insn "vec_smulh<mode>"
+  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "%v")
+			   (match_operand:VI_HW_QHS 2 "register_operand" "v")]
+			  UNSPEC_VEC_SMULT_HI))]
+  "TARGET_VX"
+  "vmh<bhfgq>\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+; Unsigned (logical) multiply, returning the high half of each product.
+; vmlhb, vmlhh, vmlhf
+(define_insn "vec_umulh<mode>"
+  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "%v")
+			   (match_operand:VI_HW_QHS 2 "register_operand" "v")]
+			  UNSPEC_VEC_UMULT_HI))]
+  "TARGET_VX"
+  "vmlh<bhfgq>\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector multiply low
+
+; vec_mule -> vec_widen_umult_even/vec_widen_smult_even
+; vec_mulo -> vec_widen_umult_odd/vec_widen_smult_odd
+
+
+; Vector nor
+
+; ~(op1 | op2) - expressed with generic rtx codes so the optimizers can
+; simplify it; ops 1/2 are commutative ("%").
+(define_insn "vec_nor<mode>3"
+  [(set (match_operand:VT_HW 0 "register_operand" "=v")
+	(not:VT_HW
+	 (ior:VT_HW (match_operand:VT_HW 1 "register_operand" "%v")
+		    (match_operand:VT_HW 2 "register_operand" "v"))))]
+  "TARGET_VX"
+  "vno\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector or
+
+; Vector population count vec_popcnt -> popcount
+; Vector element rotate left logical vec_rl -> vrotl, vec_rli -> rot
+
+; Vector element rotate and insert under mask
+
+; op2 rotated left by op4 bits, merged into op1 (tied to the output via
+; constraint "0") under the bit mask op3.
+; verimb, verimh, verimf, verimg
+(define_insn "verim<mode>"
+  [(set (match_operand:VI_HW                0 "register_operand" "=v")
+	(unspec:VI_HW [(match_operand:VI_HW 1 "register_operand"  "0")
+		       (match_operand:VI_HW 2 "register_operand"  "v")
+		       (match_operand:VI_HW 3 "register_operand"  "v")
+		       (match_operand:QI    4 "const_int_operand" "C")]
+		      UNSPEC_VEC_RL_MASK))]
+  "TARGET_VX"
+  "verim<bhfgq>\t%v0,%v2,%v3,%b4"
+  [(set_attr "op_type" "VRI")])
+
+
+; Vector shift left
+
+; Whole-vector shift left by a bit count taken from op2; the shift count
+; operand may have any QHS element mode, hence the second iterator.
+(define_insn "vec_sll<VI_HW:mode><VI_HW_QHS:mode>"
+  [(set (match_operand:VI_HW                    0 "register_operand" "=v")
+	(unspec:VI_HW [(match_operand:VI_HW     1 "register_operand"  "v")
+		       (match_operand:VI_HW_QHS 2 "register_operand"  "v")]
+		      UNSPEC_VEC_SLL))]
+  "TARGET_VX"
+  "vsl\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector shift left by byte
+
+; Pattern definition in vector.md, see vec_vslb
+; The shift count register is reinterpreted as V16QI to match the
+; vector.md pattern; PUT_MODE changes the rtx in place.
+(define_expand "vec_slb<mode>"
+  [(set (match_operand:V_HW 0 "register_operand"                     "")
+	(unspec:V_HW [(match_operand:V_HW 1 "register_operand"       "")
+		      (match_operand:<tointvec> 2 "register_operand" "")]
+		     UNSPEC_VEC_SLB))]
+  "TARGET_VX"
+{
+  PUT_MODE (operands[2], V16QImode);
+})
+
+; Vector shift left double by byte
+
+; Concatenate op1:op2 and shift left by op3 bytes, keeping the leftmost
+; 16 bytes.
+(define_insn "vec_sld<mode>"
+  [(set (match_operand:V_HW 0 "register_operand"              "=v")
+	(unspec:V_HW [(match_operand:V_HW 1 "register_operand" "v")
+		      (match_operand:V_HW 2 "register_operand" "v")
+		      (match_operand:QI 3 "const_int_operand"  "C")]
+		     UNSPEC_VEC_SLDB))]
+  "TARGET_VX"
+  "vsldb\t%v0,%v1,%v2,%b3"
+  [(set_attr "op_type" "VRI")])
+
+; Shift-left-double by words: reuse the byte pattern after converting
+; the word count to a byte count (<< 2).
+(define_expand "vec_sldw<mode>"
+  [(set (match_operand:V_HW 0 "register_operand"               "")
+	(unspec:V_HW [(match_operand:V_HW 1 "register_operand" "")
+		      (match_operand:V_HW 2 "register_operand" "")
+		      (match_operand:QI 3 "const_int_operand"  "")]
+		     UNSPEC_VEC_SLDB))]
+  "TARGET_VX"
+{
+  operands[3] = GEN_INT (INTVAL (operands[3]) << 2);
+})
+
+; Vector shift right arithmetic
+
+; Whole-vector arithmetic shift right by a bit count from op2.
+(define_insn "vec_sral<VI_HW:mode><VI_HW_QHS:mode>"
+  [(set (match_operand:VI_HW                    0 "register_operand" "=v")
+	(unspec:VI_HW [(match_operand:VI_HW     1 "register_operand"  "v")
+		       (match_operand:VI_HW_QHS 2 "register_operand"  "v")]
+		      UNSPEC_VEC_SRAL))]
+  "TARGET_VX"
+  "vsra\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector shift right arithmetic by byte
+
+(define_insn "vec_srab<mode>"
+  [(set (match_operand:V_HW 0 "register_operand"                    "=v")
+	(unspec:V_HW [(match_operand:V_HW 1 "register_operand"       "v")
+		      (match_operand:<tointvec> 2 "register_operand" "v")]
+		     UNSPEC_VEC_SRAB))]
+  "TARGET_VX"
+  "vsrab\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector shift right logical
+
+; Whole-vector logical shift right by a bit count from op2.
+(define_insn "vec_srl<VI_HW:mode><VI_HW_QHS:mode>"
+  [(set (match_operand:VI_HW                    0 "register_operand" "=v")
+	(unspec:VI_HW [(match_operand:VI_HW     1 "register_operand"  "v")
+		       (match_operand:VI_HW_QHS 2 "register_operand"  "v")]
+		      UNSPEC_VEC_SRL))]
+  "TARGET_VX"
+  "vsrl\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector shift right logical by byte
+
+; Pattern definition in vector.md, see vec_vsrb
+; Mirror of vec_slb: reinterpret the count register as V16QI in place.
+(define_expand "vec_srb<mode>"
+  [(set (match_operand:V_HW 0 "register_operand"                     "")
+	(unspec:V_HW [(match_operand:V_HW 1 "register_operand"       "")
+		      (match_operand:<tointvec> 2 "register_operand" "")]
+		     UNSPEC_VEC_SRLB))]
+  "TARGET_VX"
+{
+  PUT_MODE (operands[2], V16QImode);
+})
+
+; Vector subtract
+
+; Vector subtract compute borrow indication
+
+; Result elements are the borrow-out of op1 - op2 (not the difference).
+; vscbib, vscbih, vscbif, vscbig, vscbiq
+(define_insn "vscbi<bhfgq>_<mode>"
+  [(set (match_operand:VIT_HW 0 "register_operand"                "=v")
+	(unspec:VIT_HW [(match_operand:VIT_HW 1 "register_operand" "v")
+			(match_operand:VIT_HW 2 "register_operand" "v")]
+		      UNSPEC_VEC_SUBC))]
+  "TARGET_VX"
+  "vscbi<bhfgq>\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+; Vector subtract with borrow indication
+
+; 128-bit subtract with borrow-in op3.
+(define_insn "vsbiq"
+  [(set (match_operand:TI 0 "register_operand"               "=v")
+	(unspec:TI [(match_operand:TI 1 "register_operand"    "v")
+		       (match_operand:TI 2 "register_operand" "v")
+		       (match_operand:TI 3 "register_operand" "v")]
+		      UNSPEC_VEC_SUBE_U128))]
+  "TARGET_VX"
+  "vsbiq\t%v0,%v1,%v2,%v3"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector subtract with borrow compute and borrow indication
+
+; 128-bit subtract with borrow-in op3; result is the borrow-out.
+(define_insn "vsbcbiq"
+  [(set (match_operand:TI 0 "register_operand"               "=v")
+	(unspec:TI [(match_operand:TI 1 "register_operand"    "v")
+		       (match_operand:TI 2 "register_operand" "v")
+		       (match_operand:TI 3 "register_operand" "v")]
+		      UNSPEC_VEC_SUBEC_U128))]
+  "TARGET_VX"
+  "vsbcbiq\t%v0,%v1,%v2,%v3"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector sum across
+
+; Sum across DImode parts of the 1st operand and add the rightmost
+; element of 2nd operand
+; The matching insn lives in vector.md; this expand only exposes the
+; builtin name.
+; vsumgh, vsumgf
+(define_expand "vec_sum2<mode>"
+  [(set (match_operand:V2DI 0 "register_operand" "")
+	(unspec:V2DI [(match_operand:VI_HW_HS 1 "register_operand" "")
+		      (match_operand:VI_HW_HS 2 "register_operand" "")]
+		     UNSPEC_VEC_VSUMG))]
+  "TARGET_VX")
+
+; Sum across the whole 128-bit register.  VI_HW_SD covers V4SI/V2DI,
+; so <bhfgq> yields the f/g variants.
+; vsumqf, vsumqg
+(define_insn "vec_sum_u128<mode>"
+  [(set (match_operand:V2DI 0 "register_operand" "=v")
+	(unspec:V2DI [(match_operand:VI_HW_SD 1 "register_operand" "v")
+		      (match_operand:VI_HW_SD 2 "register_operand" "v")]
+		     UNSPEC_VEC_VSUMQ))]
+  "TARGET_VX"
+  "vsumq<bhfgq>\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+; Sum across SImode parts; matching insn in vector.md.
+; vsumb, vsumh
+(define_expand "vec_sum4<mode>"
+  [(set (match_operand:V4SI 0 "register_operand" "")
+	(unspec:V4SI [(match_operand:VI_HW_QH 1 "register_operand" "")
+		      (match_operand:VI_HW_QH 2 "register_operand" "")]
+		     UNSPEC_VEC_VSUM))]
+  "TARGET_VX")
+
+
+; Vector test under mask
+
+; Run vtm (see *vec_test_mask below) and convert the resulting CC into
+; an SImode integer in op0.
+(define_expand "vec_test_mask_int<mode>"
+  [(set (reg:CCRAW CC_REGNUM)
+	(unspec:CCRAW [(match_operand:V_HW 1 "register_operand" "")
+		       (match_operand:<tointvec> 2 "register_operand" "")]
+		      UNSPEC_VEC_TEST_MASK))
+   (set (match_operand:SI 0 "register_operand" "")
+	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX")
+
+; vtm only sets CC; it produces no register result.
+(define_insn "*vec_test_mask<mode>"
+  [(set (reg:CCRAW CC_REGNUM)
+	(unspec:CCRAW [(match_operand:V_HW 0 "register_operand" "v")
+		       (match_operand:<tointvec> 1 "register_operand" "v")]
+		      UNSPEC_VEC_TEST_MASK))]
+  "TARGET_VX"
+  "vtm\t%v0,%v1"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector multiply sum logical
+
+; vmslg with a V16QI (128-bit) accumulator view; requires the
+; vector-enhancements facility (TARGET_VXE).
+(define_insn "vec_msumv2di"
+  [(set (match_operand:V16QI 0 "register_operand" "=v")
+	(unspec:V16QI [(match_operand:V2DI  1 "register_operand"   "v")
+		       (match_operand:V2DI  2 "register_operand"   "v")
+		       (match_operand:V16QI 3 "register_operand"   "v")
+		       (match_operand:QI    4 "const_mask_operand" "C")]
+		      UNSPEC_VEC_MSUM))]
+  "TARGET_VXE"
+  "vmslg\t%v0,%v1,%v2,%v3,%4"
+  [(set_attr "op_type" "VRR")])
+
+; Same instruction as vec_msumv2di but with a TImode accumulator/result.
+(define_insn "vmslg"
+  [(set (match_operand:TI 0 "register_operand" "=v")
+	(unspec:TI [(match_operand:V2DI  1 "register_operand"   "v")
+		    (match_operand:V2DI  2 "register_operand"   "v")
+		    (match_operand:TI    3 "register_operand"   "v")
+		    (match_operand:QI    4 "const_mask_operand" "C")]
+		   UNSPEC_VEC_MSUM))]
+  "TARGET_VXE"
+  "vmslg\t%v0,%v1,%v2,%v3,%4"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector find any element equal
+
+; If the zero-search (ZS) flag is set in the immediate, emit the vfaez
+; extended mnemonic (which implies ZS) and drop the bit from the
+; remaining mask operand.
+; vfaeb, vfaeh, vfaef
+; vfaezb, vfaezh, vfaezf
+(define_insn "vfae<mode>"
+  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
+			   (match_operand:VI_HW_QHS 2 "register_operand" "v")
+			   (match_operand:QI        3 "const_mask_operand" "C")]
+			  UNSPEC_VEC_VFAE))]
+  "TARGET_VX"
+{
+  unsigned HOST_WIDE_INT flags = UINTVAL (operands[3]);
+
+  if (flags & VSTRING_FLAG_ZS)
+    {
+      flags &= ~VSTRING_FLAG_ZS;
+      operands[3] = GEN_INT (flags);
+      return "vfaez<bhfgq>\t%v0,%v1,%v2,%b3";
+    }
+  return "vfae<bhfgq>\t%v0,%v1,%v2,%b3";
+}
+[(set_attr "op_type" "VRR")])
+
+; CC-setting variant of vfae; same ZS-flag rewriting, but emitting the
+; "s" mnemonics which additionally set CC (modeled via the second set).
+; vfaebs, vfaehs, vfaefs
+; vfaezbs, vfaezhs, vfaezfs
+(define_insn "*vfaes<mode>"
+  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand"   "v")
+			   (match_operand:VI_HW_QHS 2 "register_operand"   "v")
+			   (match_operand:QI        3 "const_mask_operand" "C")]
+			  UNSPEC_VEC_VFAE))
+   (set (reg:CCRAW CC_REGNUM)
+	(unspec:CCRAW [(match_dup 1)
+		       (match_dup 2)
+		       (match_dup 3)]
+		      UNSPEC_VEC_VFAECC))]
+  "TARGET_VX"
+{
+  unsigned HOST_WIDE_INT flags = UINTVAL (operands[3]);
+
+  if (flags & VSTRING_FLAG_ZS)
+    {
+      flags &= ~VSTRING_FLAG_ZS;
+      operands[3] = GEN_INT (flags);
+      return "vfaez<bhfgq>s\t%v0,%v1,%v2,%b3";
+    }
+  return "vfae<bhfgq>s\t%v0,%v1,%v2,%b3";
+}
+  [(set_attr "op_type" "VRR")])
+
+; Builtin entry point for the zero-search form: force the ZS flag into
+; the mask and let vfae<mode> pick the vfaez mnemonic.
+(define_expand "vfaez<mode>"
+  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand"  "")
+			   (match_operand:VI_HW_QHS 2 "register_operand"  "")
+			   (match_operand:QI        3 "const_mask_operand" "")]
+			  UNSPEC_VEC_VFAE))]
+  "TARGET_VX"
+{
+  operands[3] = GEN_INT (INTVAL (operands[3]) | VSTRING_FLAG_ZS);
+})
+
+; CC-consuming builtin entry: force the condition-code-set (CS) flag and
+; store the CC value to the int location op4.
+(define_expand "vfaes<mode>"
+  [(parallel
+    [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand"  "")
+			   (match_operand:VI_HW_QHS 2 "register_operand"  "")
+			   (match_operand:QI        3 "const_mask_operand" "")]
+			  UNSPEC_VEC_VFAE))
+   (set (reg:CCRAW CC_REGNUM)
+	(unspec:CCRAW [(match_dup 1)
+		       (match_dup 2)
+		       (match_dup 3)]
+		      UNSPEC_VEC_VFAECC))])
+   (set (match_operand:SI 4 "memory_operand" "")
+	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX"
+{
+  operands[3] = GEN_INT (INTVAL (operands[3]) | VSTRING_FLAG_CS);
+})
+
+; Combined zero-search + CC form: force both ZS and CS into the mask.
+; Operand 3 uses QImode like the sibling vfaes expander and the *vfaes
+; insn this has to match (it was inconsistently declared SImode; CONST_INTs
+; are VOIDmode, so either matched, but QI keeps the patterns uniform).
+(define_expand "vfaezs<mode>"
+  [(parallel
+    [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand"  "")
+			   (match_operand:VI_HW_QHS 2 "register_operand"  "")
+			   (match_operand:QI        3 "const_mask_operand" "")]
+			  UNSPEC_VEC_VFAE))
+   (set (reg:CCRAW CC_REGNUM)
+	(unspec:CCRAW [(match_dup 1)
+		       (match_dup 2)
+		       (match_dup 3)]
+		      UNSPEC_VEC_VFAECC))])
+   (set (match_operand:SI 4 "memory_operand" "")
+	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX"
+{
+  operands[3] = GEN_INT (INTVAL (operands[3]) | VSTRING_FLAG_CS | VSTRING_FLAG_ZS);
+})
+
+
+; Vector find element equal
+
+; CC-setting find-element-equal.  Only ZS and CS are valid flag bits
+; (asserted below); CS is implied by the "s" mnemonic and is stripped
+; before deciding between the vfeez/vfee forms.
+; vfeebs, vfeehs, vfeefs
+; vfeezbs, vfeezhs, vfeezfs
+(define_insn "*vfees<mode>"
+  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")
+			   (match_operand:VI_HW_QHS 2 "register_operand" "v")
+			   (match_operand:QI 3 "const_mask_operand" "C")]
+			  UNSPEC_VEC_VFEE))
+   (set (reg:CCRAW CC_REGNUM)
+	(unspec:CCRAW [(match_dup 1)
+		       (match_dup 2)
+		       (match_dup 3)]
+		      UNSPEC_VEC_VFEECC))]
+  "TARGET_VX"
+{
+  unsigned HOST_WIDE_INT flags = UINTVAL (operands[3]);
+
+  gcc_assert (!(flags & ~(VSTRING_FLAG_ZS | VSTRING_FLAG_CS)));
+  flags &= ~VSTRING_FLAG_CS;
+
+  if (flags == VSTRING_FLAG_ZS)
+    return "vfeez<bhfgq>s\t%v0,%v1,%v2";
+  return "vfee<bhfgq>s\t%v0,%v1,%v2,%b3";
+}
+  [(set_attr "op_type" "VRR")])
+
+; Plain find-element-equal, no flags, no CC.
+; vfeeb, vfeeh, vfeef
+(define_insn "vfee<mode>"
+  [(set (match_operand:VI_HW_QHS                    0 "register_operand" "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand"  "v")
+			   (match_operand:VI_HW_QHS 2 "register_operand"  "v")
+			   (const_int 0)]
+			  UNSPEC_VEC_VFEE))]
+  "TARGET_VX"
+  "vfee<bhfgq>\t%v0,%v1,%v2,0"
+  [(set_attr "op_type" "VRR")])
+
+; Zero-search find-element-equal.  This pattern does not describe a CC
+; set, so it must emit the non-CC-setting vfeez form - the previous
+; template used the "s" mnemonic (vfeez<bhfgq>s ...,2), which sets CC
+; behind the optimizers' back.  Compare the structurally identical
+; vfenez pattern below.
+; vfeezb, vfeezh, vfeezf
+(define_insn "vfeez<mode>"
+  [(set (match_operand:VI_HW_QHS                    0 "register_operand" "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand"  "v")
+			   (match_operand:VI_HW_QHS 2 "register_operand"  "v")
+			   (const_int VSTRING_FLAG_ZS)]
+			  UNSPEC_VEC_VFEE))]
+  "TARGET_VX"
+  "vfeez<bhfgq>\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+; CC-consuming builtin entry: CS flag hard-coded, CC stored to op3.
+(define_expand "vfees<mode>"
+  [(parallel
+    [(set (match_operand:VI_HW_QHS                    0 "register_operand" "")
+	  (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
+			     (match_operand:VI_HW_QHS 2 "register_operand" "")
+			     (const_int VSTRING_FLAG_CS)]
+			    UNSPEC_VEC_VFEE))
+   (set (reg:CCRAW CC_REGNUM)
+	(unspec:CCRAW [(match_dup 1)
+		       (match_dup 2)
+		       (const_int VSTRING_FLAG_CS)]
+		      UNSPEC_VEC_VFEECC))])
+   (set (match_operand:SI 3 "memory_operand" "")
+	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX")
+
+; Zero-search + CC form; op4 is materialized as ZS|CS in the prepare code.
+(define_expand "vfeezs<mode>"
+  [(parallel
+    [(set (match_operand:VI_HW_QHS                    0 "register_operand" "")
+	  (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
+			     (match_operand:VI_HW_QHS 2 "register_operand" "")
+			     (match_dup 4)]
+			    UNSPEC_VEC_VFEE))
+   (set (reg:CCRAW CC_REGNUM)
+	(unspec:CCRAW [(match_dup 1)
+		       (match_dup 2)
+		       (match_dup 4)]
+		      UNSPEC_VEC_VFEECC))])
+   (set (match_operand:SI 3 "memory_operand" "")
+	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX"
+{
+  operands[4] = GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS);
+})
+
+; Vector find element not equal
+
+; Plain find-element-not-equal, no flags, no CC.
+; vfeneb, vfeneh, vfenef
+(define_insn "vfene<mode>"
+  [(set (match_operand:VI_HW_QHS                    0 "register_operand" "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand"  "v")
+			   (match_operand:VI_HW_QHS 2 "register_operand"  "v")
+			   (const_int 0)]
+			  UNSPEC_VEC_VFENE))]
+  "TARGET_VX"
+  "vfene<bhfgq>\t%v0,%v1,%v2,0"
+  [(set_attr "op_type" "VRR")])
+
+; vec_vfenes can be found in vector.md since it is used for strlen
+
+; Zero-search find-element-not-equal; ZS is implied by the vfenez
+; mnemonic so no mask operand is printed.
+; vfenezb, vfenezh, vfenezf
+(define_insn "vfenez<mode>"
+  [(set (match_operand:VI_HW_QHS                    0 "register_operand" "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand"  "v")
+			   (match_operand:VI_HW_QHS 2 "register_operand"  "v")
+			   (const_int VSTRING_FLAG_ZS)]
+			  UNSPEC_VEC_VFENE))]
+  "TARGET_VX"
+  "vfenez<bhfgq>\t%v0,%v1,%v2"
+  [(set_attr "op_type" "VRR")])
+
+; CC-consuming builtin entry: CS flag hard-coded, CC stored to op3.
+(define_expand "vfenes<mode>"
+  [(parallel
+    [(set (match_operand:VI_HW_QHS                    0 "register_operand" "")
+	  (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
+			     (match_operand:VI_HW_QHS 2 "register_operand" "")
+			     (const_int VSTRING_FLAG_CS)]
+			    UNSPEC_VEC_VFENE))
+   (set (reg:CCRAW CC_REGNUM)
+	(unspec:CCRAW [(match_dup 1)
+		       (match_dup 2)
+		       (const_int VSTRING_FLAG_CS)]
+		      UNSPEC_VEC_VFENECC))])
+   (set (match_operand:SI 3 "memory_operand" "")
+	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX")
+
+; Zero-search + CC form; op4 is materialized as ZS|CS in the prepare code.
+(define_expand "vfenezs<mode>"
+  [(parallel
+    [(set (match_operand:VI_HW_QHS                    0 "register_operand" "")
+	  (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
+			     (match_operand:VI_HW_QHS 2 "register_operand" "")
+			     (match_dup 4)]
+			    UNSPEC_VEC_VFENE))
+     (set (reg:CCRAW CC_REGNUM)
+	  (unspec:CCRAW [(match_dup 1)
+			 (match_dup 2)
+			 (match_dup 4)]
+			UNSPEC_VEC_VFENECC))])
+   (set (match_operand:SI 3 "memory_operand" "")
+	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX"
+{
+  operands[4] = GEN_INT (VSTRING_FLAG_ZS | VSTRING_FLAG_CS);
+})
+
+; Vector isolate string
+
+; vistrb, vistrh, vistrf
+(define_insn "vistr<mode>"
+  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")]
+			  UNSPEC_VEC_VISTR))]
+  "TARGET_VX"
+  "vistr<bhfgq>\t%v0,%v1"
+  [(set_attr "op_type" "VRR")])
+
+; CC-setting variant of vistr (matched by the vistrs expander below).
+; vistrbs, vistrhs, vistrfs
+(define_insn "*vistrs<mode>"
+  [(set (match_operand:VI_HW_QHS 0 "register_operand" "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "v")]
+			  UNSPEC_VEC_VISTR))
+   (set (reg:CCRAW CC_REGNUM)
+	(unspec:CCRAW [(match_dup 1)] UNSPEC_VEC_VISTRCC))]
+  "TARGET_VX"
+  "vistr<bhfgq>s\t%v0,%v1"
+  [(set_attr "op_type" "VRR")])
+
+; Builtin entry: run the CC-setting vistr and store CC to op2.
+(define_expand "vistrs<mode>"
+  [(parallel
+    [(set (match_operand:VI_HW_QHS                    0 "register_operand" "")
+	  (unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")]
+			    UNSPEC_VEC_VISTR))
+     (set (reg:CCRAW CC_REGNUM)
+	  (unspec:CCRAW [(match_dup 1)]
+			UNSPEC_VEC_VISTRCC))])
+   (set (match_operand:SI 2 "memory_operand" "")
+	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX")
+
+
+; Vector compare range
+
+; Same ZS-flag rewriting as vfae: select the vstrcz mnemonic when the
+; zero-search flag is present and strip it from the printed mask.
+; vstrcb, vstrch, vstrcf
+; vstrczb, vstrczh, vstrczf
+(define_insn "vstrc<mode>"
+  [(set (match_operand:VI_HW_QHS                    0 "register_operand"  "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand"   "v")
+			   (match_operand:VI_HW_QHS 2 "register_operand"   "v")
+			   (match_operand:VI_HW_QHS 3 "register_operand"   "v")
+			   (match_operand:QI        4 "const_mask_operand" "C")]
+			  UNSPEC_VEC_VSTRC))]
+  "TARGET_VX"
+{
+  unsigned HOST_WIDE_INT flags = UINTVAL (operands[4]);
+
+  if (flags & VSTRING_FLAG_ZS)
+    {
+      flags &= ~VSTRING_FLAG_ZS;
+      operands[4] = GEN_INT (flags);
+      return "vstrcz<bhfgq>\t%v0,%v1,%v2,%v3,%b4";
+    }
+  return "vstrc<bhfgq>\t%v0,%v1,%v2,%v3,%b4";
+}
+[(set_attr "op_type" "VRR")])
+
+; CC-setting variant of vstrc with the same ZS-flag rewriting.
+; vstrcbs, vstrchs, vstrcfs
+; vstrczbs, vstrczhs, vstrczfs
+(define_insn "*vstrcs<mode>"
+  [(set (match_operand:VI_HW_QHS                    0 "register_operand"  "=v")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand"   "v")
+			   (match_operand:VI_HW_QHS 2 "register_operand"   "v")
+			   (match_operand:VI_HW_QHS 3 "register_operand"   "v")
+			   (match_operand:QI        4 "const_mask_operand" "C")]
+			  UNSPEC_VEC_VSTRC))
+   (set (reg:CCRAW CC_REGNUM)
+	(unspec:CCRAW [(match_dup 1)
+		       (match_dup 2)
+		       (match_dup 3)
+		       (match_dup 4)]
+		      UNSPEC_VEC_VSTRCCC))]
+  "TARGET_VX"
+{
+  unsigned HOST_WIDE_INT flags = UINTVAL (operands[4]);
+
+  if (flags & VSTRING_FLAG_ZS)
+    {
+      flags &= ~VSTRING_FLAG_ZS;
+      operands[4] = GEN_INT (flags);
+      return "vstrcz<bhfgq>s\t%v0,%v1,%v2,%v3,%b4";
+    }
+  return "vstrc<bhfgq>s\t%v0,%v1,%v2,%v3,%b4";
+}
+  [(set_attr "op_type" "VRR")])
+
+; Builtin entry for the zero-search form: force ZS into the mask.
+(define_expand "vstrcz<mode>"
+  [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand"   "")
+			   (match_operand:VI_HW_QHS 2 "register_operand"   "")
+			   (match_operand:VI_HW_QHS 3 "register_operand"   "")
+			   (match_operand:QI        4 "const_mask_operand" "")]
+			  UNSPEC_VEC_VSTRC))]
+  "TARGET_VX"
+{
+  operands[4] = GEN_INT (INTVAL (operands[4]) | VSTRING_FLAG_ZS);
+})
+
+; CC-consuming builtin entry: force CS and store CC to op5.
+(define_expand "vstrcs<mode>"
+  [(parallel
+    [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
+			   (match_operand:VI_HW_QHS 2 "register_operand" "")
+			   (match_operand:VI_HW_QHS 3 "register_operand" "")
+			   (match_operand:QI        4 "const_mask_operand" "")]
+			  UNSPEC_VEC_VSTRC))
+   (set (reg:CCRAW CC_REGNUM)
+	(unspec:CCRAW [(match_dup 1)
+		       (match_dup 2)
+		       (match_dup 3)
+		       (match_dup 4)]
+		      UNSPEC_VEC_VSTRCCC))])
+   (set (match_operand:SI 5 "memory_operand" "")
+	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX"
+{
+  operands[4] = GEN_INT (INTVAL (operands[4]) | VSTRING_FLAG_CS);
+})
+
+; Combined zero-search + CC builtin entry: force CS|ZS.
+(define_expand "vstrczs<mode>"
+  [(parallel
+    [(set (match_operand:VI_HW_QHS 0 "register_operand" "")
+	(unspec:VI_HW_QHS [(match_operand:VI_HW_QHS 1 "register_operand" "")
+			   (match_operand:VI_HW_QHS 2 "register_operand" "")
+			   (match_operand:VI_HW_QHS 3 "register_operand" "")
+			   (match_operand:QI        4 "const_mask_operand" "")]
+			  UNSPEC_VEC_VSTRC))
+   (set (reg:CCRAW CC_REGNUM)
+	(unspec:CCRAW [(match_dup 1)
+		       (match_dup 2)
+		       (match_dup 3)
+		       (match_dup 4)]
+		      UNSPEC_VEC_VSTRCCC))])
+   (set (match_operand:SI 5 "memory_operand" "")
+	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX"
+{
+  operands[4] = GEN_INT (INTVAL (operands[4]) | VSTRING_FLAG_CS | VSTRING_FLAG_ZS);
+})
+
+; Convert V2DI (signed) to V2DF.  The insn condition rejects rounding
+; mode 2 and values above 7, which are invalid encodings.
+(define_insn "vcdgb"
+  [(set (match_operand:V2DF 0 "register_operand"                "=v")
+	(unspec:V2DF [(match_operand:V2DI 1 "register_operand"   "v")
+		      (match_operand:QI   2 "const_mask_operand" "C")  ; inexact suppression
+		      (match_operand:QI   3 "const_mask_operand" "C")] ; rounding mode
+		     UNSPEC_VEC_VCDGB))]
+  "TARGET_VX && UINTVAL (operands[3]) != 2 && UINTVAL (operands[3]) <= 7"
+  "vcdgb\t%v0,%v1,%b2,%b3"
+  [(set_attr "op_type" "VRR")])
+
+
+; The result needs to be multiplied with 2**-op2
+; Scaling is done by building a V2DF constant { 2^-op2, 2^-op2 } in the
+; prepare code and multiplying the converted value with it.
+(define_expand "vec_ctd_s64"
+  [(set (match_operand:V2DF               0 "register_operand" "")
+	(unspec:V2DF [(match_operand:V2DI 1 "register_operand" "")
+		      (const_int 4) ; inexact suppressed
+		      (const_int VEC_RND_CURRENT)]
+		     UNSPEC_VEC_VCDGB))
+   (use (match_operand:QI 2 "const_int_operand" ""))
+   (set (match_dup 0) (mult:V2DF (match_dup 0) (match_dup 3)))]
+  "TARGET_VX"
+{
+  REAL_VALUE_TYPE f;
+  rtx c;
+
+  /* 2^-op2 as a DFmode constant.  */
+  real_2expN (&f, -INTVAL (operands[2]), DFmode);
+  c = const_double_from_real_value (f, DFmode);
+
+  operands[3] = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, c, c));
+  operands[3] = force_reg (V2DFmode, operands[3]);
+})
+
+; Convert V2DI (unsigned) to V2DF; same rounding-mode validity check
+; as vcdgb.
+(define_insn "vcdlgb"
+  [(set (match_operand:V2DF 0 "register_operand"                 "=v")
+	(unspec:V2DF [(match_operand:V2DI 1 "register_operand"    "v")
+		      (match_operand:QI   2 "const_mask_operand"  "C")  ; inexact suppression
+		      (match_operand:QI   3 "const_mask_operand"  "C")] ; rounding mode
+		     UNSPEC_VEC_VCDLGB))]
+  "TARGET_VX && UINTVAL (operands[3]) != 2 && UINTVAL (operands[3]) <= 7"
+  "vcdlgb\t%v0,%v1,%b2,%b3"
+  [(set_attr "op_type" "VRR")])
+
+; The result needs to be multiplied with 2**-op2
+; Unsigned counterpart of vec_ctd_s64, using vcdlgb.
+(define_expand "vec_ctd_u64"
+  [(set (match_operand:V2DF               0 "register_operand" "")
+	(unspec:V2DF [(match_operand:V2DI 1 "register_operand" "")
+		      (const_int 4) ; inexact suppressed
+		      (const_int VEC_RND_CURRENT)]
+		     UNSPEC_VEC_VCDLGB))
+   (use (match_operand:QI 2 "const_int_operand" ""))
+   (set (match_dup 0) (mult:V2DF (match_dup 0) (match_dup 3)))]
+  "TARGET_VX"
+{
+  REAL_VALUE_TYPE f;
+  rtx c;
+
+  /* 2^-op2 as a DFmode constant.  */
+  real_2expN (&f, -INTVAL (operands[2]), DFmode);
+  c = const_double_from_real_value (f, DFmode);
+
+  operands[3] = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, c, c));
+  operands[3] = force_reg (V2DFmode, operands[3]);
+})
+
+; Convert V2DF to V2DI (signed); op2 = inexact suppression, op3 =
+; rounding mode (2 invalid, must be <= 7).
+(define_insn "vcgdb"
+  [(set (match_operand:V2DI 0 "register_operand"                "=v")
+	(unspec:V2DI [(match_operand:V2DF 1 "register_operand"   "v")
+		      (match_operand:QI   2 "const_mask_operand" "C")
+		      (match_operand:QI   3 "const_mask_operand" "C")]
+		     UNSPEC_VEC_VCGDB))]
+  "TARGET_VX && UINTVAL (operands[3]) != 2 && UINTVAL (operands[3]) <= 7"
+  "vcgdb\t%v0,%v1,%b2,%b3"
+  [(set_attr "op_type" "VRR")])
+
+; The input needs to be multiplied with 2**op2
+; Pre-scale the V2DF input by { 2^op2, 2^op2 } into a fresh temporary
+; (op4), then convert to signed V2DI with vcgdb.
+(define_expand "vec_ctsl"
+  [(use (match_operand:QI 2 "const_int_operand" ""))
+   (set (match_dup 4) (mult:V2DF (match_operand:V2DF 1 "register_operand" "")
+				 (match_dup 3)))
+   (set (match_operand:V2DI 0 "register_operand" "")
+	(unspec:V2DI [(match_dup 4)
+		      (const_int 4) ; inexact suppressed
+		      (const_int VEC_RND_CURRENT)]
+		     UNSPEC_VEC_VCGDB))]
+  "TARGET_VX"
+{
+  REAL_VALUE_TYPE f;
+  rtx c;
+
+  /* 2^op2 as a DFmode constant.  */
+  real_2expN (&f, INTVAL (operands[2]), DFmode);
+  c = const_double_from_real_value (f, DFmode);
+
+  operands[3] = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, c, c));
+  operands[3] = force_reg (V2DFmode, operands[3]);
+  operands[4] = gen_reg_rtx (V2DFmode);
+})
+
+; Convert V2DF to V2DI (unsigned); same rounding-mode validity check
+; as vcgdb.
+(define_insn "vclgdb"
+  [(set (match_operand:V2DI 0 "register_operand"               "=v")
+	(unspec:V2DI [(match_operand:V2DF 1 "register_operand"  "v")
+		      (match_operand:QI   2 "const_mask_operand" "C")
+		      (match_operand:QI   3 "const_mask_operand" "C")]
+		     UNSPEC_VEC_VCLGDB))]
+  "TARGET_VX && UINTVAL (operands[3]) != 2 && UINTVAL (operands[3]) <= 7"
+  "vclgdb\t%v0,%v1,%b2,%b3"
+  [(set_attr "op_type" "VRR")])
+
+; The input needs to be multiplied with 2**op2
+; Unsigned counterpart of vec_ctsl, using vclgdb.
+(define_expand "vec_ctul"
+  [(use (match_operand:QI 2 "const_int_operand" ""))
+   (set (match_dup 4) (mult:V2DF (match_operand:V2DF 1 "register_operand" "")
+				 (match_dup 3)))
+   (set (match_operand:V2DI 0 "register_operand" "")
+	(unspec:V2DI [(match_dup 4)
+		      (const_int 4) ; inexact suppressed
+		      (const_int VEC_RND_CURRENT)]
+		     UNSPEC_VEC_VCLGDB))]
+  "TARGET_VX"
+{
+  REAL_VALUE_TYPE f;
+  rtx c;
+
+  /* 2^op2 as a DFmode constant.  */
+  real_2expN (&f, INTVAL (operands[2]), DFmode);
+  c = const_double_from_real_value (f, DFmode);
+
+  operands[3] = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, c, c));
+  operands[3] = force_reg (V2DFmode, operands[3]);
+  operands[4] = gen_reg_rtx (V2DFmode);
+})
+
+; Vector load fp integer - IEEE inexact exception is suppressed
+; vfisb, vfidb, wfisb, wfidb, wfixb
+(define_insn "vec_fpint<mode>"
+  [(set (match_operand:VFT              0 "register_operand"  "=v")
+	(unspec:VFT [(match_operand:VFT 1 "register_operand"   "v")
+		     (match_operand:QI  2 "const_mask_operand" "C")  ; inexact suppression control
+		     (match_operand:QI  3 "const_mask_operand" "C")] ; rounding mode
+		     UNSPEC_VEC_VFI))]
+  "TARGET_VX"
+  "<vw>fi<sdx>b\t%v0,%v1,%b2,%b3"
+  [(set_attr "op_type" "VRR")])
+
+
+; Vector load lengthened - V4SF -> V2DF
+
+; Widens the even-indexed SFmode elements to DFmode (vldeb).
+(define_insn "vflls"
+  [(set (match_operand:V2DF 0 "register_operand"               "=v")
+	(unspec:V2DF [(match_operand:V4SF 1 "register_operand"  "v")]
+		     UNSPEC_VEC_VFLL))]
+  "TARGET_VX"
+  "vldeb\t%v0,%v1"
+  [(set_attr "op_type" "VRR")])
+
+; Load two consecutive SFmode memory words into vector elements 0 and 2,
+; then widen them to V2DF with vflls.
+(define_expand "vec_ld2f"
+  [; Initialize a vector to all zeroes.  FIXME: This should not be
+   ; necessary since all elements of the vector will be set anyway.
+   ; This is just to make it explicit to the data flow framework.
+   (set (match_dup 2) (match_dup 3))
+   (set (match_dup 2) (unspec:V4SF [(match_operand:SF 1 "memory_operand" "")
+				    (const_int 0)
+				    (match_dup 2)]
+				    UNSPEC_VEC_SET))
+   (set (match_dup 2) (unspec:V4SF [(match_dup 4)
+				    (const_int 2)
+				    (match_dup 2)]
+				    UNSPEC_VEC_SET))
+   (set (match_operand:V2DF 0 "register_operand" "")
+	(unspec:V2DF [(match_dup 2)] UNSPEC_VEC_VFLL))]
+  "TARGET_VX"
+{
+  operands[2] = gen_reg_rtx (V4SFmode);
+  operands[3] = CONST0_RTX (V4SFmode);
+  /* Second SFmode word, 4 bytes after op1.  */
+  operands[4] = adjust_address (operands[1], SFmode, 4);
+})
+
+
+; Vector load rounded - V2DF -> V4SF
+
+; op2 = inexact suppression, op3 = rounding mode.
+(define_insn "vflrd"
+  [(set (match_operand:V4SF 0 "register_operand"                "=v")
+	(unspec:V4SF [(match_operand:V2DF 1 "register_operand"   "v")
+		      (match_operand:QI   2 "const_mask_operand" "C")
+		      (match_operand:QI   3 "const_mask_operand" "C")]
+		     UNSPEC_VEC_VFLR))]
+  "TARGET_VX"
+  "vledb\t%v0,%v1,%b2,%b3"
+  [(set_attr "op_type" "VRR")])
+
+; Round V2DF down to V4SF and store elements 0 and 2 to two consecutive
+; SFmode memory words - the inverse of vec_ld2f.
+(define_expand "vec_st2f"
+  [(set (match_dup 2)
+	(unspec:V4SF [(match_operand:V2DF 0 "register_operand" "")
+		      (const_int VEC_INEXACT)
+		      (const_int VEC_RND_CURRENT)]
+		     UNSPEC_VEC_VFLR))
+   (set (match_operand:SF 1 "memory_operand" "")
+	(unspec:SF [(match_dup 2) (const_int 0)] UNSPEC_VEC_EXTRACT))
+   (set (match_dup 3)
+	(unspec:SF [(match_dup 2) (const_int 2)] UNSPEC_VEC_EXTRACT))]
+  "TARGET_VX"
+{
+  operands[2] = gen_reg_rtx (V4SFmode);
+  /* Second SFmode word, 4 bytes after op1.  */
+  operands[3] = adjust_address (operands[1], SFmode, 4);
+})
+
+
+; Vector square root fp vec_sqrt -> sqrt rtx standard name
+
+;; Vector FP test data class immediate
+
+; vec_all_nan, vec_all_numeric, vec_any_nan, vec_any_numeric
+; These ignore the vector result and only want CC stored to an int
+; pointer.
+
+; The vector result register is only a scratch here; the 12-bit class
+; immediate must satisfy constraint 'J'.
+; vftcisb, vftcidb
+(define_insn "*vftci<mode>_cconly"
+  [(set (reg:CCRAW CC_REGNUM)
+	(unspec:CCRAW [(match_operand:VECF_HW 1 "register_operand")
+		       (match_operand:HI      2 "const_int_operand")]
+		      UNSPEC_VEC_VFTCICC))
+   (clobber (match_scratch:<tointvec> 0))]
+  "TARGET_VX && CONST_OK_FOR_CONSTRAINT_P (INTVAL (operands[2]), 'J', \"J\")"
+  "vftci<sdx>b\t%v0,%v1,%x2"
+  [(set_attr "op_type" "VRR")])
+
+; Builtin entry: CC-only test data class, result stored as int in op2.
+(define_expand "vftci<mode>_intcconly"
+  [(parallel
+    [(set (reg:CCRAW CC_REGNUM)
+	  (unspec:CCRAW [(match_operand:VECF_HW 0 "register_operand")
+			 (match_operand:HI      1 "const_int_operand")]
+			UNSPEC_VEC_VFTCICC))
+     (clobber (scratch:<tointvec>))])
+   (set (match_operand:SI 2 "register_operand" "")
+	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX && CONST_OK_FOR_CONSTRAINT_P (INTVAL (operands[1]), 'J', \"J\")")
+
+; vec_fp_test_data_class wants the result vector and the CC stored to
+; an int pointer.
+
+; vftcisb, vftcidb
+(define_insn "*vftci<mode>"
+  [(set (match_operand:VECF_HW                  0 "register_operand"  "=v")
+	(unspec:VECF_HW [(match_operand:VECF_HW 1 "register_operand"   "v")
+			 (match_operand:HI      2 "const_int_operand"  "J")]
+			UNSPEC_VEC_VFTCI))
+   (set (reg:CCRAW CC_REGNUM)
+	(unspec:CCRAW [(match_dup 1) (match_dup 2)] UNSPEC_VEC_VFTCICC))]
+  "TARGET_VX && CONST_OK_FOR_CONSTRAINT_P (INTVAL (operands[2]), 'J', \"J\")"
+  "vftci<sdx>b\t%v0,%v1,%x2"
+  [(set_attr "op_type" "VRR")])
+
+; Builtin entry: test data class keeping the vector result and storing
+; CC as int to op3.
+(define_expand "vftci<mode>_intcc"
+  [(parallel
+    [(set (match_operand:VECF_HW                  0 "register_operand")
+	  (unspec:VECF_HW [(match_operand:VECF_HW 1 "register_operand")
+			   (match_operand:HI      2 "const_int_operand")]
+			  UNSPEC_VEC_VFTCI))
+     (set (reg:CCRAW CC_REGNUM)
+	  (unspec:CCRAW [(match_dup 1) (match_dup 2)] UNSPEC_VEC_VFTCICC))])
+   (set (match_operand:SI 3 "memory_operand" "")
+	(unspec:SI [(reg:CCRAW CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX && CONST_OK_FOR_CONSTRAINT_P (INTVAL (operands[2]), 'J', \"J\")")
+
+;;
+;; Integer compares
+;;
+
+; All comparisons which produce a CC need fully populated (VI_HW)
+; vector arguments.  Otherwise the any/all CCs would be just bogus.
+
+; Integer vector compare used only for its CC result: the vc..s
+; instruction's vector result goes to a scratch register, the CC
+; (mode from the VICMP iterator) is what callers consume.
+(define_insn "*vec_cmp<VICMP:insn_cmp><VI_HW:mode>_cconly"
+  [(set (reg:VICMP CC_REGNUM)
+	(compare:VICMP (match_operand:VI_HW 0 "register_operand" "v")
+		       (match_operand:VI_HW 1 "register_operand" "v")))
+   (clobber (match_scratch:VI_HW 2 "=v"))]
+  "TARGET_VX"
+  "vc<VICMP:insn_cmp><VI_HW:bhfgq>s\t%v2,%v0,%v1"
+  [(set_attr "op_type" "VRR")])
+
+; FIXME: The following 2x3 definitions should be merged into 2 with
+; VICMP like above but I could not find a way to set the comparison
+; operator (eq) depending on the mode CCVEQ (mode_iterator). Or the
+; other way around - setting the mode depending on the code
+; (code_iterator).
+; Integer compare-equal with CC: vector mask result in operand 0 (eq),
+; CC stored as an int to memory operand 3 via UNSPEC_CC_TO_INT.
+; Matches the *vec_cmpeq<VI_HW:mode>_cc insn below.
+(define_expand "vec_cmpeq<VI_HW:mode>_cc"
+  [(parallel
+    [(set (reg:CCVEQ CC_REGNUM)
+	  (compare:CCVEQ (match_operand:VI_HW 1 "register_operand" "v")
+			 (match_operand:VI_HW 2 "register_operand" "v")))
+     (set (match_operand:VI_HW 0 "register_operand" "=v")
+	  (eq:VI_HW (match_dup 1) (match_dup 2)))])
+   (set (match_operand:SI 3 "memory_operand" "")
+	(unspec:SI [(reg:CCVEQ CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX")
+
+; Signed integer compare-high with CC: vector mask result in operand 0
+; (gt), CC stored as an int to memory operand 3.
+(define_expand "vec_cmph<VI_HW:mode>_cc"
+  [(parallel
+    [(set (reg:CCVIH CC_REGNUM)
+	  (compare:CCVIH (match_operand:VI_HW 1 "register_operand" "v")
+			 (match_operand:VI_HW 2 "register_operand" "v")))
+     (set (match_operand:VI_HW 0 "register_operand" "=v")
+	  (gt:VI_HW (match_dup 1) (match_dup 2)))])
+   (set (match_operand:SI 3 "memory_operand" "")
+	(unspec:SI [(reg:CCVIH CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX")
+
+; Unsigned (logical) integer compare-high with CC: vector mask result
+; in operand 0 (gtu), CC stored as an int to memory operand 3.
+(define_expand "vec_cmphl<VI_HW:mode>_cc"
+  [(parallel
+    [(set (reg:CCVIHU CC_REGNUM)
+	  (compare:CCVIHU (match_operand:VI_HW 1 "register_operand" "v")
+			  (match_operand:VI_HW 2 "register_operand" "v")))
+     (set (match_operand:VI_HW 0 "register_operand" "=v")
+	  (gtu:VI_HW (match_dup 1) (match_dup 2)))])
+   (set (match_operand:SI 3 "memory_operand" "")
+	(unspec:SI [(reg:CCVIHU CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX")
+
+
+; vceq..s: integer compare-equal producing both the element mask
+; (operand 2) and the CC from one instruction.
+(define_insn "*vec_cmpeq<VI_HW:mode>_cc"
+  [(set (reg:CCVEQ CC_REGNUM)
+	(compare:CCVEQ (match_operand:VI_HW 0 "register_operand"  "v")
+		       (match_operand:VI_HW 1 "register_operand"  "v")))
+   (set (match_operand:VI_HW                2 "register_operand" "=v")
+	(eq:VI_HW (match_dup 0) (match_dup 1)))]
+  "TARGET_VX"
+  "vceq<VI_HW:bhfgq>s\t%v2,%v0,%v1"
+  [(set_attr "op_type" "VRR")])
+
+; vch..s: signed integer compare-high producing both the element mask
+; (operand 2) and the CC from one instruction.
+(define_insn "*vec_cmph<VI_HW:mode>_cc"
+  [(set (reg:CCVIH CC_REGNUM)
+	(compare:CCVIH (match_operand:VI_HW 0 "register_operand"  "v")
+		       (match_operand:VI_HW 1 "register_operand"  "v")))
+   (set (match_operand:VI_HW               2 "register_operand" "=v")
+	(gt:VI_HW (match_dup 0) (match_dup 1)))]
+  "TARGET_VX"
+  "vch<VI_HW:bhfgq>s\t%v2,%v0,%v1"
+  [(set_attr "op_type" "VRR")])
+
+; vchl..s: unsigned (logical) integer compare-high producing both the
+; element mask (operand 2) and the CC from one instruction.
+(define_insn "*vec_cmphl<VI_HW:mode>_cc"
+  [(set (reg:CCVIHU CC_REGNUM)
+	(compare:CCVIHU (match_operand:VI_HW 0 "register_operand"  "v")
+			(match_operand:VI_HW 1 "register_operand"  "v")))
+   (set (match_operand:VI_HW                2 "register_operand" "=v")
+	(gtu:VI_HW (match_dup 0) (match_dup 1)))]
+  "TARGET_VX"
+  "vchl<VI_HW:bhfgq>s\t%v2,%v0,%v1"
+  [(set_attr "op_type" "VRR")])
+
+;;
+;; Floating point compares
+;;
+
+; vfcesbs, vfcedbs, wfcexbs, vfchsbs, vfchdbs, wfchxbs, vfchesbs, vfchedbs, wfchexbs
+; Floating point vector compare used only for its CC result: the
+; instruction's vector result goes to a scratch register of the
+; corresponding integer vector mode (<tointvec>).
+(define_insn "*vec_cmp<insn_cmp><mode>_cconly"
+  [(set (reg:VFCMP CC_REGNUM)
+	(compare:VFCMP (match_operand:VF_HW 0 "register_operand" "v")
+		       (match_operand:VF_HW 1 "register_operand" "v")))
+   (clobber (match_scratch:<tointvec> 2 "=v"))]
+  "TARGET_VX"
+  "<vw>fc<asm_fcmp><sdx>bs\t%v2,%v0,%v1"
+  [(set_attr "op_type" "VRR")])
+
+; FIXME: Merge the following 2x3 patterns with VFCMP
+; Floating point compare-equal with CC: integer mask vector result in
+; operand 0 (eq), CC stored as an int to memory operand 3.
+(define_expand "vec_cmpeq<mode>_cc"
+  [(parallel
+    [(set (reg:CCVEQ CC_REGNUM)
+	  (compare:CCVEQ (match_operand:VF_HW 1 "register_operand"  "v")
+			 (match_operand:VF_HW 2 "register_operand"  "v")))
+     (set (match_operand:<tointvec> 0 "register_operand" "=v")
+	  (eq:<tointvec> (match_dup 1) (match_dup 2)))])
+   (set (match_operand:SI 3 "memory_operand" "")
+	(unspec:SI [(reg:CCVEQ CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX")
+
+; Floating point compare-high with CC: integer mask vector result in
+; operand 0 (gt), CC stored as an int to memory operand 3.
+; Note: the CC read-back below must use the same CC mode (CCVFH) that
+; the compare sets — the siblings (CCVEQ/CCVFHE) follow the same rule.
+(define_expand "vec_cmph<mode>_cc"
+  [(parallel
+    [(set (reg:CCVFH CC_REGNUM)
+	  (compare:CCVFH (match_operand:VF_HW 1 "register_operand"  "v")
+			 (match_operand:VF_HW 2 "register_operand"  "v")))
+     (set (match_operand:<tointvec> 0 "register_operand" "=v")
+	  (gt:<tointvec> (match_dup 1) (match_dup 2)))])
+   (set (match_operand:SI 3 "memory_operand" "")
+	(unspec:SI [(reg:CCVFH CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX")
+
+; Floating point compare-high-or-equal with CC: integer mask vector
+; result in operand 0 (ge), CC stored as an int to memory operand 3.
+(define_expand "vec_cmphe<mode>_cc"
+  [(parallel
+    [(set (reg:CCVFHE CC_REGNUM)
+	  (compare:CCVFHE (match_operand:VF_HW 1 "register_operand"  "v")
+			  (match_operand:VF_HW 2 "register_operand"  "v")))
+     (set (match_operand:<tointvec> 0 "register_operand" "=v")
+	  (ge:<tointvec> (match_dup 1) (match_dup 2)))])
+   (set (match_operand:SI 3 "memory_operand" "")
+	(unspec:SI [(reg:CCVFHE CC_REGNUM)] UNSPEC_CC_TO_INT))]
+  "TARGET_VX")
+
+; These 3 cannot be merged into the insn definition above since that
+; would also require rewriting the RTL comparison operator at the same
+; time as the CC mode.
+
+; vfcesbs, vfcedbs, wfcexbs
+; FP compare-equal producing both the integer mask vector (operand 2)
+; and the CC from one instruction.
+(define_insn "*vec_cmpeq<mode>_cc"
+  [(set (reg:CCVEQ CC_REGNUM)
+	(compare:CCVEQ (match_operand:VF_HW 0 "register_operand"  "v")
+		       (match_operand:VF_HW 1 "register_operand"  "v")))
+   (set (match_operand:<tointvec>              2 "register_operand" "=v")
+	(eq:<tointvec> (match_dup 0) (match_dup 1)))]
+  "TARGET_VX"
+  "<vw>fce<sdx>bs\t%v2,%v0,%v1"
+  [(set_attr "op_type" "VRR")])
+
+; vfchsbs, vfchdbs, wfchxbs
+; FP compare-high producing both the integer mask vector (operand 2)
+; and the CC from one instruction.
+(define_insn "*vec_cmph<mode>_cc"
+  [(set (reg:CCVFH CC_REGNUM)
+	(compare:CCVFH (match_operand:VF_HW 0 "register_operand"  "v")
+		       (match_operand:VF_HW 1 "register_operand"  "v")))
+   (set (match_operand:<tointvec>              2 "register_operand" "=v")
+	(gt:<tointvec> (match_dup 0) (match_dup 1)))]
+  "TARGET_VX"
+  "<vw>fch<sdx>bs\t%v2,%v0,%v1"
+  [(set_attr "op_type" "VRR")])
+
+; vfchesbs, vfchedbs, wfchexbs
+; FP compare-high-or-equal producing both the integer mask vector
+; (operand 2) and the CC from one instruction.
+(define_insn "*vec_cmphe<mode>_cc"
+  [(set (reg:CCVFHE CC_REGNUM)
+	(compare:CCVFHE (match_operand:VF_HW 0 "register_operand"  "v")
+			(match_operand:VF_HW 1 "register_operand"  "v")))
+   (set (match_operand:<tointvec>               2 "register_operand" "=v")
+	(ge:<tointvec> (match_dup 0) (match_dup 1)))]
+  "TARGET_VX"
+  "<vw>fche<sdx>bs\t%v2,%v0,%v1"
+  [(set_attr "op_type" "VRR")])
+
+; Convert V2DI (signed) to V2DF via vcdgb (UNSPEC_VEC_VCDGB), with
+; inexact suppression disabled and the current rounding mode.
+(define_expand "vec_double_s64"
+  [(set (match_operand:V2DF               0 "register_operand")
+	(unspec:V2DF [(match_operand:V2DI 1 "register_operand")
+		      (const_int 0)  ; inexact suppression disabled
+		      (const_int VEC_RND_CURRENT)]
+		     UNSPEC_VEC_VCDGB))]
+  "TARGET_VX")
+
+; Convert V2DI (unsigned) to V2DF via vcdlgb (UNSPEC_VEC_VCDLGB), with
+; inexact suppression disabled and the current rounding mode.
+(define_expand "vec_double_u64"
+  [(set (match_operand:V2DF               0 "register_operand")
+	(unspec:V2DF [(match_operand:V2DI 1 "register_operand")
+		      (const_int 0)  ; inexact suppression disabled
+		      (const_int VEC_RND_CURRENT)]
+		     UNSPEC_VEC_VCDLGB))]
+  "TARGET_VX")
+
+
+; vfminsb, vfmindb, wfminxb
+; Vector fp minimum.  Operand 3 is a constant mask immediate
+; (presumably the instruction's function-selection M field — see the
+; z/Architecture PoP for its encoding).  Requires the vector
+; enhancements facility (TARGET_VXE).
+(define_insn "vfmin<mode>"
+  [(set (match_operand:VF_HW                0 "register_operand" "=v")
+	(unspec:VF_HW [(match_operand:VF_HW 1 "register_operand" "%v")
+		       (match_operand:VF_HW 2 "register_operand"  "v")
+		       (match_operand:QI    3 "const_mask_operand" "C")]
+		      UNSPEC_VEC_VFMIN))]
+  "TARGET_VXE"
+  "<vw>fmin<sdx>b\t%v0,%v1,%v2,%b3"
+  [(set_attr "op_type" "VRR")])
+
+; vfmaxsb, vfmaxdb, wfmaxxb
+; Vector fp maximum.  Operand 3 is a constant mask immediate
+; (presumably the instruction's function-selection M field — see the
+; z/Architecture PoP for its encoding).  Requires the vector
+; enhancements facility (TARGET_VXE).
+(define_insn "vfmax<mode>"
+  [(set (match_operand:VF_HW                0 "register_operand" "=v")
+	(unspec:VF_HW [(match_operand:VF_HW 1 "register_operand" "%v")
+		       (match_operand:VF_HW 2 "register_operand"  "v")
+		       (match_operand:QI    3 "const_mask_operand" "C")]
+		      UNSPEC_VEC_VFMAX))]
+  "TARGET_VXE"
+  "<vw>fmax<sdx>b\t%v0,%v1,%v2,%b3"
+  [(set_attr "op_type" "VRR")])