diff gcc/config/rs6000/altivec.md @ 131:84e7813d76e9
gcc-8.2
| author | mir3636 |
| --- | --- |
| date | Thu, 25 Oct 2018 07:37:49 +0900 |
| parents | 04ced10e8804 |
| children | 1830386684a0 |
--- a/gcc/config/rs6000/altivec.md Fri Oct 27 22:46:09 2017 +0900 +++ b/gcc/config/rs6000/altivec.md Thu Oct 25 07:37:49 2018 +0900 @@ -1,5 +1,5 @@ ;; AltiVec patterns. -;; Copyright (C) 2002-2017 Free Software Foundation, Inc. +;; Copyright (C) 2002-2018 Free Software Foundation, Inc. ;; Contributed by Aldy Hernandez (aldy@quesejoda.com) ;; This file is part of GCC. @@ -245,21 +245,16 @@ "VECTOR_MEM_ALTIVEC_P (<MODE>mode) && (register_operand (operands[0], <MODE>mode) || register_operand (operands[1], <MODE>mode))" -{ - switch (which_alternative) - { - case 0: return "stvx %1,%y0"; - case 1: return "lvx %0,%y1"; - case 2: return "vor %0,%1,%1"; - case 3: return "#"; - case 4: return "#"; - case 5: return "#"; - case 6: return "vxor %0,%0,%0"; - case 7: return output_vec_const_move (operands); - case 8: return "#"; - default: gcc_unreachable (); - } -} + "@ + stvx %1,%y0 + lvx %0,%y1 + vor %0,%1,%1 + # + # + # + vxor %0,%0,%0 + * return output_vec_const_move (operands); + #" [(set_attr "type" "vecstore,vecload,veclogical,store,load,*,veclogical,*,*") (set_attr "length" "4,4,4,20,20,20,4,8,32")]) @@ -272,27 +267,22 @@ "VECTOR_MEM_ALTIVEC_P (TImode) && (register_operand (operands[0], TImode) || register_operand (operands[1], TImode))" -{ - switch (which_alternative) - { - case 0: return "stvx %1,%y0"; - case 1: return "lvx %0,%y1"; - case 2: return "vor %0,%1,%1"; - case 3: return "#"; - case 4: return "#"; - case 5: return "#"; - case 6: return "vxor %0,%0,%0"; - case 7: return output_vec_const_move (operands); - default: gcc_unreachable (); - } -} + "@ + stvx %1,%y0 + lvx %0,%y1 + vor %0,%1,%1 + # + # + # + vxor %0,%0,%0 + * return output_vec_const_move (operands);" [(set_attr "type" "vecstore,vecload,veclogical,store,load,*,veclogical,*")]) ;; Load up a vector with the most significant bit set by loading up -1 and ;; doing a shift left (define_split - [(set (match_operand:VM 0 "altivec_register_operand" "") - (match_operand:VM 1 "easy_vector_constant_msb" ""))] + [(set (match_operand:VM 0 "altivec_register_operand") + (match_operand:VM 1 "easy_vector_constant_msb"))] "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && reload_completed" [(const_int 0)] { @@ -318,8 +308,8 @@ }) (define_split - [(set (match_operand:VM 0 "altivec_register_operand" "") - (match_operand:VM 1 "easy_vector_constant_add_self" ""))] + [(set (match_operand:VM 0 "altivec_register_operand") + (match_operand:VM 1 "easy_vector_constant_add_self"))] "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && reload_completed" [(set (match_dup 0) (match_dup 3)) (set (match_dup 0) (match_dup 4))] @@ -347,8 +337,8 @@ }) (define_split - [(set (match_operand:VM 0 "altivec_register_operand" "") - (match_operand:VM 1 "easy_vector_constant_vsldoi" ""))] + [(set (match_operand:VM 0 "altivec_register_operand") + (match_operand:VM 1 "easy_vector_constant_vsldoi"))] "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode) && can_create_pseudo_p ()" [(set (match_dup 2) (match_dup 3)) (set (match_dup 4) (match_dup 5)) @@ -363,12 +353,10 @@ HOST_WIDE_INT val = const_vector_elt_as_int (op1, elt); rtx rtx_val = GEN_INT (val); int shift = vspltis_shifted (op1); - int nunits = GET_MODE_NUNITS (<MODE>mode); - int i; gcc_assert (shift != 0); operands[2] = gen_reg_rtx (<MODE>mode); - operands[3] = gen_rtx_CONST_VECTOR (<MODE>mode, rtvec_alloc (nunits)); + operands[3] = gen_const_vec_duplicate (<MODE>mode, rtx_val); operands[4] = gen_reg_rtx (<MODE>mode); if (shift < 0) @@ -381,10 +369,6 @@ operands[5] = CONST0_RTX (<MODE>mode); operands[6] = GEN_INT (shift); } - - /* Populate 
the constant vectors. */ - for (i = 0; i < nunits; i++) - XVECEXP (operands[3], 0, i) = rtx_val; }) (define_insn "get_vrsave_internal" @@ -419,13 +403,11 @@ (use (match_operand:SI 1 "call_operand" "s"))])] "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT" "bl %z1" - [(set_attr "type" "branch") - (set_attr "length" "4")]) + [(set_attr "type" "branch")]) (define_insn "*restore_world" [(match_parallel 0 "restore_world_operation" [(return) - (use (reg:SI LR_REGNO)) (use (match_operand:SI 1 "call_operand" "s")) (clobber (match_operand:SI 2 "gpc_reg_operand" "=r"))])] "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT" @@ -448,8 +430,7 @@ (match_operand:V4SI 4 "altivec_register_operand" "v"))])] "TARGET_ALTIVEC" "bl %1" - [(set_attr "type" "branch") - (set_attr "length" "4")]) + [(set_attr "type" "branch")]) (define_insn "*save_vregs_<mode>_r12" [(match_parallel 0 "any_parallel_operand" @@ -462,8 +443,7 @@ (match_operand:V4SI 4 "altivec_register_operand" "v"))])] "TARGET_ALTIVEC" "bl %1" - [(set_attr "type" "branch") - (set_attr "length" "4")]) + [(set_attr "type" "branch")]) (define_insn "*restore_vregs_<mode>_r11" [(match_parallel 0 "any_parallel_operand" @@ -476,8 +456,7 @@ (match_operand:P 4 "short_cint_operand" "I"))))])] "TARGET_ALTIVEC" "bl %1" - [(set_attr "type" "branch") - (set_attr "length" "4")]) + [(set_attr "type" "branch")]) (define_insn "*restore_vregs_<mode>_r12" [(match_parallel 0 "any_parallel_operand" @@ -490,8 +469,7 @@ (match_operand:P 4 "short_cint_operand" "I"))))])] "TARGET_ALTIVEC" "bl %1" - [(set_attr "type" "branch") - (set_attr "length" "4")]) + [(set_attr "type" "branch")]) ;; Simple binary operations. @@ -615,7 +593,7 @@ "vcmpbfp %0,%1,%2" [(set_attr "type" "veccmp")]) -(define_insn "*altivec_eq<mode>" +(define_insn "altivec_eq<mode>" [(set (match_operand:VI2 0 "altivec_register_operand" "=v") (eq:VI2 (match_operand:VI2 1 "altivec_register_operand" "v") (match_operand:VI2 2 "altivec_register_operand" "v")))] @@ -699,9 +677,9 @@ ;; We do multiply as a fused multiply-add with an add of a -0.0 vector. 
(define_expand "altivec_mulv4sf3" - [(set (match_operand:V4SF 0 "register_operand" "") - (fma:V4SF (match_operand:V4SF 1 "register_operand" "") - (match_operand:V4SF 2 "register_operand" "") + [(set (match_operand:V4SF 0 "register_operand") + (fma:V4SF (match_operand:V4SF 1 "register_operand") + (match_operand:V4SF 2 "register_operand") (match_dup 3)))] "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" { @@ -735,9 +713,9 @@ [(set_attr "type" "veccomplex")]) (define_expand "mulv4si3" - [(use (match_operand:V4SI 0 "register_operand" "")) - (use (match_operand:V4SI 1 "register_operand" "")) - (use (match_operand:V4SI 2 "register_operand" ""))] + [(use (match_operand:V4SI 0 "register_operand")) + (use (match_operand:V4SI 1 "register_operand")) + (use (match_operand:V4SI 2 "register_operand"))] "TARGET_ALTIVEC" { rtx zero; @@ -787,19 +765,20 @@ }) (define_expand "mulv8hi3" - [(use (match_operand:V8HI 0 "register_operand" "")) - (use (match_operand:V8HI 1 "register_operand" "")) - (use (match_operand:V8HI 2 "register_operand" ""))] + [(use (match_operand:V8HI 0 "register_operand")) + (use (match_operand:V8HI 1 "register_operand")) + (use (match_operand:V8HI 2 "register_operand"))] "TARGET_ALTIVEC" { rtx zero = gen_reg_rtx (V8HImode); emit_insn (gen_altivec_vspltish (zero, const0_rtx)); - emit_insn (gen_altivec_vmladduhm(operands[0], operands[1], operands[2], zero)); + emit_insn (gen_fmav8hi4 (operands[0], operands[1], operands[2], zero)); DONE; }) + ;; Fused multiply subtract (define_insn "*altivec_vnmsubfp" [(set (match_operand:V4SF 0 "register_operand" "=v") @@ -936,7 +915,7 @@ "vmhraddshs %0,%1,%2,%3" [(set_attr "type" "veccomplex")]) -(define_insn "altivec_vmladduhm" +(define_insn "fmav8hi4" [(set (match_operand:V8HI 0 "register_operand" "=v") (plus:V8HI (mult:V8HI (match_operand:V8HI 1 "register_operand" "v") (match_operand:V8HI 2 "register_operand" "v")) @@ -946,32 +925,16 @@ [(set_attr "type" "veccomplex")]) (define_expand "altivec_vmrghb" - [(use (match_operand:V16QI 0 "register_operand" "")) - (use (match_operand:V16QI 1 "register_operand" "")) - (use (match_operand:V16QI 2 "register_operand" ""))] + [(use (match_operand:V16QI 0 "register_operand")) + (use (match_operand:V16QI 1 "register_operand")) + (use (match_operand:V16QI 2 "register_operand"))] "TARGET_ALTIVEC" { - rtvec v; - rtx x; - - /* Special handling for LE with -maltivec=be. 
*/ - if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG) - { - v = gen_rtvec (16, GEN_INT (8), GEN_INT (24), GEN_INT (9), GEN_INT (25), - GEN_INT (10), GEN_INT (26), GEN_INT (11), GEN_INT (27), - GEN_INT (12), GEN_INT (28), GEN_INT (13), GEN_INT (29), - GEN_INT (14), GEN_INT (30), GEN_INT (15), GEN_INT (31)); - x = gen_rtx_VEC_CONCAT (V32QImode, operands[2], operands[1]); - } - else - { - v = gen_rtvec (16, GEN_INT (0), GEN_INT (16), GEN_INT (1), GEN_INT (17), - GEN_INT (2), GEN_INT (18), GEN_INT (3), GEN_INT (19), - GEN_INT (4), GEN_INT (20), GEN_INT (5), GEN_INT (21), - GEN_INT (6), GEN_INT (22), GEN_INT (7), GEN_INT (23)); - x = gen_rtx_VEC_CONCAT (V32QImode, operands[1], operands[2]); - } - + rtvec v = gen_rtvec (16, GEN_INT (0), GEN_INT (16), GEN_INT (1), GEN_INT (17), + GEN_INT (2), GEN_INT (18), GEN_INT (3), GEN_INT (19), + GEN_INT (4), GEN_INT (20), GEN_INT (5), GEN_INT (21), + GEN_INT (6), GEN_INT (22), GEN_INT (7), GEN_INT (23)); + rtx x = gen_rtx_VEC_CONCAT (V32QImode, operands[1], operands[2]); x = gen_rtx_VEC_SELECT (V16QImode, x, gen_rtx_PARALLEL (VOIDmode, v)); emit_insn (gen_rtx_SET (operands[0], x)); DONE; @@ -1002,35 +965,22 @@ (define_insn "altivec_vmrghb_direct" [(set (match_operand:V16QI 0 "register_operand" "=v") - (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v") - (match_operand:V16QI 2 "register_operand" "v")] + (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v") + (match_operand:V16QI 2 "register_operand" "v")] UNSPEC_VMRGH_DIRECT))] "TARGET_ALTIVEC" "vmrghb %0,%1,%2" [(set_attr "type" "vecperm")]) (define_expand "altivec_vmrghh" - [(use (match_operand:V8HI 0 "register_operand" "")) - (use (match_operand:V8HI 1 "register_operand" "")) - (use (match_operand:V8HI 2 "register_operand" ""))] + [(use (match_operand:V8HI 0 "register_operand")) + (use (match_operand:V8HI 1 "register_operand")) + (use (match_operand:V8HI 2 "register_operand"))] "TARGET_ALTIVEC" { - rtvec v; - rtx x; - - /* Special handling for LE with -maltivec=be. */ - if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG) - { - v = gen_rtvec (8, GEN_INT (4), GEN_INT (12), GEN_INT (5), GEN_INT (13), - GEN_INT (6), GEN_INT (14), GEN_INT (7), GEN_INT (15)); - x = gen_rtx_VEC_CONCAT (V16HImode, operands[2], operands[1]); - } - else - { - v = gen_rtvec (8, GEN_INT (0), GEN_INT (8), GEN_INT (1), GEN_INT (9), - GEN_INT (2), GEN_INT (10), GEN_INT (3), GEN_INT (11)); - x = gen_rtx_VEC_CONCAT (V16HImode, operands[1], operands[2]); - } + rtvec v = gen_rtvec (8, GEN_INT (0), GEN_INT (8), GEN_INT (1), GEN_INT (9), + GEN_INT (2), GEN_INT (10), GEN_INT (3), GEN_INT (11)); + rtx x = gen_rtx_VEC_CONCAT (V16HImode, operands[1], operands[2]); x = gen_rtx_VEC_SELECT (V8HImode, x, gen_rtx_PARALLEL (VOIDmode, v)); emit_insn (gen_rtx_SET (operands[0], x)); @@ -1066,26 +1016,13 @@ [(set_attr "type" "vecperm")]) (define_expand "altivec_vmrghw" - [(use (match_operand:V4SI 0 "register_operand" "")) - (use (match_operand:V4SI 1 "register_operand" "")) - (use (match_operand:V4SI 2 "register_operand" ""))] + [(use (match_operand:V4SI 0 "register_operand")) + (use (match_operand:V4SI 1 "register_operand")) + (use (match_operand:V4SI 2 "register_operand"))] "VECTOR_MEM_ALTIVEC_P (V4SImode)" { - rtvec v; - rtx x; - - /* Special handling for LE with -maltivec=be. 
*/ - if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG) - { - v = gen_rtvec (4, GEN_INT (2), GEN_INT (6), GEN_INT (3), GEN_INT (7)); - x = gen_rtx_VEC_CONCAT (V8SImode, operands[2], operands[1]); - } - else - { - v = gen_rtvec (4, GEN_INT (0), GEN_INT (4), GEN_INT (1), GEN_INT (5)); - x = gen_rtx_VEC_CONCAT (V8SImode, operands[1], operands[2]); - } - + rtvec v = gen_rtvec (4, GEN_INT (0), GEN_INT (4), GEN_INT (1), GEN_INT (5)); + rtx x = gen_rtx_VEC_CONCAT (V8SImode, operands[1], operands[2]); x = gen_rtx_VEC_SELECT (V4SImode, x, gen_rtx_PARALLEL (VOIDmode, v)); emit_insn (gen_rtx_SET (operands[0], x)); DONE; @@ -1109,12 +1046,14 @@ [(set_attr "type" "vecperm")]) (define_insn "altivec_vmrghw_direct" - [(set (match_operand:V4SI 0 "register_operand" "=v") - (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v") - (match_operand:V4SI 2 "register_operand" "v")] - UNSPEC_VMRGH_DIRECT))] + [(set (match_operand:V4SI 0 "register_operand" "=v,wa") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v,wa") + (match_operand:V4SI 2 "register_operand" "v,wa")] + UNSPEC_VMRGH_DIRECT))] "TARGET_ALTIVEC" - "vmrghw %0,%1,%2" + "@ + vmrghw %0,%1,%2 + xxmrghw %x0,%x1,%x2" [(set_attr "type" "vecperm")]) (define_insn "*altivec_vmrghsf" @@ -1135,32 +1074,16 @@ [(set_attr "type" "vecperm")]) (define_expand "altivec_vmrglb" - [(use (match_operand:V16QI 0 "register_operand" "")) - (use (match_operand:V16QI 1 "register_operand" "")) - (use (match_operand:V16QI 2 "register_operand" ""))] + [(use (match_operand:V16QI 0 "register_operand")) + (use (match_operand:V16QI 1 "register_operand")) + (use (match_operand:V16QI 2 "register_operand"))] "TARGET_ALTIVEC" { - rtvec v; - rtx x; - - /* Special handling for LE with -maltivec=be. */ - if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG) - { - v = gen_rtvec (16, GEN_INT (0), GEN_INT (16), GEN_INT (1), GEN_INT (17), - GEN_INT (2), GEN_INT (18), GEN_INT (3), GEN_INT (19), - GEN_INT (4), GEN_INT (20), GEN_INT (5), GEN_INT (21), - GEN_INT (6), GEN_INT (22), GEN_INT (7), GEN_INT (23)); - x = gen_rtx_VEC_CONCAT (V32QImode, operands[2], operands[1]); - } - else - { - v = gen_rtvec (16, GEN_INT (8), GEN_INT (24), GEN_INT (9), GEN_INT (25), - GEN_INT (10), GEN_INT (26), GEN_INT (11), GEN_INT (27), - GEN_INT (12), GEN_INT (28), GEN_INT (13), GEN_INT (29), - GEN_INT (14), GEN_INT (30), GEN_INT (15), GEN_INT (31)); - x = gen_rtx_VEC_CONCAT (V32QImode, operands[1], operands[2]); - } - + rtvec v = gen_rtvec (16, GEN_INT (8), GEN_INT (24), GEN_INT (9), GEN_INT (25), + GEN_INT (10), GEN_INT (26), GEN_INT (11), GEN_INT (27), + GEN_INT (12), GEN_INT (28), GEN_INT (13), GEN_INT (29), + GEN_INT (14), GEN_INT (30), GEN_INT (15), GEN_INT (31)); + rtx x = gen_rtx_VEC_CONCAT (V32QImode, operands[1], operands[2]); x = gen_rtx_VEC_SELECT (V16QImode, x, gen_rtx_PARALLEL (VOIDmode, v)); emit_insn (gen_rtx_SET (operands[0], x)); DONE; @@ -1191,36 +1114,22 @@ (define_insn "altivec_vmrglb_direct" [(set (match_operand:V16QI 0 "register_operand" "=v") - (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v") - (match_operand:V16QI 2 "register_operand" "v")] - UNSPEC_VMRGL_DIRECT))] + (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v") + (match_operand:V16QI 2 "register_operand" "v")] + UNSPEC_VMRGL_DIRECT))] "TARGET_ALTIVEC" "vmrglb %0,%1,%2" [(set_attr "type" "vecperm")]) (define_expand "altivec_vmrglh" - [(use (match_operand:V8HI 0 "register_operand" "")) - (use (match_operand:V8HI 1 "register_operand" "")) - (use (match_operand:V8HI 2 "register_operand" ""))] + [(use 
(match_operand:V8HI 0 "register_operand")) + (use (match_operand:V8HI 1 "register_operand")) + (use (match_operand:V8HI 2 "register_operand"))] "TARGET_ALTIVEC" { - rtvec v; - rtx x; - - /* Special handling for LE with -maltivec=be. */ - if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG) - { - v = gen_rtvec (8, GEN_INT (0), GEN_INT (8), GEN_INT (1), GEN_INT (9), - GEN_INT (2), GEN_INT (10), GEN_INT (3), GEN_INT (11)); - x = gen_rtx_VEC_CONCAT (V16HImode, operands[2], operands[1]); - } - else - { - v = gen_rtvec (8, GEN_INT (4), GEN_INT (12), GEN_INT (5), GEN_INT (13), - GEN_INT (6), GEN_INT (14), GEN_INT (7), GEN_INT (15)); - x = gen_rtx_VEC_CONCAT (V16HImode, operands[1], operands[2]); - } - + rtvec v = gen_rtvec (8, GEN_INT (4), GEN_INT (12), GEN_INT (5), GEN_INT (13), + GEN_INT (6), GEN_INT (14), GEN_INT (7), GEN_INT (15)); + rtx x = gen_rtx_VEC_CONCAT (V16HImode, operands[1], operands[2]); x = gen_rtx_VEC_SELECT (V8HImode, x, gen_rtx_PARALLEL (VOIDmode, v)); emit_insn (gen_rtx_SET (operands[0], x)); DONE; @@ -1249,32 +1158,19 @@ [(set (match_operand:V8HI 0 "register_operand" "=v") (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v") (match_operand:V8HI 2 "register_operand" "v")] - UNSPEC_VMRGL_DIRECT))] + UNSPEC_VMRGL_DIRECT))] "TARGET_ALTIVEC" "vmrglh %0,%1,%2" [(set_attr "type" "vecperm")]) (define_expand "altivec_vmrglw" - [(use (match_operand:V4SI 0 "register_operand" "")) - (use (match_operand:V4SI 1 "register_operand" "")) - (use (match_operand:V4SI 2 "register_operand" ""))] + [(use (match_operand:V4SI 0 "register_operand")) + (use (match_operand:V4SI 1 "register_operand")) + (use (match_operand:V4SI 2 "register_operand"))] "VECTOR_MEM_ALTIVEC_P (V4SImode)" { - rtvec v; - rtx x; - - /* Special handling for LE with -maltivec=be. */ - if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG) - { - v = gen_rtvec (4, GEN_INT (0), GEN_INT (4), GEN_INT (1), GEN_INT (5)); - x = gen_rtx_VEC_CONCAT (V8SImode, operands[2], operands[1]); - } - else - { - v = gen_rtvec (4, GEN_INT (2), GEN_INT (6), GEN_INT (3), GEN_INT (7)); - x = gen_rtx_VEC_CONCAT (V8SImode, operands[1], operands[2]); - } - + rtvec v = gen_rtvec (4, GEN_INT (2), GEN_INT (6), GEN_INT (3), GEN_INT (7)); + rtx x = gen_rtx_VEC_CONCAT (V8SImode, operands[1], operands[2]); x = gen_rtx_VEC_SELECT (V4SImode, x, gen_rtx_PARALLEL (VOIDmode, v)); emit_insn (gen_rtx_SET (operands[0], x)); DONE; @@ -1298,12 +1194,14 @@ [(set_attr "type" "vecperm")]) (define_insn "altivec_vmrglw_direct" - [(set (match_operand:V4SI 0 "register_operand" "=v") - (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v") - (match_operand:V4SI 2 "register_operand" "v")] - UNSPEC_VMRGL_DIRECT))] + [(set (match_operand:V4SI 0 "register_operand" "=v,wa") + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v,wa") + (match_operand:V4SI 2 "register_operand" "v,wa")] + UNSPEC_VMRGL_DIRECT))] "TARGET_ALTIVEC" - "vmrglw %0,%1,%2" + "@ + vmrglw %0,%1,%2 + xxmrglw %x0,%x1,%x2" [(set_attr "type" "vecperm")]) (define_insn "*altivec_vmrglsf" @@ -1323,6 +1221,24 @@ } [(set_attr "type" "vecperm")]) +;; Power8 vector merge two V2DF/V2DI even words to V2DF +(define_expand "p8_vmrgew_<mode>" + [(use (match_operand:VSX_D 0 "vsx_register_operand")) + (use (match_operand:VSX_D 1 "vsx_register_operand")) + (use (match_operand:VSX_D 2 "vsx_register_operand"))] + "VECTOR_MEM_VSX_P (<MODE>mode)" +{ + rtvec v; + rtx x; + + v = gen_rtvec (2, GEN_INT (0), GEN_INT (2)); + x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[1], operands[2]); + + x = gen_rtx_VEC_SELECT (<MODE>mode, x, 
gen_rtx_PARALLEL (VOIDmode, v)); + emit_insn (gen_rtx_SET (operands[0], x)); + DONE; +}) + ;; Power8 vector merge two V4SF/V4SI even words to V4SF (define_insn "p8_vmrgew_<mode>" [(set (match_operand:VSX_W 0 "register_operand" "=v") @@ -1341,12 +1257,12 @@ } [(set_attr "type" "vecperm")]) -(define_insn "p8_vmrgow" - [(set (match_operand:V4SI 0 "register_operand" "=v") - (vec_select:V4SI - (vec_concat:V8SI - (match_operand:V4SI 1 "register_operand" "v") - (match_operand:V4SI 2 "register_operand" "v")) +(define_insn "p8_vmrgow_<mode>" + [(set (match_operand:VSX_W 0 "register_operand" "=v") + (vec_select:VSX_W + (vec_concat:<VS_double> + (match_operand:VSX_W 1 "register_operand" "v") + (match_operand:VSX_W 2 "register_operand" "v")) (parallel [(const_int 1) (const_int 5) (const_int 3) (const_int 7)])))] "TARGET_P8_VECTOR" @@ -1358,6 +1274,23 @@ } [(set_attr "type" "vecperm")]) +(define_expand "p8_vmrgow_<mode>" + [(use (match_operand:VSX_D 0 "vsx_register_operand")) + (use (match_operand:VSX_D 1 "vsx_register_operand")) + (use (match_operand:VSX_D 2 "vsx_register_operand"))] + "VECTOR_MEM_VSX_P (<MODE>mode)" +{ + rtvec v; + rtx x; + + v = gen_rtvec (2, GEN_INT (1), GEN_INT (3)); + x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[1], operands[2]); + + x = gen_rtx_VEC_SELECT (<MODE>mode, x, gen_rtx_PARALLEL (VOIDmode, v)); + emit_insn (gen_rtx_SET (operands[0], x)); + DONE; +}) + (define_insn "p8_vmrgew_<mode>_direct" [(set (match_operand:VSX_W 0 "register_operand" "=v") (unspec:VSX_W [(match_operand:VSX_W 1 "register_operand" "v") @@ -1377,12 +1310,12 @@ [(set_attr "type" "vecperm")]) (define_expand "vec_widen_umult_even_v16qi" - [(use (match_operand:V8HI 0 "register_operand" "")) - (use (match_operand:V16QI 1 "register_operand" "")) - (use (match_operand:V16QI 2 "register_operand" ""))] + [(use (match_operand:V8HI 0 "register_operand")) + (use (match_operand:V16QI 1 "register_operand")) + (use (match_operand:V16QI 2 "register_operand"))] "TARGET_ALTIVEC" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) emit_insn (gen_altivec_vmuleub (operands[0], operands[1], operands[2])); else emit_insn (gen_altivec_vmuloub (operands[0], operands[1], operands[2])); @@ -1390,12 +1323,12 @@ }) (define_expand "vec_widen_smult_even_v16qi" - [(use (match_operand:V8HI 0 "register_operand" "")) - (use (match_operand:V16QI 1 "register_operand" "")) - (use (match_operand:V16QI 2 "register_operand" ""))] + [(use (match_operand:V8HI 0 "register_operand")) + (use (match_operand:V16QI 1 "register_operand")) + (use (match_operand:V16QI 2 "register_operand"))] "TARGET_ALTIVEC" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) emit_insn (gen_altivec_vmulesb (operands[0], operands[1], operands[2])); else emit_insn (gen_altivec_vmulosb (operands[0], operands[1], operands[2])); @@ -1403,12 +1336,12 @@ }) (define_expand "vec_widen_umult_even_v8hi" - [(use (match_operand:V4SI 0 "register_operand" "")) - (use (match_operand:V8HI 1 "register_operand" "")) - (use (match_operand:V8HI 2 "register_operand" ""))] + [(use (match_operand:V4SI 0 "register_operand")) + (use (match_operand:V8HI 1 "register_operand")) + (use (match_operand:V8HI 2 "register_operand"))] "TARGET_ALTIVEC" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) emit_insn (gen_altivec_vmuleuh (operands[0], operands[1], operands[2])); else emit_insn (gen_altivec_vmulouh (operands[0], operands[1], operands[2])); @@ -1416,12 +1349,12 @@ }) (define_expand "vec_widen_smult_even_v8hi" - [(use (match_operand:V4SI 0 "register_operand" "")) - (use 
(match_operand:V8HI 1 "register_operand" "")) - (use (match_operand:V8HI 2 "register_operand" ""))] + [(use (match_operand:V4SI 0 "register_operand")) + (use (match_operand:V8HI 1 "register_operand")) + (use (match_operand:V8HI 2 "register_operand"))] "TARGET_ALTIVEC" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) emit_insn (gen_altivec_vmulesh (operands[0], operands[1], operands[2])); else emit_insn (gen_altivec_vmulosh (operands[0], operands[1], operands[2])); @@ -1434,7 +1367,7 @@ (use (match_operand:V4SI 2 "register_operand"))] "TARGET_P8_VECTOR" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) emit_insn (gen_altivec_vmuleuw (operands[0], operands[1], operands[2])); else emit_insn (gen_altivec_vmulouw (operands[0], operands[1], operands[2])); @@ -1447,7 +1380,7 @@ (use (match_operand:V4SI 2 "register_operand"))] "TARGET_P8_VECTOR" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) emit_insn (gen_altivec_vmulesw (operands[0], operands[1], operands[2])); else emit_insn (gen_altivec_vmulosw (operands[0], operands[1], operands[2])); @@ -1455,12 +1388,12 @@ }) (define_expand "vec_widen_umult_odd_v16qi" - [(use (match_operand:V8HI 0 "register_operand" "")) - (use (match_operand:V16QI 1 "register_operand" "")) - (use (match_operand:V16QI 2 "register_operand" ""))] + [(use (match_operand:V8HI 0 "register_operand")) + (use (match_operand:V16QI 1 "register_operand")) + (use (match_operand:V16QI 2 "register_operand"))] "TARGET_ALTIVEC" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) emit_insn (gen_altivec_vmuloub (operands[0], operands[1], operands[2])); else emit_insn (gen_altivec_vmuleub (operands[0], operands[1], operands[2])); @@ -1468,12 +1401,12 @@ }) (define_expand "vec_widen_smult_odd_v16qi" - [(use (match_operand:V8HI 0 "register_operand" "")) - (use (match_operand:V16QI 1 "register_operand" "")) - (use (match_operand:V16QI 2 "register_operand" ""))] + [(use (match_operand:V8HI 0 "register_operand")) + (use (match_operand:V16QI 1 "register_operand")) + (use (match_operand:V16QI 2 "register_operand"))] "TARGET_ALTIVEC" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) emit_insn (gen_altivec_vmulosb (operands[0], operands[1], operands[2])); else emit_insn (gen_altivec_vmulesb (operands[0], operands[1], operands[2])); @@ -1481,12 +1414,12 @@ }) (define_expand "vec_widen_umult_odd_v8hi" - [(use (match_operand:V4SI 0 "register_operand" "")) - (use (match_operand:V8HI 1 "register_operand" "")) - (use (match_operand:V8HI 2 "register_operand" ""))] + [(use (match_operand:V4SI 0 "register_operand")) + (use (match_operand:V8HI 1 "register_operand")) + (use (match_operand:V8HI 2 "register_operand"))] "TARGET_ALTIVEC" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) emit_insn (gen_altivec_vmulouh (operands[0], operands[1], operands[2])); else emit_insn (gen_altivec_vmuleuh (operands[0], operands[1], operands[2])); @@ -1494,12 +1427,12 @@ }) (define_expand "vec_widen_smult_odd_v8hi" - [(use (match_operand:V4SI 0 "register_operand" "")) - (use (match_operand:V8HI 1 "register_operand" "")) - (use (match_operand:V8HI 2 "register_operand" ""))] + [(use (match_operand:V4SI 0 "register_operand")) + (use (match_operand:V8HI 1 "register_operand")) + (use (match_operand:V8HI 2 "register_operand"))] "TARGET_ALTIVEC" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) emit_insn (gen_altivec_vmulosh (operands[0], operands[1], operands[2])); else emit_insn (gen_altivec_vmulesh (operands[0], operands[1], operands[2])); @@ -1512,7 +1445,7 @@ (use (match_operand:V4SI 2 
"register_operand"))] "TARGET_P8_VECTOR" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) emit_insn (gen_altivec_vmulouw (operands[0], operands[1], operands[2])); else emit_insn (gen_altivec_vmuleuw (operands[0], operands[1], operands[2])); @@ -1525,7 +1458,7 @@ (use (match_operand:V4SI 2 "register_operand"))] "TARGET_P8_VECTOR" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) emit_insn (gen_altivec_vmulosw (operands[0], operands[1], operands[2])); else emit_insn (gen_altivec_vmulesw (operands[0], operands[1], operands[2])); @@ -1647,13 +1580,12 @@ (match_operand:V4SI 2 "register_operand" "v")] UNSPEC_VPKPX))] "TARGET_ALTIVEC" - "* - { - if (VECTOR_ELT_ORDER_BIG) - return \"vpkpx %0,%1,%2\"; - else - return \"vpkpx %0,%2,%1\"; - }" +{ + if (BYTES_BIG_ENDIAN) + return "vpkpx %0,%1,%2"; + else + return "vpkpx %0,%2,%1"; +} [(set_attr "type" "vecperm")]) (define_insn "altivec_vpks<VI_char>ss" @@ -1662,13 +1594,12 @@ (match_operand:VP 2 "register_operand" "v")] UNSPEC_VPACK_SIGN_SIGN_SAT))] "<VI_unit>" - "* - { - if (VECTOR_ELT_ORDER_BIG) - return \"vpks<VI_char>ss %0,%1,%2\"; - else - return \"vpks<VI_char>ss %0,%2,%1\"; - }" +{ + if (BYTES_BIG_ENDIAN) + return "vpks<VI_char>ss %0,%1,%2"; + else + return "vpks<VI_char>ss %0,%2,%1"; +} [(set_attr "type" "vecperm")]) (define_insn "altivec_vpks<VI_char>us" @@ -1677,13 +1608,12 @@ (match_operand:VP 2 "register_operand" "v")] UNSPEC_VPACK_SIGN_UNS_SAT))] "<VI_unit>" - "* - { - if (VECTOR_ELT_ORDER_BIG) - return \"vpks<VI_char>us %0,%1,%2\"; - else - return \"vpks<VI_char>us %0,%2,%1\"; - }" +{ + if (BYTES_BIG_ENDIAN) + return "vpks<VI_char>us %0,%1,%2"; + else + return "vpks<VI_char>us %0,%2,%1"; +} [(set_attr "type" "vecperm")]) (define_insn "altivec_vpku<VI_char>us" @@ -1692,13 +1622,12 @@ (match_operand:VP 2 "register_operand" "v")] UNSPEC_VPACK_UNS_UNS_SAT))] "<VI_unit>" - "* - { - if (VECTOR_ELT_ORDER_BIG) - return \"vpku<VI_char>us %0,%1,%2\"; - else - return \"vpku<VI_char>us %0,%2,%1\"; - }" +{ + if (BYTES_BIG_ENDIAN) + return "vpku<VI_char>us %0,%1,%2"; + else + return "vpku<VI_char>us %0,%2,%1"; +} [(set_attr "type" "vecperm")]) (define_insn "altivec_vpku<VI_char>um" @@ -1707,13 +1636,12 @@ (match_operand:VP 2 "register_operand" "v")] UNSPEC_VPACK_UNS_UNS_MOD))] "<VI_unit>" - "* - { - if (VECTOR_ELT_ORDER_BIG) - return \"vpku<VI_char>um %0,%1,%2\"; - else - return \"vpku<VI_char>um %0,%2,%1\"; - }" +{ + if (BYTES_BIG_ENDIAN) + return "vpku<VI_char>um %0,%1,%2"; + else + return "vpku<VI_char>um %0,%2,%1"; +} [(set_attr "type" "vecperm")]) (define_insn "altivec_vpku<VI_char>um_direct" @@ -1722,13 +1650,12 @@ (match_operand:VP 2 "register_operand" "v")] UNSPEC_VPACK_UNS_UNS_MOD_DIRECT))] "<VI_unit>" - "* - { - if (BYTES_BIG_ENDIAN) - return \"vpku<VI_char>um %0,%1,%2\"; - else - return \"vpku<VI_char>um %0,%2,%1\"; - }" +{ + if (BYTES_BIG_ENDIAN) + return "vpku<VI_char>um %0,%1,%2"; + else + return "vpku<VI_char>um %0,%2,%1"; +} [(set_attr "type" "vecperm")]) (define_insn "*altivec_vrl<VI_char>" @@ -1862,7 +1789,7 @@ (use (match_operand:V4SI 2 "register_operand"))] "TARGET_ALTIVEC" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) emit_insn (gen_altivec_vsum2sws_direct (operands[0], operands[1], operands[2])); else @@ -1895,7 +1822,7 @@ (use (match_operand:V4SI 2 "register_operand"))] "TARGET_ALTIVEC" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) emit_insn (gen_altivec_vsumsws_direct (operands[0], operands[1], operands[2])); else @@ -1922,20 +1849,13 @@ [(set_attr "type" "veccomplex")]) (define_expand "altivec_vspltb" 
- [(use (match_operand:V16QI 0 "register_operand" "")) - (use (match_operand:V16QI 1 "register_operand" "")) - (use (match_operand:QI 2 "u5bit_cint_operand" ""))] + [(use (match_operand:V16QI 0 "register_operand")) + (use (match_operand:V16QI 1 "register_operand")) + (use (match_operand:QI 2 "const_0_to_15_operand"))] "TARGET_ALTIVEC" { - rtvec v; + rtvec v = gen_rtvec (1, operands[2]); rtx x; - - /* Special handling for LE with -maltivec=be. We have to reflect - the actual selected index for the splat in the RTL. */ - if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG) - operands[2] = GEN_INT (15 - INTVAL (operands[2])); - - v = gen_rtvec (1, operands[2]); x = gen_rtx_VEC_SELECT (QImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v)); x = gen_rtx_VEC_DUPLICATE (V16QImode, x); emit_insn (gen_rtx_SET (operands[0], x)); @@ -1947,12 +1867,9 @@ (vec_duplicate:V16QI (vec_select:QI (match_operand:V16QI 1 "register_operand" "v") (parallel - [(match_operand:QI 2 "u5bit_cint_operand" "")]))))] + [(match_operand:QI 2 "const_0_to_15_operand" "")]))))] "TARGET_ALTIVEC" { - /* For true LE, this adjusts the selected index. For LE with - -maltivec=be, this reverses what was done in the define_expand - because the instruction already has big-endian bias. */ if (!BYTES_BIG_ENDIAN) operands[2] = GEN_INT (15 - INTVAL (operands[2])); @@ -1963,27 +1880,20 @@ (define_insn "altivec_vspltb_direct" [(set (match_operand:V16QI 0 "register_operand" "=v") (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v") - (match_operand:QI 2 "u5bit_cint_operand" "i")] + (match_operand:QI 2 "const_0_to_15_operand" "i")] UNSPEC_VSPLT_DIRECT))] "TARGET_ALTIVEC" "vspltb %0,%1,%2" [(set_attr "type" "vecperm")]) (define_expand "altivec_vsplth" - [(use (match_operand:V8HI 0 "register_operand" "")) - (use (match_operand:V8HI 1 "register_operand" "")) - (use (match_operand:QI 2 "u5bit_cint_operand" ""))] + [(use (match_operand:V8HI 0 "register_operand")) + (use (match_operand:V8HI 1 "register_operand")) + (use (match_operand:QI 2 "const_0_to_7_operand"))] "TARGET_ALTIVEC" { - rtvec v; + rtvec v = gen_rtvec (1, operands[2]); rtx x; - - /* Special handling for LE with -maltivec=be. We have to reflect - the actual selected index for the splat in the RTL. */ - if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG) - operands[2] = GEN_INT (7 - INTVAL (operands[2])); - - v = gen_rtvec (1, operands[2]); x = gen_rtx_VEC_SELECT (HImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v)); x = gen_rtx_VEC_DUPLICATE (V8HImode, x); emit_insn (gen_rtx_SET (operands[0], x)); @@ -1995,12 +1905,9 @@ (vec_duplicate:V8HI (vec_select:HI (match_operand:V8HI 1 "register_operand" "v") (parallel - [(match_operand:QI 2 "u5bit_cint_operand" "")]))))] + [(match_operand:QI 2 "const_0_to_7_operand" "")]))))] "TARGET_ALTIVEC" { - /* For true LE, this adjusts the selected index. For LE with - -maltivec=be, this reverses what was done in the define_expand - because the instruction already has big-endian bias. 
*/ if (!BYTES_BIG_ENDIAN) operands[2] = GEN_INT (7 - INTVAL (operands[2])); @@ -2011,27 +1918,20 @@ (define_insn "altivec_vsplth_direct" [(set (match_operand:V8HI 0 "register_operand" "=v") (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v") - (match_operand:QI 2 "u5bit_cint_operand" "i")] + (match_operand:QI 2 "const_0_to_7_operand" "i")] UNSPEC_VSPLT_DIRECT))] "TARGET_ALTIVEC" "vsplth %0,%1,%2" [(set_attr "type" "vecperm")]) (define_expand "altivec_vspltw" - [(use (match_operand:V4SI 0 "register_operand" "")) - (use (match_operand:V4SI 1 "register_operand" "")) - (use (match_operand:QI 2 "u5bit_cint_operand" ""))] + [(use (match_operand:V4SI 0 "register_operand")) + (use (match_operand:V4SI 1 "register_operand")) + (use (match_operand:QI 2 "const_0_to_3_operand"))] "TARGET_ALTIVEC" { - rtvec v; + rtvec v = gen_rtvec (1, operands[2]); rtx x; - - /* Special handling for LE with -maltivec=be. We have to reflect - the actual selected index for the splat in the RTL. */ - if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG) - operands[2] = GEN_INT (3 - INTVAL (operands[2])); - - v = gen_rtvec (1, operands[2]); x = gen_rtx_VEC_SELECT (SImode, operands[1], gen_rtx_PARALLEL (VOIDmode, v)); x = gen_rtx_VEC_DUPLICATE (V4SImode, x); emit_insn (gen_rtx_SET (operands[0], x)); @@ -2043,12 +1943,9 @@ (vec_duplicate:V4SI (vec_select:SI (match_operand:V4SI 1 "register_operand" "v") (parallel - [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))] + [(match_operand:QI 2 "const_0_to_3_operand" "i")]))))] "TARGET_ALTIVEC" { - /* For true LE, this adjusts the selected index. For LE with - -maltivec=be, this reverses what was done in the define_expand - because the instruction already has big-endian bias. */ if (!BYTES_BIG_ENDIAN) operands[2] = GEN_INT (3 - INTVAL (operands[2])); @@ -2059,27 +1956,20 @@ (define_insn "altivec_vspltw_direct" [(set (match_operand:V4SI 0 "register_operand" "=v") (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v") - (match_operand:QI 2 "u5bit_cint_operand" "i")] + (match_operand:QI 2 "const_0_to_3_operand" "i")] UNSPEC_VSPLT_DIRECT))] "TARGET_ALTIVEC" "vspltw %0,%1,%2" [(set_attr "type" "vecperm")]) (define_expand "altivec_vspltsf" - [(use (match_operand:V4SF 0 "register_operand" "")) - (use (match_operand:V4SF 1 "register_operand" "")) - (use (match_operand:QI 2 "u5bit_cint_operand" ""))] + [(use (match_operand:V4SF 0 "register_operand")) + (use (match_operand:V4SF 1 "register_operand")) + (use (match_operand:QI 2 "const_0_to_3_operand"))] "TARGET_ALTIVEC" { - rtvec v; + rtvec v = gen_rtvec (1, operands[2]); rtx x; - - /* Special handling for LE with -maltivec=be. We have to reflect - the actual selected index for the splat in the RTL. */ - if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG) - operands[2] = GEN_INT (3 - INTVAL (operands[2])); - - v = gen_rtvec (1, operands[2]); x = gen_rtx_VEC_SELECT (SFmode, operands[1], gen_rtx_PARALLEL (VOIDmode, v)); x = gen_rtx_VEC_DUPLICATE (V4SFmode, x); emit_insn (gen_rtx_SET (operands[0], x)); @@ -2091,12 +1981,9 @@ (vec_duplicate:V4SF (vec_select:SF (match_operand:V4SF 1 "register_operand" "v") (parallel - [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))] + [(match_operand:QI 2 "const_0_to_3_operand" "i")]))))] "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" { - /* For true LE, this adjusts the selected index. For LE with - -maltivec=be, this reverses what was done in the define_expand - because the instruction already has big-endian bias. 
*/ if (!BYTES_BIG_ENDIAN) operands[2] = GEN_INT (3 - INTVAL (operands[2])); @@ -2120,14 +2007,14 @@ [(set_attr "type" "vecfloat")]) (define_expand "altivec_vperm_<mode>" - [(set (match_operand:VM 0 "register_operand" "") - (unspec:VM [(match_operand:VM 1 "register_operand" "") - (match_operand:VM 2 "register_operand" "") - (match_operand:V16QI 3 "register_operand" "")] + [(set (match_operand:VM 0 "register_operand") + (unspec:VM [(match_operand:VM 1 "register_operand") + (match_operand:VM 2 "register_operand") + (match_operand:V16QI 3 "register_operand")] UNSPEC_VPERM))] "TARGET_ALTIVEC" { - if (!VECTOR_ELT_ORDER_BIG) + if (!BYTES_BIG_ENDIAN) { altivec_expand_vec_perm_le (operands); DONE; @@ -2135,7 +2022,7 @@ }) ;; Slightly prefer vperm, since the target does not overlap the source -(define_insn "*altivec_vperm_<mode>_internal" +(define_insn "altivec_vperm_<mode>_direct" [(set (match_operand:VM 0 "register_operand" "=v,?wo") (unspec:VM [(match_operand:VM 1 "register_operand" "v,wo") (match_operand:VM 2 "register_operand" "v,0") @@ -2145,8 +2032,7 @@ "@ vperm %0,%1,%2,%3 xxperm %x0,%x1,%x3" - [(set_attr "type" "vecperm") - (set_attr "length" "4")]) + [(set_attr "type" "vecperm")]) (define_insn "altivec_vperm_v8hiv16qi" [(set (match_operand:V16QI 0 "register_operand" "=v,?wo") @@ -2158,18 +2044,17 @@ "@ vperm %0,%1,%2,%3 xxperm %x0,%x1,%x3" - [(set_attr "type" "vecperm") - (set_attr "length" "4")]) + [(set_attr "type" "vecperm")]) (define_expand "altivec_vperm_<mode>_uns" - [(set (match_operand:VM 0 "register_operand" "") - (unspec:VM [(match_operand:VM 1 "register_operand" "") - (match_operand:VM 2 "register_operand" "") - (match_operand:V16QI 3 "register_operand" "")] + [(set (match_operand:VM 0 "register_operand") + (unspec:VM [(match_operand:VM 1 "register_operand") + (match_operand:VM 2 "register_operand") + (match_operand:V16QI 3 "register_operand")] UNSPEC_VPERM_UNS))] "TARGET_ALTIVEC" { - if (!VECTOR_ELT_ORDER_BIG) + if (!BYTES_BIG_ENDIAN) { altivec_expand_vec_perm_le (operands); DONE; @@ -2186,14 +2071,13 @@ "@ vperm %0,%1,%2,%3 xxperm %x0,%x1,%x3" - [(set_attr "type" "vecperm") - (set_attr "length" "4")]) + [(set_attr "type" "vecperm")]) (define_expand "vec_permv16qi" - [(set (match_operand:V16QI 0 "register_operand" "") - (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "") - (match_operand:V16QI 2 "register_operand" "") - (match_operand:V16QI 3 "register_operand" "")] + [(set (match_operand:V16QI 0 "register_operand") + (unspec:V16QI [(match_operand:V16QI 1 "register_operand") + (match_operand:V16QI 2 "register_operand") + (match_operand:V16QI 3 "register_operand")] UNSPEC_VPERM))] "TARGET_ALTIVEC" { @@ -2203,19 +2087,6 @@ } }) -(define_expand "vec_perm_constv16qi" - [(match_operand:V16QI 0 "register_operand" "") - (match_operand:V16QI 1 "register_operand" "") - (match_operand:V16QI 2 "register_operand" "") - (match_operand:V16QI 3 "" "")] - "TARGET_ALTIVEC" -{ - if (altivec_expand_vec_perm_const (operands)) - DONE; - else - FAIL; -}) - (define_insn "*altivec_vpermr_<mode>_internal" [(set (match_operand:VM 0 "register_operand" "=v,?wo") (unspec:VM [(match_operand:VM 1 "register_operand" "v,wo") @@ -2224,10 +2095,9 @@ UNSPEC_VPERMR))] "TARGET_P9_VECTOR" "@ - vpermr %0,%2,%1,%3 + vpermr %0,%1,%2,%3 xxpermr %x0,%x1,%x3" - [(set_attr "type" "vecperm") - (set_attr "length" "4")]) + [(set_attr "type" "vecperm")]) (define_insn "altivec_vrfip" ; ceil [(set (match_operand:V4SF 0 "register_operand" "=v") @@ -2324,11 +2194,10 @@ [(set_attr "type" "vecfloat")]) (define_expand 
"altivec_copysign_v4sf3" - [(use (match_operand:V4SF 0 "register_operand" "")) - (use (match_operand:V4SF 1 "register_operand" "")) - (use (match_operand:V4SF 2 "register_operand" ""))] + [(use (match_operand:V4SF 0 "register_operand")) + (use (match_operand:V4SF 1 "register_operand")) + (use (match_operand:V4SF 2 "register_operand"))] "VECTOR_UNIT_ALTIVEC_P (V4SFmode)" - " { rtx mask = gen_reg_rtx (V4SImode); rtvec v = rtvec_alloc (4); @@ -2343,7 +2212,7 @@ emit_insn (gen_vector_select_v4sf (operands[0], operands[1], operands[2], gen_lowpart (V4SFmode, mask))); DONE; -}") +}) (define_insn "altivec_vsldoi_<mode>" [(set (match_operand:VM 0 "register_operand" "=v") @@ -2361,7 +2230,7 @@ UNSPEC_VUNPACK_HI_SIGN))] "<VI_unit>" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) return "vupkhs<VU_char> %0,%1"; else return "vupkls<VU_char> %0,%1"; @@ -2382,7 +2251,7 @@ UNSPEC_VUNPACK_LO_SIGN))] "<VI_unit>" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) return "vupkls<VU_char> %0,%1"; else return "vupkhs<VU_char> %0,%1"; @@ -2403,7 +2272,7 @@ UNSPEC_VUPKHPX))] "TARGET_ALTIVEC" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) return "vupkhpx %0,%1"; else return "vupklpx %0,%1"; @@ -2416,7 +2285,7 @@ UNSPEC_VUPKLPX))] "TARGET_ALTIVEC" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) return "vupklpx %0,%1"; else return "vupkhpx %0,%1"; @@ -2425,7 +2294,7 @@ ;; Compare vectors producing a vector result and a predicate, setting CR6 to ;; indicate a combined status -(define_insn "*altivec_vcmpequ<VI_char>_p" +(define_insn "altivec_vcmpequ<VI_char>_p" [(set (reg:CC CR6_REGNO) (unspec:CC [(eq:CC (match_operand:VI2 1 "register_operand" "v") (match_operand:VI2 2 "register_operand" "v"))] @@ -2571,21 +2440,18 @@ [(set_attr "type" "vecsimple")]) (define_expand "altivec_lvsl" - [(use (match_operand:V16QI 0 "register_operand" "")) - (use (match_operand:V16QI 1 "memory_operand" ""))] + [(use (match_operand:V16QI 0 "register_operand")) + (use (match_operand:V16QI 1 "memory_operand"))] "TARGET_ALTIVEC" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) emit_insn (gen_altivec_lvsl_direct (operands[0], operands[1])); else { - int i; - rtx mask, perm[16], constv, vperm; + rtx mask, constv, vperm; mask = gen_reg_rtx (V16QImode); emit_insn (gen_altivec_lvsl_direct (mask, operands[1])); - for (i = 0; i < 16; ++i) - perm[i] = GEN_INT (i); - constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)); + constv = gen_const_vec_series (V16QImode, const0_rtx, const1_rtx); constv = force_reg (V16QImode, constv); vperm = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, mask, mask, constv), UNSPEC_VPERM); @@ -2616,17 +2482,14 @@ (use (match_operand:V16QI 1 "memory_operand"))] "TARGET_ALTIVEC" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) emit_insn (gen_altivec_lvsr_direct (operands[0], operands[1])); else { - int i; - rtx mask, perm[16], constv, vperm; + rtx mask, constv, vperm; mask = gen_reg_rtx (V16QImode); emit_insn (gen_altivec_lvsr_direct (mask, operands[1])); - for (i = 0; i < 16; ++i) - perm[i] = GEN_INT (i); - constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)); + constv = gen_const_vec_series (V16QImode, const0_rtx, const1_rtx); constv = force_reg (V16QImode, constv); vperm = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, mask, mask, constv), UNSPEC_VPERM); @@ -2653,11 +2516,10 @@ [(set_attr "type" "vecload")]) (define_expand "build_vector_mask_for_load" - [(set (match_operand:V16QI 0 "register_operand" "") - (unspec:V16QI [(match_operand 1 "memory_operand" "")] UNSPEC_LVSR))] + 
[(set (match_operand:V16QI 0 "register_operand") + (unspec:V16QI [(match_operand 1 "memory_operand")] UNSPEC_LVSR))] "TARGET_ALTIVEC" - " -{ +{ rtx addr; rtx temp; @@ -2669,26 +2531,12 @@ emit_insn (gen_altivec_lvsr (operands[0], replace_equiv_address (operands[1], temp))); DONE; -}") +}) ;; Parallel some of the LVE* and STV*'s with unspecs because some have ;; identical rtl but different instructions-- and gcc gets confused. -(define_expand "altivec_lve<VI_char>x" - [(parallel - [(set (match_operand:VI 0 "register_operand" "=v") - (match_operand:VI 1 "memory_operand" "Z")) - (unspec [(const_int 0)] UNSPEC_LVE)])] - "TARGET_ALTIVEC" -{ - if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG) - { - altivec_expand_lvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_LVE); - DONE; - } -}) - -(define_insn "*altivec_lve<VI_char>x_internal" +(define_insn "altivec_lve<VI_char>x" [(parallel [(set (match_operand:VI 0 "register_operand" "=v") (match_operand:VI 1 "memory_operand" "Z")) @@ -2706,21 +2554,7 @@ "lvewx %0,%y1" [(set_attr "type" "vecload")]) -(define_expand "altivec_lvxl_<mode>" - [(parallel - [(set (match_operand:VM2 0 "register_operand" "=v") - (match_operand:VM2 1 "memory_operand" "Z")) - (unspec [(const_int 0)] UNSPEC_SET_VSCR)])] - "TARGET_ALTIVEC" -{ - if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG) - { - altivec_expand_lvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_SET_VSCR); - DONE; - } -}) - -(define_insn "*altivec_lvxl_<mode>_internal" +(define_insn "altivec_lvxl_<mode>" [(parallel [(set (match_operand:VM2 0 "register_operand" "=v") (match_operand:VM2 1 "memory_operand" "Z")) @@ -2741,39 +2575,47 @@ "lvx %0,%y1" [(set_attr "type" "vecload")]) +; The following patterns embody what lvx should usually look like. +(define_expand "altivec_lvx_<VM2:mode>" + [(set (match_operand:VM2 0 "register_operand") + (match_operand:VM2 1 "altivec_indexed_or_indirect_operand"))] + "TARGET_ALTIVEC" +{ + rtx addr = XEXP (operand1, 0); + if (rs6000_sum_of_two_registers_p (addr)) + { + rtx op1 = XEXP (addr, 0); + rtx op2 = XEXP (addr, 1); + if (TARGET_64BIT) + emit_insn (gen_altivec_lvx_<VM2:mode>_2op_di (operand0, op1, op2)); + else + emit_insn (gen_altivec_lvx_<VM2:mode>_2op_si (operand0, op1, op2)); + } + else + { + if (TARGET_64BIT) + emit_insn (gen_altivec_lvx_<VM2:mode>_1op_di (operand0, addr)); + else + emit_insn (gen_altivec_lvx_<VM2:mode>_1op_si (operand0, addr)); + } + DONE; +}) + ; The next two patterns embody what lvx should usually look like. -(define_insn "altivec_lvx_<mode>_2op" +(define_insn "altivec_lvx_<VM2:mode>_2op_<P:mptrsize>" [(set (match_operand:VM2 0 "register_operand" "=v") - (mem:VM2 (and:DI (plus:DI (match_operand:DI 1 "register_operand" "b") - (match_operand:DI 2 "register_operand" "r")) - (const_int -16))))] - "TARGET_ALTIVEC && TARGET_64BIT" + (mem:VM2 (and:P (plus:P (match_operand:P 1 "register_operand" "b") + (match_operand:P 2 "register_operand" "r")) + (const_int -16))))] + "TARGET_ALTIVEC" "lvx %0,%1,%2" [(set_attr "type" "vecload")]) -(define_insn "altivec_lvx_<mode>_1op" - [(set (match_operand:VM2 0 "register_operand" "=v") - (mem:VM2 (and:DI (match_operand:DI 1 "register_operand" "r") - (const_int -16))))] - "TARGET_ALTIVEC && TARGET_64BIT" - "lvx %0,0,%1" - [(set_attr "type" "vecload")]) - -; 32-bit versions of the above. 
-(define_insn "altivec_lvx_<mode>_2op_si" +(define_insn "altivec_lvx_<VM2:mode>_1op_<P:mptrsize>" [(set (match_operand:VM2 0 "register_operand" "=v") - (mem:VM2 (and:SI (plus:SI (match_operand:SI 1 "register_operand" "b") - (match_operand:SI 2 "register_operand" "r")) - (const_int -16))))] - "TARGET_ALTIVEC && TARGET_32BIT" - "lvx %0,%1,%2" - [(set_attr "type" "vecload")]) - -(define_insn "altivec_lvx_<mode>_1op_si" - [(set (match_operand:VM2 0 "register_operand" "=v") - (mem:VM2 (and:SI (match_operand:SI 1 "register_operand" "r") - (const_int -16))))] - "TARGET_ALTIVEC && TARGET_32BIT" + (mem:VM2 (and:P (match_operand:P 1 "register_operand" "r") + (const_int -16))))] + "TARGET_ALTIVEC" "lvx %0,0,%1" [(set_attr "type" "vecload")]) @@ -2789,57 +2631,51 @@ "stvx %1,%y0" [(set_attr "type" "vecstore")]) +; The following patterns embody what stvx should usually look like. +(define_expand "altivec_stvx_<VM2:mode>" + [(set (match_operand:VM2 1 "altivec_indexed_or_indirect_operand") + (match_operand:VM2 0 "register_operand"))] + "TARGET_ALTIVEC" +{ + rtx addr = XEXP (operand1, 0); + if (rs6000_sum_of_two_registers_p (addr)) + { + rtx op1 = XEXP (addr, 0); + rtx op2 = XEXP (addr, 1); + if (TARGET_64BIT) + emit_insn (gen_altivec_stvx_<VM2:mode>_2op_di (operand0, op1, op2)); + else + emit_insn (gen_altivec_stvx_<VM2:mode>_2op_si (operand0, op1, op2)); + } + else + { + if (TARGET_64BIT) + emit_insn (gen_altivec_stvx_<VM2:mode>_1op_di (operand0, addr)); + else + emit_insn (gen_altivec_stvx_<VM2:mode>_1op_si (operand0, addr)); + } + DONE; +}) + ; The next two patterns embody what stvx should usually look like. -(define_insn "altivec_stvx_<mode>_2op" - [(set (mem:VM2 (and:DI (plus:DI (match_operand:DI 1 "register_operand" "b") - (match_operand:DI 2 "register_operand" "r")) - (const_int -16))) - (match_operand:VM2 0 "register_operand" "v"))] - "TARGET_ALTIVEC && TARGET_64BIT" +(define_insn "altivec_stvx_<VM2:mode>_2op_<P:mptrsize>" + [(set (mem:VM2 (and:P (plus:P (match_operand:P 1 "register_operand" "b") + (match_operand:P 2 "register_operand" "r")) + (const_int -16))) + (match_operand:VM2 0 "register_operand" "v"))] + "TARGET_ALTIVEC" "stvx %0,%1,%2" [(set_attr "type" "vecstore")]) -(define_insn "altivec_stvx_<mode>_1op" - [(set (mem:VM2 (and:DI (match_operand:DI 1 "register_operand" "r") - (const_int -16))) - (match_operand:VM2 0 "register_operand" "v"))] - "TARGET_ALTIVEC && TARGET_64BIT" +(define_insn "altivec_stvx_<VM2:mode>_1op_<P:mptrsize>" + [(set (mem:VM2 (and:P (match_operand:P 1 "register_operand" "r") + (const_int -16))) + (match_operand:VM2 0 "register_operand" "v"))] + "TARGET_ALTIVEC" "stvx %0,0,%1" [(set_attr "type" "vecstore")]) -; 32-bit versions of the above. 
-(define_insn "altivec_stvx_<mode>_2op_si" - [(set (mem:VM2 (and:SI (plus:SI (match_operand:SI 1 "register_operand" "b") - (match_operand:SI 2 "register_operand" "r")) - (const_int -16))) - (match_operand:VM2 0 "register_operand" "v"))] - "TARGET_ALTIVEC && TARGET_32BIT" - "stvx %0,%1,%2" - [(set_attr "type" "vecstore")]) - -(define_insn "altivec_stvx_<mode>_1op_si" - [(set (mem:VM2 (and:SI (match_operand:SI 1 "register_operand" "r") - (const_int -16))) - (match_operand:VM2 0 "register_operand" "v"))] - "TARGET_ALTIVEC && TARGET_32BIT" - "stvx %0,0,%1" - [(set_attr "type" "vecstore")]) - -(define_expand "altivec_stvxl_<mode>" - [(parallel - [(set (match_operand:VM2 0 "memory_operand" "=Z") - (match_operand:VM2 1 "register_operand" "v")) - (unspec [(const_int 0)] UNSPEC_STVXL)])] - "TARGET_ALTIVEC" -{ - if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG) - { - altivec_expand_stvx_be (operands[0], operands[1], <MODE>mode, UNSPEC_STVXL); - DONE; - } -}) - -(define_insn "*altivec_stvxl_<mode>_internal" +(define_insn "altivec_stvxl_<mode>" [(parallel [(set (match_operand:VM2 0 "memory_operand" "=Z") (match_operand:VM2 1 "register_operand" "v")) @@ -2848,19 +2684,7 @@ "stvxl %1,%y0" [(set_attr "type" "vecstore")]) -(define_expand "altivec_stve<VI_char>x" - [(set (match_operand:<VI_scalar> 0 "memory_operand" "=Z") - (unspec:<VI_scalar> [(match_operand:VI 1 "register_operand" "v")] UNSPEC_STVE))] - "TARGET_ALTIVEC" -{ - if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG) - { - altivec_expand_stvex_be (operands[0], operands[1], <MODE>mode, UNSPEC_STVE); - DONE; - } -}) - -(define_insn "*altivec_stve<VI_char>x_internal" +(define_insn "altivec_stve<VI_char>x" [(set (match_operand:<VI_scalar> 0 "memory_operand" "=Z") (unspec:<VI_scalar> [(match_operand:VI 1 "register_operand" "v")] UNSPEC_STVE))] "TARGET_ALTIVEC" @@ -2883,7 +2707,7 @@ { machine_mode op_mode = GET_MODE (operands[1]); - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) { /* Big endian word numbering for words in operand is 0 1 2 3. Input words 0 and 2 are where they need to be. */ @@ -2915,7 +2739,7 @@ (match_operand:V4SI 1 "register_operand" "v"))] "TARGET_VSX" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) { /* Big endian word numbering for words in operand is 0 1 2 3. Input words 0 and 2 are where they need to be. */ @@ -2949,7 +2773,7 @@ { machine_mode op_mode = GET_MODE (operands[1]); - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) { /* Big endian word numbering for words in operand is 0 1 2 3. take (operand[1] operand[1]) and shift left one word @@ -2981,7 +2805,7 @@ (match_operand:V4SI 1 "register_operand" "v"))] "TARGET_VSX" { - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) { /* Big endian word numbering for words in operand is 0 1 2 3. take (operand[1] operand[1]) and shift left one word @@ -3020,7 +2844,7 @@ machine_mode op_mode = GET_MODE (operands[1]); rtx_tmp = gen_reg_rtx (op_mode); - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) { /* Big endian word numbering for words in operand is 0 1 2 3. Shift operand left one word, rtx_tmp word order is now 1 2 3 0. @@ -3068,7 +2892,7 @@ rtx rtx_tmp = gen_reg_rtx (V4SImode); rtx rtx_val = GEN_INT (12); - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) { /* Big endian word numbering for words in operand is 0 1 2 3. Shift operand left one word, rtx_tmp word order is now 1 2 3 0. 
@@ -3121,7 +2945,7 @@ machine_mode op_mode = GET_MODE (operands[1]); rtx_tmp = gen_reg_rtx (op_mode); - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) { /* Big endian word numbering for operand is 0 1 2 3. Shift operand left three words, rtx_tmp word order is now 3 0 1 2. @@ -3169,7 +2993,7 @@ rtx rtx_tmp = gen_reg_rtx (V4SImode); rtx rtx_val = GEN_INT (12); - if (VECTOR_ELT_ORDER_BIG) + if (BYTES_BIG_ENDIAN) { /* Big endian word numbering for operand is 0 1 2 3. Shift operand left three words, rtx_tmp word order is now 3 0 1 2. @@ -3237,15 +3061,8 @@ (smax:VI2 (match_dup 1) (match_dup 4)))] "<VI_unit>" { - int i, n_elt = GET_MODE_NUNITS (<MODE>mode); - rtvec v = rtvec_alloc (n_elt); - - /* Create an all 0 constant. */ - for (i = 0; i < n_elt; ++i) - RTVEC_ELT (v, i) = const0_rtx; - operands[2] = gen_reg_rtx (<MODE>mode); - operands[3] = gen_rtx_CONST_VECTOR (<MODE>mode, v); + operands[3] = CONST0_RTX (<MODE>mode); operands[4] = gen_reg_rtx (<MODE>mode); }) @@ -3262,17 +3079,8 @@ (smin:VI2 (match_dup 1) (match_dup 4)))] "<VI_unit>" { - int i; - int n_elt = GET_MODE_NUNITS (<MODE>mode); - - rtvec v = rtvec_alloc (n_elt); - - /* Create an all 0 constant. */ - for (i = 0; i < n_elt; ++i) - RTVEC_ELT (v, i) = const0_rtx; - operands[2] = gen_reg_rtx (<MODE>mode); - operands[3] = gen_rtx_CONST_VECTOR (<MODE>mode, v); + operands[3] = CONST0_RTX (<MODE>mode); operands[4] = gen_reg_rtx (<MODE>mode); }) @@ -3324,7 +3132,7 @@ rtx vtmp1 = gen_reg_rtx (V4SImode); rtx vtmp2 = gen_reg_rtx (<MODE>mode); rtx dest = gen_lowpart (V4SImode, vtmp2); - int elt = VECTOR_ELT_ORDER_BIG ? GET_MODE_NUNITS (<MODE>mode) - 1 : 0; + int elt = BYTES_BIG_ENDIAN ? GET_MODE_NUNITS (<MODE>mode) - 1 : 0; emit_insn (gen_altivec_vspltisw (vzero, const0_rtx)); emit_insn (gen_altivec_vsum4s<VI_char>s (vtmp1, operands[1], vzero)); @@ -3341,8 +3149,8 @@ [(set_attr "type" "vecsimple")]) (define_expand "neg<mode>2" - [(set (match_operand:VI2 0 "register_operand" "") - (neg:VI2 (match_operand:VI2 1 "register_operand" "")))] + [(set (match_operand:VI2 0 "register_operand") + (neg:VI2 (match_operand:VI2 1 "register_operand")))] "<VI_unit>" { if (!TARGET_P9_VECTOR || (<MODE>mode != V4SImode && <MODE>mode != V2DImode)) @@ -3363,12 +3171,11 @@ (match_operand:VIshort 2 "register_operand" "v")] UNSPEC_VMSUMU)))] "TARGET_ALTIVEC" - " -{ +{ emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], operands[2], operands[3])); DONE; -}") - +}) + (define_expand "sdot_prodv8hi" [(set (match_operand:V4SI 0 "register_operand" "=v") (plus:V4SI (match_operand:V4SI 3 "register_operand" "v") @@ -3376,11 +3183,10 @@ (match_operand:V8HI 2 "register_operand" "v")] UNSPEC_VMSUMSHM)))] "TARGET_ALTIVEC" - " { emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], operands[2], operands[3])); DONE; -}") +}) (define_expand "widen_usum<mode>3" [(set (match_operand:V4SI 0 "register_operand" "=v") @@ -3388,14 +3194,13 @@ (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")] UNSPEC_VMSUMU)))] "TARGET_ALTIVEC" - " { rtx vones = gen_reg_rtx (GET_MODE (operands[1])); emit_insn (gen_altivec_vspltis<VI_char> (vones, const1_rtx)); emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], vones, operands[2])); DONE; -}") +}) (define_expand "widen_ssumv16qi3" [(set (match_operand:V4SI 0 "register_operand" "=v") @@ -3403,14 +3208,13 @@ (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")] UNSPEC_VMSUMM)))] "TARGET_ALTIVEC" - " { rtx vones = gen_reg_rtx (V16QImode); emit_insn (gen_altivec_vspltisb (vones, const1_rtx)); emit_insn 
(gen_altivec_vmsummbm (operands[0], operands[1], vones, operands[2])); DONE; -}") +}) (define_expand "widen_ssumv8hi3" [(set (match_operand:V4SI 0 "register_operand" "=v") @@ -3418,14 +3222,13 @@ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] UNSPEC_VMSUMSHM)))] "TARGET_ALTIVEC" - " { rtx vones = gen_reg_rtx (V8HImode); emit_insn (gen_altivec_vspltish (vones, const1_rtx)); emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], vones, operands[2])); DONE; -}") +}) (define_expand "vec_unpacks_hi_<VP_small_lc>" [(set (match_operand:VP 0 "register_operand" "=v") @@ -3451,8 +3254,7 @@ "@ vperm %0,%1,%2,%3 xxperm %x0,%x1,%x3" - [(set_attr "type" "vecperm") - (set_attr "length" "4")]) + [(set_attr "type" "vecperm")]) (define_insn "vperm_v16qiv8hi" [(set (match_operand:V8HI 0 "register_operand" "=v,?wo") @@ -3464,8 +3266,7 @@ "@ vperm %0,%1,%2,%3 xxperm %x0,%x1,%x3" - [(set_attr "type" "vecperm") - (set_attr "length" "4")]) + [(set_attr "type" "vecperm")]) (define_expand "vec_unpacku_hi_v16qi" @@ -3473,7 +3274,6 @@ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")] UNSPEC_VUPKHUB))] "TARGET_ALTIVEC" - " { rtx vzero = gen_reg_rtx (V8HImode); rtx mask = gen_reg_rtx (V16QImode); @@ -3502,14 +3302,13 @@ emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v))); emit_insn (gen_vperm_v16qiv8hi (operands[0], operands[1], vzero, mask)); DONE; -}") +}) (define_expand "vec_unpacku_hi_v8hi" [(set (match_operand:V4SI 0 "register_operand" "=v") (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] UNSPEC_VUPKHUH))] "TARGET_ALTIVEC" - " { rtx vzero = gen_reg_rtx (V4SImode); rtx mask = gen_reg_rtx (V16QImode); @@ -3538,14 +3337,13 @@ emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v))); emit_insn (gen_vperm_v8hiv4si (operands[0], operands[1], vzero, mask)); DONE; -}") +}) (define_expand "vec_unpacku_lo_v16qi" [(set (match_operand:V8HI 0 "register_operand" "=v") (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")] UNSPEC_VUPKLUB))] "TARGET_ALTIVEC" - " { rtx vzero = gen_reg_rtx (V8HImode); rtx mask = gen_reg_rtx (V16QImode); @@ -3574,14 +3372,13 @@ emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v))); emit_insn (gen_vperm_v16qiv8hi (operands[0], operands[1], vzero, mask)); DONE; -}") +}) (define_expand "vec_unpacku_lo_v8hi" [(set (match_operand:V4SI 0 "register_operand" "=v") (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] UNSPEC_VUPKLUH))] "TARGET_ALTIVEC" - " { rtx vzero = gen_reg_rtx (V4SImode); rtx mask = gen_reg_rtx (V16QImode); @@ -3610,7 +3407,7 @@ emit_insn (gen_vec_initv16qiqi (mask, gen_rtx_PARALLEL (V16QImode, v))); emit_insn (gen_vperm_v8hiv4si (operands[0], operands[1], vzero, mask)); DONE; -}") +}) (define_expand "vec_widen_umult_hi_v16qi" [(set (match_operand:V8HI 0 "register_operand" "=v") @@ -3618,7 +3415,6 @@ (match_operand:V16QI 2 "register_operand" "v")] UNSPEC_VMULWHUB))] "TARGET_ALTIVEC" - " { rtx ve = gen_reg_rtx (V8HImode); rtx vo = gen_reg_rtx (V8HImode); @@ -3636,7 +3432,7 @@ emit_insn (gen_altivec_vmrghh_direct (operands[0], vo, ve)); } DONE; -}") +}) (define_expand "vec_widen_umult_lo_v16qi" [(set (match_operand:V8HI 0 "register_operand" "=v") @@ -3644,7 +3440,6 @@ (match_operand:V16QI 2 "register_operand" "v")] UNSPEC_VMULWLUB))] "TARGET_ALTIVEC" - " { rtx ve = gen_reg_rtx (V8HImode); rtx vo = gen_reg_rtx (V8HImode); @@ -3662,7 +3457,7 @@ emit_insn (gen_altivec_vmrglh_direct (operands[0], vo, ve)); } DONE; -}") +}) (define_expand "vec_widen_smult_hi_v16qi" [(set 
(match_operand:V8HI 0 "register_operand" "=v") @@ -3670,7 +3465,6 @@ (match_operand:V16QI 2 "register_operand" "v")] UNSPEC_VMULWHSB))] "TARGET_ALTIVEC" - " { rtx ve = gen_reg_rtx (V8HImode); rtx vo = gen_reg_rtx (V8HImode); @@ -3688,7 +3482,7 @@ emit_insn (gen_altivec_vmrghh_direct (operands[0], vo, ve)); } DONE; -}") +}) (define_expand "vec_widen_smult_lo_v16qi" [(set (match_operand:V8HI 0 "register_operand" "=v") @@ -3696,7 +3490,6 @@ (match_operand:V16QI 2 "register_operand" "v")] UNSPEC_VMULWLSB))] "TARGET_ALTIVEC" - " { rtx ve = gen_reg_rtx (V8HImode); rtx vo = gen_reg_rtx (V8HImode); @@ -3714,7 +3507,7 @@ emit_insn (gen_altivec_vmrglh_direct (operands[0], vo, ve)); } DONE; -}") +}) (define_expand "vec_widen_umult_hi_v8hi" [(set (match_operand:V4SI 0 "register_operand" "=v") @@ -3722,7 +3515,6 @@ (match_operand:V8HI 2 "register_operand" "v")] UNSPEC_VMULWHUH))] "TARGET_ALTIVEC" - " { rtx ve = gen_reg_rtx (V4SImode); rtx vo = gen_reg_rtx (V4SImode); @@ -3740,7 +3532,7 @@ emit_insn (gen_altivec_vmrghw_direct (operands[0], vo, ve)); } DONE; -}") +}) (define_expand "vec_widen_umult_lo_v8hi" [(set (match_operand:V4SI 0 "register_operand" "=v") @@ -3748,7 +3540,6 @@ (match_operand:V8HI 2 "register_operand" "v")] UNSPEC_VMULWLUH))] "TARGET_ALTIVEC" - " { rtx ve = gen_reg_rtx (V4SImode); rtx vo = gen_reg_rtx (V4SImode); @@ -3766,7 +3557,7 @@ emit_insn (gen_altivec_vmrglw_direct (operands[0], vo, ve)); } DONE; -}") +}) (define_expand "vec_widen_smult_hi_v8hi" [(set (match_operand:V4SI 0 "register_operand" "=v") @@ -3774,7 +3565,6 @@ (match_operand:V8HI 2 "register_operand" "v")] UNSPEC_VMULWHSH))] "TARGET_ALTIVEC" - " { rtx ve = gen_reg_rtx (V4SImode); rtx vo = gen_reg_rtx (V4SImode); @@ -3792,7 +3582,7 @@ emit_insn (gen_altivec_vmrghw_direct (operands[0], vo, ve)); } DONE; -}") +}) (define_expand "vec_widen_smult_lo_v8hi" [(set (match_operand:V4SI 0 "register_operand" "=v") @@ -3800,7 +3590,6 @@ (match_operand:V8HI 2 "register_operand" "v")] UNSPEC_VMULWLSH))] "TARGET_ALTIVEC" - " { rtx ve = gen_reg_rtx (V4SImode); rtx vo = gen_reg_rtx (V4SImode); @@ -3818,7 +3607,7 @@ emit_insn (gen_altivec_vmrglw_direct (operands[0], vo, ve)); } DONE; -}") +}) (define_expand "vec_pack_trunc_<mode>" [(set (match_operand:<VP_small> 0 "register_operand" "=v") @@ -3833,7 +3622,6 @@ (mult:V16QI (match_operand:V16QI 1 "register_operand" "v") (match_operand:V16QI 2 "register_operand" "v")))] "TARGET_ALTIVEC" - " { rtx even = gen_reg_rtx (V8HImode); rtx odd = gen_reg_rtx (V8HImode); @@ -3853,13 +3641,35 @@ emit_insn (gen_altivec_vmulosb (odd, operands[1], operands[2])); emit_insn (gen_altivec_vperm_v8hiv16qi (operands[0], even, odd, mask)); DONE; -}") +}) + +(define_expand "altivec_vpermxor" + [(use (match_operand:V16QI 0 "register_operand")) + (use (match_operand:V16QI 1 "register_operand")) + (use (match_operand:V16QI 2 "register_operand")) + (use (match_operand:V16QI 3 "register_operand"))] + "TARGET_P8_VECTOR" +{ + if (!BYTES_BIG_ENDIAN) + { + /* vpermxor indexes the bytes using Big Endian numbering. If LE, + change indexing in operand[3] to BE index. 
*/ + rtx be_index = gen_reg_rtx (V16QImode); + + emit_insn (gen_one_cmplv16qi2 (be_index, operands[3])); + emit_insn (gen_crypto_vpermxor_v16qi (operands[0], operands[1], + operands[2], be_index)); + } + else + emit_insn (gen_crypto_vpermxor_v16qi (operands[0], operands[1], + operands[2], operands[3])); + DONE; +}) (define_expand "altivec_negv4sf2" - [(use (match_operand:V4SF 0 "register_operand" "")) - (use (match_operand:V4SF 1 "register_operand" ""))] + [(use (match_operand:V4SF 0 "register_operand")) + (use (match_operand:V4SF 1 "register_operand"))] "TARGET_ALTIVEC" - " { rtx neg0; @@ -3873,7 +3683,7 @@ gen_lowpart (V4SFmode, neg0), operands[1])); DONE; -}") +}) ;; Vector reverse elements (define_expand "altivec_vreve<mode>2" @@ -3971,60 +3781,56 @@ [(set_attr "type" "vecstore")]) (define_expand "vec_unpacks_float_hi_v8hi" - [(set (match_operand:V4SF 0 "register_operand" "") - (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")] + [(set (match_operand:V4SF 0 "register_operand") + (unspec:V4SF [(match_operand:V8HI 1 "register_operand")] UNSPEC_VUPKHS_V4SF))] "TARGET_ALTIVEC" - " { rtx tmp = gen_reg_rtx (V4SImode); emit_insn (gen_vec_unpacks_hi_v8hi (tmp, operands[1])); emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx)); DONE; -}") +}) (define_expand "vec_unpacks_float_lo_v8hi" - [(set (match_operand:V4SF 0 "register_operand" "") - (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")] + [(set (match_operand:V4SF 0 "register_operand") + (unspec:V4SF [(match_operand:V8HI 1 "register_operand")] UNSPEC_VUPKLS_V4SF))] "TARGET_ALTIVEC" - " { rtx tmp = gen_reg_rtx (V4SImode); emit_insn (gen_vec_unpacks_lo_v8hi (tmp, operands[1])); emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx)); DONE; -}") +}) (define_expand "vec_unpacku_float_hi_v8hi" - [(set (match_operand:V4SF 0 "register_operand" "") - (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")] + [(set (match_operand:V4SF 0 "register_operand") + (unspec:V4SF [(match_operand:V8HI 1 "register_operand")] UNSPEC_VUPKHU_V4SF))] "TARGET_ALTIVEC" - " { rtx tmp = gen_reg_rtx (V4SImode); emit_insn (gen_vec_unpacku_hi_v8hi (tmp, operands[1])); emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx)); DONE; -}") +}) (define_expand "vec_unpacku_float_lo_v8hi" - [(set (match_operand:V4SF 0 "register_operand" "") - (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")] + [(set (match_operand:V4SF 0 "register_operand") + (unspec:V4SF [(match_operand:V8HI 1 "register_operand")] UNSPEC_VUPKLU_V4SF))] "TARGET_ALTIVEC" - " { rtx tmp = gen_reg_rtx (V4SImode); emit_insn (gen_vec_unpacku_lo_v8hi (tmp, operands[1])); emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx)); DONE; -}") +}) ;; Power8/power9 vector instructions encoded as Altivec instructions @@ -4035,8 +3841,7 @@ (clz:VI2 (match_operand:VI2 1 "register_operand" "v")))] "TARGET_P8_VECTOR" "vclz<wd> %0,%1" - [(set_attr "length" "4") - (set_attr "type" "vecsimple")]) + [(set_attr "type" "vecsimple")]) ;; Vector absolute difference unsigned (define_expand "vadu<mode>3" @@ -4047,7 +3852,7 @@ "TARGET_P9_VECTOR") ;; Vector absolute difference unsigned -(define_insn "*p9_vadu<mode>3" +(define_insn "p9_vadu<mode>3" [(set (match_operand:VI 0 "register_operand" "=v") (unspec:VI [(match_operand:VI 1 "register_operand" "v") (match_operand:VI 2 "register_operand" "v")] @@ -4062,8 +3867,7 @@ (ctz:VI2 (match_operand:VI2 1 "register_operand" "v")))] "TARGET_P9_VECTOR" "vctz<wd> %0,%1" - [(set_attr "length" "4") - (set_attr "type" "vecsimple")]) + [(set_attr "type" 
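(Editorial aside, not part of the patch.) The new altivec_vpermxor expander above wraps crypto_vpermxor_v16qi. vpermxor picks one byte from each source operand using the two 4-bit nibbles of every selector byte, numbered big-endian, and XORs the pair; on little endian the expander one's-complements the selector first (one_cmplv16qi2), which works because complementing a 4-bit index i yields 15 - i, the same byte under the opposite numbering. A small self-check of that identity in plain C:

#include <stdio.h>

int main (void)
{
  /* For every 4-bit index, one's complement flips LE numbering to BE.  */
  for (int i = 0; i < 16; i++)
    if (((~i) & 0xF) != 15 - i)
      {
        printf ("mismatch at %d\n", i);
        return 1;
      }
  printf ("ok: (~i & 0xF) == 15 - i for i = 0..15\n");
  return 0;
}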
"vecsimple")]) ;; Vector population count (define_insn "*p8v_popcount<mode>2" @@ -4071,8 +3875,7 @@ (popcount:VI2 (match_operand:VI2 1 "register_operand" "v")))] "TARGET_P8_VECTOR" "vpopcnt<wd> %0,%1" - [(set_attr "length" "4") - (set_attr "type" "vecsimple")]) + [(set_attr "type" "vecsimple")]) ;; Vector parity (define_insn "*p9v_parity<mode>2" @@ -4080,8 +3883,7 @@ (parity:VParity (match_operand:VParity 1 "register_operand" "v")))] "TARGET_P9_VECTOR" "vprtyb<wd> %0,%1" - [(set_attr "length" "4") - (set_attr "type" "vecsimple")]) + [(set_attr "type" "vecsimple")]) ;; Vector Gather Bits by Bytes by Doubleword (define_insn "p8v_vgbbd" @@ -4090,8 +3892,7 @@ UNSPEC_VGBBD))] "TARGET_P8_VECTOR" "vgbbd %0,%1" - [(set_attr "length" "4") - (set_attr "type" "vecsimple")]) + [(set_attr "type" "vecsimple")]) ;; 128-bit binary integer arithmetic @@ -4105,8 +3906,7 @@ (match_operand:V1TI 2 "register_operand" "v")))] "TARGET_VADDUQM" "vadduqm %0,%1,%2" - [(set_attr "length" "4") - (set_attr "type" "vecsimple")]) + [(set_attr "type" "vecsimple")]) (define_insn "altivec_vaddcuq" [(set (match_operand:V1TI 0 "register_operand" "=v") @@ -4115,8 +3915,7 @@ UNSPEC_VADDCUQ))] "TARGET_VADDUQM" "vaddcuq %0,%1,%2" - [(set_attr "length" "4") - (set_attr "type" "vecsimple")]) + [(set_attr "type" "vecsimple")]) (define_insn "altivec_vsubuqm" [(set (match_operand:V1TI 0 "register_operand" "=v") @@ -4124,8 +3923,7 @@ (match_operand:V1TI 2 "register_operand" "v")))] "TARGET_VADDUQM" "vsubuqm %0,%1,%2" - [(set_attr "length" "4") - (set_attr "type" "vecsimple")]) + [(set_attr "type" "vecsimple")]) (define_insn "altivec_vsubcuq" [(set (match_operand:V1TI 0 "register_operand" "=v") @@ -4134,8 +3932,7 @@ UNSPEC_VSUBCUQ))] "TARGET_VADDUQM" "vsubcuq %0,%1,%2" - [(set_attr "length" "4") - (set_attr "type" "vecsimple")]) + [(set_attr "type" "vecsimple")]) (define_insn "altivec_vaddeuqm" [(set (match_operand:V1TI 0 "register_operand" "=v") @@ -4145,8 +3942,7 @@ UNSPEC_VADDEUQM))] "TARGET_VADDUQM" "vaddeuqm %0,%1,%2,%3" - [(set_attr "length" "4") - (set_attr "type" "vecsimple")]) + [(set_attr "type" "vecsimple")]) (define_insn "altivec_vaddecuq" [(set (match_operand:V1TI 0 "register_operand" "=v") @@ -4156,8 +3952,7 @@ UNSPEC_VADDECUQ))] "TARGET_VADDUQM" "vaddecuq %0,%1,%2,%3" - [(set_attr "length" "4") - (set_attr "type" "vecsimple")]) + [(set_attr "type" "vecsimple")]) (define_insn "altivec_vsubeuqm" [(set (match_operand:V1TI 0 "register_operand" "=v") @@ -4167,8 +3962,7 @@ UNSPEC_VSUBEUQM))] "TARGET_VADDUQM" "vsubeuqm %0,%1,%2,%3" - [(set_attr "length" "4") - (set_attr "type" "vecsimple")]) + [(set_attr "type" "vecsimple")]) (define_insn "altivec_vsubecuq" [(set (match_operand:V1TI 0 "register_operand" "=v") @@ -4178,8 +3972,7 @@ UNSPEC_VSUBECUQ))] "TARGET_VADDUQM" "vsubecuq %0,%1,%2,%3" - [(set_attr "length" "4") - (set_attr "type" "vecsimple")]) + [(set_attr "type" "vecsimple")]) ;; We use V2DI as the output type to simplify converting the permute ;; bits into an integer @@ -4211,6 +4004,49 @@ "vbpermd %0,%1,%2" [(set_attr "type" "vecsimple")]) +;; Support for SAD (sum of absolute differences). + +;; Due to saturating semantics, we can't combine the sum-across +;; with the vector accumulate in vsum4ubs. A vadduwm is needed. 
+(define_expand "usadv16qi" + [(use (match_operand:V4SI 0 "register_operand")) + (use (match_operand:V16QI 1 "register_operand")) + (use (match_operand:V16QI 2 "register_operand")) + (use (match_operand:V4SI 3 "register_operand"))] + "TARGET_P9_VECTOR" +{ + rtx absd = gen_reg_rtx (V16QImode); + rtx zero = gen_reg_rtx (V4SImode); + rtx psum = gen_reg_rtx (V4SImode); + + emit_insn (gen_p9_vaduv16qi3 (absd, operands[1], operands[2])); + emit_insn (gen_altivec_vspltisw (zero, const0_rtx)); + emit_insn (gen_altivec_vsum4ubs (psum, absd, zero)); + emit_insn (gen_addv4si3 (operands[0], psum, operands[3])); + DONE; +}) + +;; Since vsum4shs is saturating and further performs signed +;; arithmetic, we can't combine the sum-across with the vector +;; accumulate in vsum4shs. A vadduwm is needed. +(define_expand "usadv8hi" + [(use (match_operand:V4SI 0 "register_operand")) + (use (match_operand:V8HI 1 "register_operand")) + (use (match_operand:V8HI 2 "register_operand")) + (use (match_operand:V4SI 3 "register_operand"))] + "TARGET_P9_VECTOR" +{ + rtx absd = gen_reg_rtx (V8HImode); + rtx zero = gen_reg_rtx (V4SImode); + rtx psum = gen_reg_rtx (V4SImode); + + emit_insn (gen_p9_vaduv8hi3 (absd, operands[1], operands[2])); + emit_insn (gen_altivec_vspltisw (zero, const0_rtx)); + emit_insn (gen_altivec_vsum4shs (psum, absd, zero)); + emit_insn (gen_addv4si3 (operands[0], psum, operands[3])); + DONE; +}) + ;; Decimal Integer operations (define_int_iterator UNSPEC_BCD_ADD_SUB [UNSPEC_BCDADD UNSPEC_BCDSUB]) @@ -4228,8 +4064,7 @@ (clobber (reg:CCFP CR6_REGNO))] "TARGET_P8_VECTOR" "bcd<bcd_add_sub>. %0,%1,%2,%3" - [(set_attr "length" "4") - (set_attr "type" "vecsimple")]) + [(set_attr "type" "vecsimple")]) ;; Use a floating point type (V2DFmode) for the compare to set CR6 so that we ;; can use the unordered test for BCD nans and add/subtracts that overflow. An @@ -4247,8 +4082,7 @@ (clobber (match_scratch:V1TI 0 "=v"))] "TARGET_P8_VECTOR" "bcd<bcd_add_sub>. %0,%1,%2,%3" - [(set_attr "length" "4") - (set_attr "type" "vecsimple")]) + [(set_attr "type" "vecsimple")]) (define_insn "*bcd<bcd_add_sub>_test2" [(set (match_operand:V1TI 0 "register_operand" "=v") @@ -4265,8 +4099,7 @@ (match_operand:V2DF 4 "zero_constant" "j")))] "TARGET_P8_VECTOR" "bcd<bcd_add_sub>. %0,%1,%2,%3" - [(set_attr "length" "4") - (set_attr "type" "vecsimple")]) + [(set_attr "type" "vecsimple")]) (define_insn "darn_32" [(set (match_operand:SI 0 "register_operand" "=r") @@ -4488,13 +4321,13 @@ (define_expand "bcd<bcd_add_sub>_<code>" [(parallel [(set (reg:CCFP CR6_REGNO) (compare:CCFP - (unspec:V2DF [(match_operand:V1TI 1 "register_operand" "") - (match_operand:V1TI 2 "register_operand" "") - (match_operand:QI 3 "const_0_to_1_operand" "")] + (unspec:V2DF [(match_operand:V1TI 1 "register_operand") + (match_operand:V1TI 2 "register_operand") + (match_operand:QI 3 "const_0_to_1_operand")] UNSPEC_BCD_ADD_SUB) (match_dup 4))) - (clobber (match_scratch:V1TI 5 ""))]) - (set (match_operand:SI 0 "register_operand" "") + (clobber (match_scratch:V1TI 5))]) + (set (match_operand:SI 0 "register_operand") (BCD_TEST:SI (reg:CCFP CR6_REGNO) (const_int 0)))] "TARGET_P8_VECTOR" @@ -4509,10 +4342,10 @@ ;; a register class for CR6. 
(define_peephole2 - [(parallel [(set (match_operand:V1TI 0 "register_operand" "") - (unspec:V1TI [(match_operand:V1TI 1 "register_operand" "") - (match_operand:V1TI 2 "register_operand" "") - (match_operand:QI 3 "const_0_to_1_operand" "")] + [(parallel [(set (match_operand:V1TI 0 "register_operand") + (unspec:V1TI [(match_operand:V1TI 1 "register_operand") + (match_operand:V1TI 2 "register_operand") + (match_operand:QI 3 "const_0_to_1_operand")] UNSPEC_BCD_ADD_SUB)) (clobber (reg:CCFP CR6_REGNO))]) (parallel [(set (reg:CCFP CR6_REGNO) @@ -4521,8 +4354,8 @@ (match_dup 2) (match_dup 3)] UNSPEC_BCD_ADD_SUB) - (match_operand:V2DF 4 "zero_constant" ""))) - (clobber (match_operand:V1TI 5 "register_operand" ""))])] + (match_operand:V2DF 4 "zero_constant"))) + (clobber (match_operand:V1TI 5 "register_operand"))])] "TARGET_P8_VECTOR" [(parallel [(set (match_dup 0) (unspec:V1TI [(match_dup 1)