diff gcc/config/rs6000/mmintrin.h @ 131:84e7813d76e9

description gcc-8.2
author mir3636
date Thu, 25 Oct 2018 07:37:49 +0900
parents 04ced10e8804
children 1830386684a0
--- a/gcc/config/rs6000/mmintrin.h	Fri Oct 27 22:46:09 2017 +0900
+++ b/gcc/config/rs6000/mmintrin.h	Thu Oct 25 07:37:49 2018 +0900
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
 
    This file is part of GCC.
 
@@ -49,7 +49,7 @@
    C language 64-bit scalar operation or optimized to use the newer
    128-bit SSE/Altivec operations.  We recomend this for new
    applications.  */
-#warning "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this warning."
+#error "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this error."
 #endif
 
 #ifndef _MMINTRIN_H_INCLUDED
@@ -236,7 +236,7 @@
   a = (__vector unsigned char)vec_splats (__m1);
   b = (__vector unsigned char)vec_splats (__m2);
   c = vec_mergel (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;
 
@@ -317,7 +317,7 @@
   a = (__vector unsigned char)vec_splats (__m1);
   b = (__vector unsigned char)vec_splats (__m2);
   c = vec_mergel (a, b);
-  return (__builtin_unpack_vector_int128 ((vector __int128_t)c, 1));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 1));
 #else
   __m64_union m1, m2, res;
 
@@ -398,7 +398,7 @@
   a = (__vector signed char)vec_splats (__m1);
   b = (__vector signed char)vec_splats (__m2);
   c = vec_add (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;
 
@@ -434,7 +434,7 @@
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = vec_add (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;
 
@@ -463,10 +463,10 @@
 #if _ARCH_PWR9
   __vector signed int a, b, c;
 
-  a = (__vector signed int)vec_splats (__m1, __m1);
-  b = (__vector signed int)vec_splats (__m2, __m2);
+  a = (__vector signed int)vec_splats (__m1);
+  b = (__vector signed int)vec_splats (__m2);
   c = vec_add (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;
 
@@ -496,7 +496,7 @@
   a = (__vector signed char)vec_splats (__m1);
   b = (__vector signed char)vec_splats (__m2);
   c = vec_sub (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;
 
@@ -532,7 +532,7 @@
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = vec_sub (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;
 
@@ -564,7 +564,7 @@
   a = (__vector signed int)vec_splats (__m1);
   b = (__vector signed int)vec_splats (__m2);
   c = vec_sub (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;
 
@@ -754,7 +754,7 @@
   a = (__vector signed char)vec_splats (__m1);
   b = (__vector signed char)vec_splats (__m2);
   c = (__vector signed char)vec_cmpgt (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;
 
@@ -791,7 +791,7 @@
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = (__vector signed short)vec_cmpeq (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;
 
@@ -822,7 +822,7 @@
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = (__vector signed short)vec_cmpgt (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;
 
@@ -854,8 +854,8 @@
 
   a = (__vector signed int)vec_splats (__m1);
   b = (__vector signed int)vec_splats (__m2);
-  c = (__vector signed short)vec_cmpeq (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  c = (__vector signed int)vec_cmpeq (a, b);
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;
 
@@ -883,8 +883,8 @@
 
   a = (__vector signed int)vec_splats (__m1);
   b = (__vector signed int)vec_splats (__m2);
-  c = (__vector signed short)vec_cmpgt (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  c = (__vector signed int)vec_cmpgt (a, b);
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 #else
   __m64_union m1, m2, res;
 
@@ -915,7 +915,7 @@
   a = (__vector signed char)vec_splats (__m1);
   b = (__vector signed char)vec_splats (__m2);
   c = vec_adds (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -933,7 +933,7 @@
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = vec_adds (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -951,7 +951,7 @@
   a = (__vector unsigned char)vec_splats (__m1);
   b = (__vector unsigned char)vec_splats (__m2);
   c = vec_adds (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -970,7 +970,7 @@
   a = (__vector unsigned short)vec_splats (__m1);
   b = (__vector unsigned short)vec_splats (__m2);
   c = vec_adds (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -989,7 +989,7 @@
   a = (__vector signed char)vec_splats (__m1);
   b = (__vector signed char)vec_splats (__m2);
   c = vec_subs (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1008,7 +1008,7 @@
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = vec_subs (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1027,7 +1027,7 @@
   a = (__vector unsigned char)vec_splats (__m1);
   b = (__vector unsigned char)vec_splats (__m2);
   c = vec_subs (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1046,7 +1046,7 @@
   a = (__vector unsigned short)vec_splats (__m1);
   b = (__vector unsigned short)vec_splats (__m2);
   c = vec_subs (a, b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1068,7 +1068,7 @@
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = vec_vmsumshm (a, b, zero);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1096,7 +1096,7 @@
   w1 = vec_vmulosh (a, b);
   c = (__vector signed short)vec_perm (w0, w1, xform1);
 
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1115,7 +1115,7 @@
   a = (__vector signed short)vec_splats (__m1);
   b = (__vector signed short)vec_splats (__m2);
   c = a * b;
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)c, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)c, 0));
 }
 
 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -1136,7 +1136,7 @@
       m = (__vector signed short)vec_splats (__m);
       c = (__vector unsigned short)vec_splats ((unsigned short)__count);
       r = vec_sl (m, (__vector unsigned short)c);
-      return (__builtin_unpack_vector_int128 ((__vector __int128_t)r, 0));
+      return (__builtin_unpack_vector_int128 ((__vector __int128)r, 0));
     }
   else
   return (0);
@@ -1205,7 +1205,7 @@
 	m = (__vector signed short)vec_splats (__m);
 	c = (__vector unsigned short)vec_splats ((unsigned short)__count);
 	r = vec_sra (m, (__vector unsigned short)c);
-	return (__builtin_unpack_vector_int128 ((__vector __int128_t)r, 0));
+	return (__builtin_unpack_vector_int128 ((__vector __int128)r, 0));
     }
   else
   return (0);
@@ -1274,7 +1274,7 @@
 	m = (__vector unsigned short)vec_splats (__m);
 	c = (__vector unsigned short)vec_splats ((unsigned short)__count);
 	r = vec_sr (m, (__vector unsigned short)c);
-	return (__builtin_unpack_vector_int128 ((__vector __int128_t)r, 0));
+	return (__builtin_unpack_vector_int128 ((__vector __int128)r, 0));
     }
   else
     return (0);
@@ -1417,7 +1417,7 @@
   __vector signed short w;
 
   w = (__vector signed short)vec_splats (__w);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)w, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)w, 0));
 #else
   __m64_union res;
 
@@ -1437,7 +1437,7 @@
   __vector signed char b;
 
   b = (__vector signed char)vec_splats (__b);
-  return (__builtin_unpack_vector_int128 ((__vector __int128_t)b, 0));
+  return (__builtin_unpack_vector_int128 ((__vector __int128)b, 0));
 #else
   __m64_union res;