gcc/config/rs6000/smmintrin.h @ 145:1830386684a0 (gcc-9.2.0)
/* Copyright (C) 2018-2020 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.

   NOTE: This is NOT a complete implementation of the SSE4 intrinsics!  */

#ifndef NO_WARN_X86_INTRINSICS
/* This header is distributed to simplify porting x86_64 code that
   makes explicit use of Intel intrinsics to powerpc64le.
   It is the user's responsibility to determine if the results are
   acceptable and make additional changes as necessary.
   Note that much code that uses Intel intrinsics can be rewritten in
   standard C or GNU C extensions, which are more portable and better
   optimized across multiple targets.  */
#error "Please read comment above.  Use -DNO_WARN_X86_INTRINSICS to disable this error."
#endif
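
/* Illustration, not part of the original header: as the note above says,
   much code that uses Intel intrinsics explicitly can be rewritten with
   GNU C vector extensions instead.  A minimal sketch with hypothetical
   helper names (get_third_int, get_third_int_gnu) showing both forms of
   an element extract:

     #include <smmintrin.h>

     typedef int v4si __attribute__ ((vector_size (16)));  // GNU C vector type

     static inline int
     get_third_int (__m128i x)
     {
       return _mm_extract_epi32 (x, 2);  // Intel intrinsic form
     }

     static inline int
     get_third_int_gnu (v4si x)
     {
       return x[2];                      // GNU C vector subscript form
     }

   The intrinsic form needs this header (and -DNO_WARN_X86_INTRINSICS when
   targeting powerpc64le); the GNU C form compiles on any target that
   supports 128-bit integer vectors.  */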

#ifndef SMMINTRIN_H_
#define SMMINTRIN_H_

#include <altivec.h>
#include <tmmintrin.h>

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_epi8 (__m128i __X, const int __N)
{
  /* Zero-extend the selected byte, as the Intel intrinsic does.  */
  return (unsigned char) ((__v16qi)__X)[__N & 15];
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_epi32 (__m128i __X, const int __N)
{
  return ((__v4si)__X)[__N & 3];
}

/* The Intel intrinsic returns the full 64-bit element, so return long long
   rather than int to avoid truncating the upper 32 bits.  */
extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_epi64 (__m128i __X, const int __N)
{
  return ((__v2di)__X)[__N & 1];
}

extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_ps (__m128 __X, const int __N)
{
  /* Return the raw IEEE 754 bit pattern of the selected float element
     as an int, matching the Intel definition.  */
  return ((__v4si)__X)[__N & 3];
}

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blend_epi16 (__m128i __A, __m128i __B, const int __imm8)
{
  /* Splat the 8-bit immediate into every byte, then use vec_gb (vgbbd)
     to expand each mask bit into a 0x00 or 0xFF byte.  */
  __v16qi __charmask = vec_splats ((signed char) __imm8);
  __charmask = vec_gb (__charmask);
  /* Sign-extend the mask bytes into halfword-wide select masks.  */
  __v8hu __shortmask = (__v8hu) vec_unpackh (__charmask);
#ifdef __BIG_ENDIAN__
  __shortmask = vec_reve (__shortmask);
#endif
  /* Select each halfword from __B where its __imm8 bit is set, else from __A.  */
  return (__m128i) vec_sel ((__v8hu) __A, (__v8hu) __B, __shortmask);
}
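
/* Illustration, not part of the original header: with a hypothetical call
   such as

     __m128i r = _mm_blend_epi16 (a, b, 0x0F);

   bits 0-3 of the immediate are set, so elements 0-3 of the result come
   from b and elements 4-7 come from a, matching the Intel PBLENDW
   semantics this routine emulates.  */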

extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blendv_epi8 (__m128i __A, __m128i __B, __m128i __mask)
{
  /* Arithmetic-shift each mask byte right by 7 to replicate its sign bit
     across the byte, then select bytes from __B where that bit was set.  */
  const __v16qu __seven = vec_splats ((unsigned char) 0x07);
  __v16qu __lmask = vec_sra ((__v16qu) __mask, __seven);
  return (__m128i) vec_sel ((__v16qu) __A, (__v16qu) __B, __lmask);
}

#endif /* SMMINTRIN_H_ */