/* { dg-do compile } */
/* { dg-require-effective-target double64 } */
/* { dg-options "-O -fdump-tree-forwprop1" } */

#include <stdint.h>

/* All of these optimizations happen for unsupported vector modes as a
   consequence of the lowering pass.  We need to test with a vector mode
   that is supported by default on at least some architectures, or make
   the test target specific so we can pass a flag like -mavx.  */

12 typedef double vecf __attribute__ ((vector_size (2 * sizeof (double))));
|
|
13 typedef int64_t veci __attribute__ ((vector_size (2 * sizeof (int64_t))));
|
|
14
|
|
15 void f (double d, vecf* r)
|
|
16 {
|
|
17 vecf x = { -d, 5 };
|
|
18 vecf y = { 1, 4 };
|
|
19 veci m = { 2, 0 };
|
|
20 *r = __builtin_shuffle (x, y, m); // { 1, -d }
|
|
21 }
|
|
22
|
|
23 void g (float d, vecf* r)
|
|
24 {
|
|
25 vecf x = { d, 5 };
|
|
26 vecf y = { 1, 4 };
|
|
27 veci m = { 2, 1 };
|
|
28 *r = __builtin_shuffle (x, y, m); // { 1, 5 }
|
|
29 }
|
|
30
|
|
31 void h (double d, vecf* r)
|
|
32 {
|
|
33 vecf x = { d + 1, 5 };
|
|
34 vecf y = { 1 , 4 };
|
|
35 veci m = { 2 , 0 };
|
|
36 *r = __builtin_shuffle (y, x, m); // { d + 1, 1 }
|
|
37 }
|
|
38
|
|
39 void i (float d, vecf* r)
|
|
40 {
|
|
41 vecf x = { d, 5 };
|
|
42 veci m = { 1, 0 };
|
|
43 *r = __builtin_shuffle (x, m); // { 5, d }
|
|
44 }
|
|
45
|
|
46 void j (vecf* r)
|
|
47 {
|
|
48 vecf y = { 1, 2 };
|
|
49 veci m = { 0, 0 };
|
|
50 *r = __builtin_shuffle (y, m); // { 1, 1 }
|
|
51 }
|
|
52
|
|
53 void k (vecf* r)
|
|
54 {
|
|
55 vecf x = { 3, 4 };
|
|
56 vecf y = { 1, 2 };
|
|
57 veci m = { 3, 0 };
|
|
58 *r = __builtin_shuffle (x, y, m); // { 2, 3 }
|
|
59 }
|
|
60
|
|
61 void l (double d, vecf* r)
|
|
62 {
|
|
63 vecf x = { -d, 5 };
|
|
64 vecf y = { d, 4 };
|
|
65 veci m = { 2, 0 };
|
|
66 *r = __builtin_shuffle (x, y, m); // { d, -d }
|
|
67 }
|
|

/* { dg-final { scan-tree-dump-not "VEC_PERM_EXPR" "forwprop1" } } */