Mercurial > hg > CbC > CbC_gcc
comparison gcc/config/rs6000/vector.md @ 55:77e2b8dfacca gcc-4.4.5
update it from 4.4.3 to 4.5.0
author | ryoma <e075725@ie.u-ryukyu.ac.jp> |
---|---|
date | Fri, 12 Feb 2010 23:39:51 +0900 |
parents | |
children | b7f97abdc517 |
comparison
equal
deleted
inserted
replaced
52:c156f1bd5cd9 | 55:77e2b8dfacca |
---|---|
1 ;; Expander definitions for vector support between altivec & vsx. No | |
2 ;; instructions are in this file, this file provides the generic vector | |
3 ;; expander, and the actual vector instructions will be in altivec.md and | |
4 ;; vsx.md | |
5 | |
6 ;; Copyright (C) 2009 | |
7 ;; Free Software Foundation, Inc. | |
8 ;; Contributed by Michael Meissner <meissner@linux.vnet.ibm.com> | |
9 | |
10 ;; This file is part of GCC. | |
11 | |
12 ;; GCC is free software; you can redistribute it and/or modify it | |
13 ;; under the terms of the GNU General Public License as published | |
14 ;; by the Free Software Foundation; either version 3, or (at your | |
15 ;; option) any later version. | |
16 | |
17 ;; GCC is distributed in the hope that it will be useful, but WITHOUT | |
18 ;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | |
19 ;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public | |
20 ;; License for more details. | |
21 | |
22 ;; You should have received a copy of the GNU General Public License | |
23 ;; along with GCC; see the file COPYING3. If not see | |
24 ;; <http://www.gnu.org/licenses/>. | |
25 | |
26 | |
27 ;; Vector int modes | |
28 (define_mode_iterator VEC_I [V16QI V8HI V4SI]) | |
29 | |
30 ;; Vector float modes | |
31 (define_mode_iterator VEC_F [V4SF V2DF]) | |
32 | |
33 ;; Vector arithmetic modes | |
34 (define_mode_iterator VEC_A [V16QI V8HI V4SI V4SF V2DF]) | |
35 | |
36 ;; Vector modes that need alignment via permutes | |
37 (define_mode_iterator VEC_K [V16QI V8HI V4SI V4SF]) | |
38 | |
39 ;; Vector logical modes | |
40 (define_mode_iterator VEC_L [V16QI V8HI V4SI V2DI V4SF V2DF TI]) | |
41 | |
42 ;; Vector modes for moves. Don't do TImode here. | |
43 (define_mode_iterator VEC_M [V16QI V8HI V4SI V2DI V4SF V2DF]) | |
44 | |
45 ;; Vector modes for types that don't need a realignment under VSX | |
46 (define_mode_iterator VEC_N [V4SI V4SF V2DI V2DF]) | |
47 | |
48 ;; Vector comparison modes | |
49 (define_mode_iterator VEC_C [V16QI V8HI V4SI V4SF V2DF]) | |
50 | |
51 ;; Vector init/extract modes | |
52 (define_mode_iterator VEC_E [V16QI V8HI V4SI V2DI V4SF V2DF]) | |
53 | |
54 ;; Vector reload iterator | |
55 (define_mode_iterator VEC_R [V16QI V8HI V4SI V2DI V4SF V2DF DF TI]) | |
56 | |
57 ;; Base type from vector mode | |
58 (define_mode_attr VEC_base [(V16QI "QI") | |
59 (V8HI "HI") | |
60 (V4SI "SI") | |
61 (V2DI "DI") | |
62 (V4SF "SF") | |
63 (V2DF "DF") | |
64 (TI "TI")]) | |
65 | |
66 ;; Same size integer type for floating point data | |
67 (define_mode_attr VEC_int [(V4SF "v4si") | |
68 (V2DF "v2di")]) | |
69 | |
70 (define_mode_attr VEC_INT [(V4SF "V4SI") | |
71 (V2DF "V2DI")]) | |
72 | |
73 ;; constants for unspec | |
74 (define_constants | |
75 [(UNSPEC_PREDICATE 400)]) | |
76 | |
77 | |
78 ;; Vector move instructions. | |
79 (define_expand "mov<mode>" | |
80 [(set (match_operand:VEC_M 0 "nonimmediate_operand" "") | |
81 (match_operand:VEC_M 1 "any_operand" ""))] | |
82 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
83 { | |
84 if (can_create_pseudo_p ()) | |
85 { | |
86 if (CONSTANT_P (operands[1]) | |
87 && !easy_vector_constant (operands[1], <MODE>mode)) | |
88 operands[1] = force_const_mem (<MODE>mode, operands[1]); | |
89 | |
90 else if (!vlogical_operand (operands[0], <MODE>mode) | |
91 && !vlogical_operand (operands[1], <MODE>mode)) | |
92 operands[1] = force_reg (<MODE>mode, operands[1]); | |
93 } | |
94 }) | |
95 | |
96 ;; Generic vector floating point load/store instructions. These will match | |
97 ;; insns defined in vsx.md or altivec.md depending on the switches. | |
98 (define_expand "vector_load_<mode>" | |
99 [(set (match_operand:VEC_M 0 "vfloat_operand" "") | |
100 (match_operand:VEC_M 1 "memory_operand" ""))] | |
101 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
102 "") | |
103 | |
104 (define_expand "vector_store_<mode>" | |
105 [(set (match_operand:VEC_M 0 "memory_operand" "") | |
106 (match_operand:VEC_M 1 "vfloat_operand" ""))] | |
107 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
108 "") | |
109 | |
110 ;; Splits if a GPR register was chosen for the move | |
111 (define_split | |
112 [(set (match_operand:VEC_L 0 "nonimmediate_operand" "") | |
113 (match_operand:VEC_L 1 "input_operand" ""))] | |
114 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode) | |
115 && reload_completed | |
116 && gpr_or_gpr_p (operands[0], operands[1])" | |
117 [(pc)] | |
118 { | |
119 rs6000_split_multireg_move (operands[0], operands[1]); | |
120 DONE; | |
121 }) | |
122 | |
123 | |
124 ;; Reload patterns for vector operations. We may need an additional base | |
125 ;; register to convert the reg+offset addressing to reg+reg for vector | |
126 ;; registers and reg+reg or (reg+reg)&(-16) addressing to just an index | |
127 ;; register for gpr registers. | |
128 (define_expand "reload_<VEC_R:mode>_<P:mptrsize>_store" | |
129 [(parallel [(match_operand:VEC_R 0 "memory_operand" "m") | |
130 (match_operand:VEC_R 1 "gpc_reg_operand" "r") | |
131 (match_operand:P 2 "register_operand" "=&b")])] | |
132 "<P:tptrsize>" | |
133 { | |
134 rs6000_secondary_reload_inner (operands[1], operands[0], operands[2], true); | |
135 DONE; | |
136 }) | |
137 | |
138 (define_expand "reload_<VEC_R:mode>_<P:mptrsize>_load" | |
139 [(parallel [(match_operand:VEC_R 0 "gpc_reg_operand" "=&r") | |
140 (match_operand:VEC_R 1 "memory_operand" "m") | |
141 (match_operand:P 2 "register_operand" "=&b")])] | |
142 "<P:tptrsize>" | |
143 { | |
144 rs6000_secondary_reload_inner (operands[0], operands[1], operands[2], false); | |
145 DONE; | |
146 }) | |
147 | |
148 ;; Reload sometimes tries to move the address to a GPR, and can generate | |
149 ;; invalid RTL for addresses involving AND -16. Allow addresses involving | |
150 ;; reg+reg, reg+small constant, or just reg, all wrapped in an AND -16. | |
151 | |
152 (define_insn_and_split "*vec_reload_and_plus_<mptrsize>" | |
153 [(set (match_operand:P 0 "gpc_reg_operand" "=b") | |
154 (and:P (plus:P (match_operand:P 1 "gpc_reg_operand" "r") | |
155 (match_operand:P 2 "reg_or_cint_operand" "rI")) | |
156 (const_int -16)))] | |
157 "(TARGET_ALTIVEC || TARGET_VSX) && (reload_in_progress || reload_completed)" | |
158 "#" | |
159 "&& reload_completed" | |
160 [(set (match_dup 0) | |
161 (plus:P (match_dup 1) | |
162 (match_dup 2))) | |
163 (parallel [(set (match_dup 0) | |
164 (and:P (match_dup 0) | |
165 (const_int -16))) | |
166 (clobber:CC (scratch:CC))])]) | |
167 | |
168 ;; The normal ANDSI3/ANDDI3 won't match if reload decides to move an AND -16 | |
169 ;; address to a register because there is no clobber of a (scratch), so we add | |
170 ;; it here. | |
171 (define_insn_and_split "*vec_reload_and_reg_<mptrsize>" | |
172 [(set (match_operand:P 0 "gpc_reg_operand" "=b") | |
173 (and:P (match_operand:P 1 "gpc_reg_operand" "r") | |
174 (const_int -16)))] | |
175 "(TARGET_ALTIVEC || TARGET_VSX) && (reload_in_progress || reload_completed)" | |
176 "#" | |
177 "&& reload_completed" | |
178 [(parallel [(set (match_dup 0) | |
179 (and:P (match_dup 1) | |
180 (const_int -16))) | |
181 (clobber:CC (scratch:CC))])]) | |
182 | |
183 ;; Generic floating point vector arithmetic support | |
184 (define_expand "add<mode>3" | |
185 [(set (match_operand:VEC_F 0 "vfloat_operand" "") | |
186 (plus:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "") | |
187 (match_operand:VEC_F 2 "vfloat_operand" "")))] | |
188 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
189 "") | |
190 | |
191 (define_expand "sub<mode>3" | |
192 [(set (match_operand:VEC_F 0 "vfloat_operand" "") | |
193 (minus:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "") | |
194 (match_operand:VEC_F 2 "vfloat_operand" "")))] | |
195 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
196 "") | |
197 | |
198 (define_expand "mul<mode>3" | |
199 [(set (match_operand:VEC_F 0 "vfloat_operand" "") | |
200 (mult:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "") | |
201 (match_operand:VEC_F 2 "vfloat_operand" "")))] | |
202 "(VECTOR_UNIT_VSX_P (<MODE>mode) | |
203 || (VECTOR_UNIT_ALTIVEC_P (<MODE>mode) && TARGET_FUSED_MADD))" | |
204 " | |
205 { | |
206 if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode)) | |
207 { | |
208 emit_insn (gen_altivec_mulv4sf3 (operands[0], operands[1], operands[2])); | |
209 DONE; | |
210 } | |
211 }") | |
212 | |
213 (define_expand "div<mode>3" | |
214 [(set (match_operand:VEC_F 0 "vfloat_operand" "") | |
215 (div:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "") | |
216 (match_operand:VEC_F 2 "vfloat_operand" "")))] | |
217 "VECTOR_UNIT_VSX_P (<MODE>mode)" | |
218 "") | |
219 | |
220 (define_expand "neg<mode>2" | |
221 [(set (match_operand:VEC_F 0 "vfloat_operand" "") | |
222 (neg:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")))] | |
223 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
224 " | |
225 { | |
226 if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode)) | |
227 { | |
228 emit_insn (gen_altivec_negv4sf2 (operands[0], operands[1])); | |
229 DONE; | |
230 } | |
231 }") | |
232 | |
233 (define_expand "abs<mode>2" | |
234 [(set (match_operand:VEC_F 0 "vfloat_operand" "") | |
235 (abs:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")))] | |
236 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
237 " | |
238 { | |
239 if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode)) | |
240 { | |
241 emit_insn (gen_altivec_absv4sf2 (operands[0], operands[1])); | |
242 DONE; | |
243 } | |
244 }") | |
245 | |
246 (define_expand "smin<mode>3" | |
247 [(set (match_operand:VEC_F 0 "register_operand" "") | |
248 (smin:VEC_F (match_operand:VEC_F 1 "register_operand" "") | |
249 (match_operand:VEC_F 2 "register_operand" "")))] | |
250 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
251 "") | |
252 | |
253 (define_expand "smax<mode>3" | |
254 [(set (match_operand:VEC_F 0 "register_operand" "") | |
255 (smax:VEC_F (match_operand:VEC_F 1 "register_operand" "") | |
256 (match_operand:VEC_F 2 "register_operand" "")))] | |
257 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
258 "") | |
259 | |
260 | |
261 (define_expand "sqrt<mode>2" | |
262 [(set (match_operand:VEC_F 0 "vfloat_operand" "") | |
263 (sqrt:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")))] | |
264 "VECTOR_UNIT_VSX_P (<MODE>mode)" | |
265 "") | |
266 | |
267 (define_expand "ftrunc<mode>2" | |
268 [(set (match_operand:VEC_F 0 "vfloat_operand" "") | |
269 (fix:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")))] | |
270 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
271 "") | |
272 | |
273 (define_expand "vector_ceil<mode>2" | |
274 [(set (match_operand:VEC_F 0 "vfloat_operand" "") | |
275 (unspec:VEC_F [(match_operand:VEC_F 1 "vfloat_operand" "")] | |
276 UNSPEC_FRIP))] | |
277 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
278 "") | |
279 | |
280 (define_expand "vector_floor<mode>2" | |
281 [(set (match_operand:VEC_F 0 "vfloat_operand" "") | |
282 (unspec:VEC_F [(match_operand:VEC_F 1 "vfloat_operand" "")] | |
283 UNSPEC_FRIM))] | |
284 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
285 "") | |
286 | |
287 (define_expand "vector_btrunc<mode>2" | |
288 [(set (match_operand:VEC_F 0 "vfloat_operand" "") | |
289 (fix:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")))] | |
290 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
291 "") | |
292 | |
293 (define_expand "vector_copysign<mode>3" | |
294 [(set (match_operand:VEC_F 0 "vfloat_operand" "") | |
295 (if_then_else:VEC_F | |
296 (ge:VEC_F (match_operand:VEC_F 2 "vfloat_operand" "") | |
297 (match_dup 3)) | |
298 (abs:VEC_F (match_operand:VEC_F 1 "vfloat_operand" "")) | |
299 (neg:VEC_F (abs:VEC_F (match_dup 1)))))] | |
300 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
301 " | |
302 { | |
303 if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode)) | |
304 { | |
305 emit_insn (gen_altivec_copysign_v4sf3 (operands[0], operands[1], | |
306 operands[2])); | |
307 DONE; | |
308 } | |
309 | |
310 operands[3] = CONST0_RTX (<MODE>mode); | |
311 }") | |
312 | |
313 | |
314 ;; Vector comparisons | |
315 (define_expand "vcond<mode>" | |
316 [(set (match_operand:VEC_F 0 "vfloat_operand" "") | |
317 (if_then_else:VEC_F | |
318 (match_operator 3 "comparison_operator" | |
319 [(match_operand:VEC_F 4 "vfloat_operand" "") | |
320 (match_operand:VEC_F 5 "vfloat_operand" "")]) | |
321 (match_operand:VEC_F 1 "vfloat_operand" "") | |
322 (match_operand:VEC_F 2 "vfloat_operand" "")))] | |
323 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
324 " | |
325 { | |
326 if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2], | |
327 operands[3], operands[4], operands[5])) | |
328 DONE; | |
329 else | |
330 FAIL; | |
331 }") | |
332 | |
333 (define_expand "vcond<mode>" | |
334 [(set (match_operand:VEC_I 0 "vint_operand" "") | |
335 (if_then_else:VEC_I | |
336 (match_operator 3 "comparison_operator" | |
337 [(match_operand:VEC_I 4 "vint_operand" "") | |
338 (match_operand:VEC_I 5 "vint_operand" "")]) | |
339 (match_operand:VEC_I 1 "vint_operand" "") | |
340 (match_operand:VEC_I 2 "vint_operand" "")))] | |
341 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)" | |
342 " | |
343 { | |
344 if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2], | |
345 operands[3], operands[4], operands[5])) | |
346 DONE; | |
347 else | |
348 FAIL; | |
349 }") | |
350 | |
351 (define_expand "vcondu<mode>" | |
352 [(set (match_operand:VEC_I 0 "vint_operand" "") | |
353 (if_then_else:VEC_I | |
354 (match_operator 3 "comparison_operator" | |
355 [(match_operand:VEC_I 4 "vint_operand" "") | |
356 (match_operand:VEC_I 5 "vint_operand" "")]) | |
357 (match_operand:VEC_I 1 "vint_operand" "") | |
358 (match_operand:VEC_I 2 "vint_operand" "")))] | |
359 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)" | |
360 " | |
361 { | |
362 if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2], | |
363 operands[3], operands[4], operands[5])) | |
364 DONE; | |
365 else | |
366 FAIL; | |
367 }") | |
368 | |
369 (define_expand "vector_eq<mode>" | |
370 [(set (match_operand:VEC_C 0 "vlogical_operand" "") | |
371 (eq:VEC_C (match_operand:VEC_C 1 "vlogical_operand" "") | |
372 (match_operand:VEC_C 2 "vlogical_operand" "")))] | |
373 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
374 "") | |
375 | |
376 (define_expand "vector_gt<mode>" | |
377 [(set (match_operand:VEC_C 0 "vlogical_operand" "") | |
378 (gt:VEC_C (match_operand:VEC_C 1 "vlogical_operand" "") | |
379 (match_operand:VEC_C 2 "vlogical_operand" "")))] | |
380 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
381 "") | |
382 | |
383 (define_expand "vector_ge<mode>" | |
384 [(set (match_operand:VEC_C 0 "vlogical_operand" "") | |
385 (ge:VEC_C (match_operand:VEC_C 1 "vlogical_operand" "") | |
386 (match_operand:VEC_C 2 "vlogical_operand" "")))] | |
387 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
388 "") | |
389 | |
390 (define_expand "vector_gtu<mode>" | |
391 [(set (match_operand:VEC_I 0 "vint_operand" "") | |
392 (gtu:VEC_I (match_operand:VEC_I 1 "vint_operand" "") | |
393 (match_operand:VEC_I 2 "vint_operand" "")))] | |
394 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)" | |
395 "") | |
396 | |
397 (define_expand "vector_geu<mode>" | |
398 [(set (match_operand:VEC_I 0 "vint_operand" "") | |
399 (geu:VEC_I (match_operand:VEC_I 1 "vint_operand" "") | |
400 (match_operand:VEC_I 2 "vint_operand" "")))] | |
401 "VECTOR_UNIT_ALTIVEC_P (<MODE>mode)" | |
402 "") | |
403 | |
404 ;; Note the arguments for __builtin_altivec_vsel are op2, op1, mask | |
405 ;; which is in the reverse order that we want | |
406 (define_expand "vector_select_<mode>" | |
407 [(set (match_operand:VEC_L 0 "vlogical_operand" "") | |
408 (if_then_else:VEC_L | |
409 (ne:CC (match_operand:VEC_L 3 "vlogical_operand" "") | |
410 (const_int 0)) | |
411 (match_operand:VEC_L 2 "vlogical_operand" "") | |
412 (match_operand:VEC_L 1 "vlogical_operand" "")))] | |
413 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
414 "") | |
415 | |
416 (define_expand "vector_select_<mode>_uns" | |
417 [(set (match_operand:VEC_L 0 "vlogical_operand" "") | |
418 (if_then_else:VEC_L | |
419 (ne:CCUNS (match_operand:VEC_L 3 "vlogical_operand" "") | |
420 (const_int 0)) | |
421 (match_operand:VEC_L 2 "vlogical_operand" "") | |
422 (match_operand:VEC_L 1 "vlogical_operand" "")))] | |
423 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
424 "") | |
425 | |
426 ;; Expansions that compare vectors producing a vector result and a predicate, | |
427 ;; setting CR6 to indicate a combined status | |
428 (define_expand "vector_eq_<mode>_p" | |
429 [(parallel | |
430 [(set (reg:CC 74) | |
431 (unspec:CC [(eq:CC (match_operand:VEC_A 1 "vlogical_operand" "") | |
432 (match_operand:VEC_A 2 "vlogical_operand" ""))] | |
433 UNSPEC_PREDICATE)) | |
434 (set (match_operand:VEC_A 0 "vlogical_operand" "") | |
435 (eq:VEC_A (match_dup 1) | |
436 (match_dup 2)))])] | |
437 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
438 "") | |
439 | |
440 (define_expand "vector_gt_<mode>_p" | |
441 [(parallel | |
442 [(set (reg:CC 74) | |
443 (unspec:CC [(gt:CC (match_operand:VEC_A 1 "vlogical_operand" "") | |
444 (match_operand:VEC_A 2 "vlogical_operand" ""))] | |
445 UNSPEC_PREDICATE)) | |
446 (set (match_operand:VEC_A 0 "vlogical_operand" "") | |
447 (gt:VEC_A (match_dup 1) | |
448 (match_dup 2)))])] | |
449 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
450 "") | |
451 | |
452 (define_expand "vector_ge_<mode>_p" | |
453 [(parallel | |
454 [(set (reg:CC 74) | |
455 (unspec:CC [(ge:CC (match_operand:VEC_F 1 "vfloat_operand" "") | |
456 (match_operand:VEC_F 2 "vfloat_operand" ""))] | |
457 UNSPEC_PREDICATE)) | |
458 (set (match_operand:VEC_F 0 "vfloat_operand" "") | |
459 (ge:VEC_F (match_dup 1) | |
460 (match_dup 2)))])] | |
461 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
462 "") | |
463 | |
464 (define_expand "vector_gtu_<mode>_p" | |
465 [(parallel | |
466 [(set (reg:CC 74) | |
467 (unspec:CC [(gtu:CC (match_operand:VEC_I 1 "vint_operand" "") | |
468 (match_operand:VEC_I 2 "vint_operand" ""))] | |
469 UNSPEC_PREDICATE)) | |
470 (set (match_operand:VEC_I 0 "vlogical_operand" "") | |
471 (gtu:VEC_I (match_dup 1) | |
472 (match_dup 2)))])] | |
473 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
474 "") | |
475 | |
476 ;; AltiVec/VSX predicates. | |
477 | |
478 (define_expand "cr6_test_for_zero" | |
479 [(set (match_operand:SI 0 "register_operand" "=r") | |
480 (eq:SI (reg:CC 74) | |
481 (const_int 0)))] | |
482 "TARGET_ALTIVEC || TARGET_VSX" | |
483 "") | |
484 | |
485 (define_expand "cr6_test_for_zero_reverse" | |
486 [(set (match_operand:SI 0 "register_operand" "=r") | |
487 (eq:SI (reg:CC 74) | |
488 (const_int 0))) | |
489 (set (match_dup 0) (minus:SI (const_int 1) (match_dup 0)))] | |
490 "TARGET_ALTIVEC || TARGET_VSX" | |
491 "") | |
492 | |
493 (define_expand "cr6_test_for_lt" | |
494 [(set (match_operand:SI 0 "register_operand" "=r") | |
495 (lt:SI (reg:CC 74) | |
496 (const_int 0)))] | |
497 "TARGET_ALTIVEC || TARGET_VSX" | |
498 "") | |
499 | |
500 (define_expand "cr6_test_for_lt_reverse" | |
501 [(set (match_operand:SI 0 "register_operand" "=r") | |
502 (lt:SI (reg:CC 74) | |
503 (const_int 0))) | |
504 (set (match_dup 0) (minus:SI (const_int 1) (match_dup 0)))] | |
505 "TARGET_ALTIVEC || TARGET_VSX" | |
506 "") | |
507 | |
508 | |
509 ;; Vector logical instructions | |
510 (define_expand "xor<mode>3" | |
511 [(set (match_operand:VEC_L 0 "vlogical_operand" "") | |
512 (xor:VEC_L (match_operand:VEC_L 1 "vlogical_operand" "") | |
513 (match_operand:VEC_L 2 "vlogical_operand" "")))] | |
514 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
515 "") | |
516 | |
517 (define_expand "ior<mode>3" | |
518 [(set (match_operand:VEC_L 0 "vlogical_operand" "") | |
519 (ior:VEC_L (match_operand:VEC_L 1 "vlogical_operand" "") | |
520 (match_operand:VEC_L 2 "vlogical_operand" "")))] | |
521 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
522 "") | |
523 | |
524 (define_expand "and<mode>3" | |
525 [(set (match_operand:VEC_L 0 "vlogical_operand" "") | |
526 (and:VEC_L (match_operand:VEC_L 1 "vlogical_operand" "") | |
527 (match_operand:VEC_L 2 "vlogical_operand" "")))] | |
528 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
529 "") | |
530 | |
531 (define_expand "one_cmpl<mode>2" | |
532 [(set (match_operand:VEC_L 0 "vlogical_operand" "") | |
533 (not:VEC_L (match_operand:VEC_L 1 "vlogical_operand" "")))] | |
534 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
535 "") | |
536 | |
537 (define_expand "nor<mode>3" | |
538 [(set (match_operand:VEC_L 0 "vlogical_operand" "") | |
539 (not:VEC_L (ior:VEC_L (match_operand:VEC_L 1 "vlogical_operand" "") | |
540 (match_operand:VEC_L 2 "vlogical_operand" ""))))] | |
541 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
542 "") | |
543 | |
544 (define_expand "andc<mode>3" | |
545 [(set (match_operand:VEC_L 0 "vlogical_operand" "") | |
546 (and:VEC_L (not:VEC_L (match_operand:VEC_L 2 "vlogical_operand" "")) | |
547 (match_operand:VEC_L 1 "vlogical_operand" "")))] | |
548 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
549 "") | |
550 | |
551 ;; Same size conversions | |
552 (define_expand "float<VEC_int><mode>2" | |
553 [(set (match_operand:VEC_F 0 "vfloat_operand" "") | |
554 (float:VEC_F (match_operand:<VEC_INT> 1 "vint_operand" "")))] | |
555 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
556 " | |
557 { | |
558 if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode)) | |
559 { | |
560 emit_insn (gen_altivec_vcfsx (operands[0], operands[1], const0_rtx)); | |
561 DONE; | |
562 } | |
563 }") | |
564 | |
565 (define_expand "unsigned_float<VEC_int><mode>2" | |
566 [(set (match_operand:VEC_F 0 "vfloat_operand" "") | |
567 (unsigned_float:VEC_F (match_operand:<VEC_INT> 1 "vint_operand" "")))] | |
568 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
569 " | |
570 { | |
571 if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode)) | |
572 { | |
573 emit_insn (gen_altivec_vcfux (operands[0], operands[1], const0_rtx)); | |
574 DONE; | |
575 } | |
576 }") | |
577 | |
578 (define_expand "fix_trunc<mode><VEC_int>2" | |
579 [(set (match_operand:<VEC_INT> 0 "vint_operand" "") | |
580 (fix:<VEC_INT> (match_operand:VEC_F 1 "vfloat_operand" "")))] | |
581 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
582 " | |
583 { | |
584 if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode)) | |
585 { | |
586 emit_insn (gen_altivec_vctsxs (operands[0], operands[1], const0_rtx)); | |
587 DONE; | |
588 } | |
589 }") | |
590 | |
591 (define_expand "fixuns_trunc<mode><VEC_int>2" | |
592 [(set (match_operand:<VEC_INT> 0 "vint_operand" "") | |
593 (unsigned_fix:<VEC_INT> (match_operand:VEC_F 1 "vfloat_operand" "")))] | |
594 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
595 " | |
596 { | |
597 if (<MODE>mode == V4SFmode && VECTOR_UNIT_ALTIVEC_P (<MODE>mode)) | |
598 { | |
599 emit_insn (gen_altivec_vctuxs (operands[0], operands[1], const0_rtx)); | |
600 DONE; | |
601 } | |
602 }") | |
603 | |
604 | |
605 ;; Vector initialization, set, extract | |
606 (define_expand "vec_init<mode>" | |
607 [(match_operand:VEC_E 0 "vlogical_operand" "") | |
608 (match_operand:VEC_E 1 "" "")] | |
609 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
610 { | |
611 rs6000_expand_vector_init (operands[0], operands[1]); | |
612 DONE; | |
613 }) | |
614 | |
615 (define_expand "vec_set<mode>" | |
616 [(match_operand:VEC_E 0 "vlogical_operand" "") | |
617 (match_operand:<VEC_base> 1 "register_operand" "") | |
618 (match_operand 2 "const_int_operand" "")] | |
619 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
620 { | |
621 rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2])); | |
622 DONE; | |
623 }) | |
624 | |
625 (define_expand "vec_extract<mode>" | |
626 [(match_operand:<VEC_base> 0 "register_operand" "") | |
627 (match_operand:VEC_E 1 "vlogical_operand" "") | |
628 (match_operand 2 "const_int_operand" "")] | |
629 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
630 { | |
631 rs6000_expand_vector_extract (operands[0], operands[1], | |
632 INTVAL (operands[2])); | |
633 DONE; | |
634 }) | |
635 | |
636 ;; Interleave patterns | |
637 (define_expand "vec_interleave_highv4sf" | |
638 [(set (match_operand:V4SF 0 "vfloat_operand" "") | |
639 (vec_merge:V4SF | |
640 (vec_select:V4SF (match_operand:V4SF 1 "vfloat_operand" "") | |
641 (parallel [(const_int 0) | |
642 (const_int 2) | |
643 (const_int 1) | |
644 (const_int 3)])) | |
645 (vec_select:V4SF (match_operand:V4SF 2 "vfloat_operand" "") | |
646 (parallel [(const_int 2) | |
647 (const_int 0) | |
648 (const_int 3) | |
649 (const_int 1)])) | |
650 (const_int 5)))] | |
651 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)" | |
652 "") | |
653 | |
654 (define_expand "vec_interleave_lowv4sf" | |
655 [(set (match_operand:V4SF 0 "vfloat_operand" "") | |
656 (vec_merge:V4SF | |
657 (vec_select:V4SF (match_operand:V4SF 1 "vfloat_operand" "") | |
658 (parallel [(const_int 2) | |
659 (const_int 0) | |
660 (const_int 3) | |
661 (const_int 1)])) | |
662 (vec_select:V4SF (match_operand:V4SF 2 "vfloat_operand" "") | |
663 (parallel [(const_int 0) | |
664 (const_int 2) | |
665 (const_int 1) | |
666 (const_int 3)])) | |
667 (const_int 5)))] | |
668 "VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)" | |
669 "") | |
670 | |
671 (define_expand "vec_interleave_highv2df" | |
672 [(set (match_operand:V2DF 0 "vfloat_operand" "") | |
673 (vec_concat:V2DF | |
674 (vec_select:DF (match_operand:V2DF 1 "vfloat_operand" "") | |
675 (parallel [(const_int 0)])) | |
676 (vec_select:DF (match_operand:V2DF 2 "vfloat_operand" "") | |
677 (parallel [(const_int 0)]))))] | |
678 "VECTOR_UNIT_VSX_P (V2DFmode)" | |
679 "") | |
680 | |
681 (define_expand "vec_interleave_lowv2df" | |
682 [(set (match_operand:V2DF 0 "vfloat_operand" "") | |
683 (vec_concat:V2DF | |
684 (vec_select:DF (match_operand:V2DF 1 "vfloat_operand" "") | |
685 (parallel [(const_int 1)])) | |
686 (vec_select:DF (match_operand:V2DF 2 "vfloat_operand" "") | |
687 (parallel [(const_int 1)]))))] | |
688 "VECTOR_UNIT_VSX_P (V2DFmode)" | |
689 "") | |
690 | |
691 | |
692 ;; Convert double word types to single word types | |
693 (define_expand "vec_pack_trunc_v2df" | |
694 [(match_operand:V4SF 0 "vfloat_operand" "") | |
695 (match_operand:V2DF 1 "vfloat_operand" "") | |
696 (match_operand:V2DF 2 "vfloat_operand" "")] | |
697 "VECTOR_UNIT_VSX_P (V2DFmode) && TARGET_ALTIVEC" | |
698 { | |
699 rtx r1 = gen_reg_rtx (V4SFmode); | |
700 rtx r2 = gen_reg_rtx (V4SFmode); | |
701 | |
702 emit_insn (gen_vsx_xvcvdpsp (r1, operands[1])); | |
703 emit_insn (gen_vsx_xvcvdpsp (r2, operands[2])); | |
704 emit_insn (gen_vec_extract_evenv4sf (operands[0], r1, r2)); | |
705 DONE; | |
706 }) | |
707 | |
708 (define_expand "vec_pack_sfix_trunc_v2df" | |
709 [(match_operand:V4SI 0 "vint_operand" "") | |
710 (match_operand:V2DF 1 "vfloat_operand" "") | |
711 (match_operand:V2DF 2 "vfloat_operand" "")] | |
712 "VECTOR_UNIT_VSX_P (V2DFmode) && TARGET_ALTIVEC" | |
713 { | |
714 rtx r1 = gen_reg_rtx (V4SImode); | |
715 rtx r2 = gen_reg_rtx (V4SImode); | |
716 | |
717 emit_insn (gen_vsx_xvcvdpsxws (r1, operands[1])); | |
718 emit_insn (gen_vsx_xvcvdpsxws (r2, operands[2])); | |
719 emit_insn (gen_vec_extract_evenv4si (operands[0], r1, r2)); | |
720 DONE; | |
721 }) | |
722 | |
723 (define_expand "vec_pack_ufix_trunc_v2df" | |
724 [(match_operand:V4SI 0 "vint_operand" "") | |
725 (match_operand:V2DF 1 "vfloat_operand" "") | |
726 (match_operand:V2DF 2 "vfloat_operand" "")] | |
727 "VECTOR_UNIT_VSX_P (V2DFmode) && TARGET_ALTIVEC" | |
728 { | |
729 rtx r1 = gen_reg_rtx (V4SImode); | |
730 rtx r2 = gen_reg_rtx (V4SImode); | |
731 | |
732 emit_insn (gen_vsx_xvcvdpuxws (r1, operands[1])); | |
733 emit_insn (gen_vsx_xvcvdpuxws (r2, operands[2])); | |
734 emit_insn (gen_vec_extract_evenv4si (operands[0], r1, r2)); | |
735 DONE; | |
736 }) | |
737 | |
738 ;; Convert single word types to double word | |
739 (define_expand "vec_unpacks_hi_v4sf" | |
740 [(match_operand:V2DF 0 "vfloat_operand" "") | |
741 (match_operand:V4SF 1 "vfloat_operand" "")] | |
742 "VECTOR_UNIT_VSX_P (V2DFmode) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)" | |
743 { | |
744 rtx reg = gen_reg_rtx (V4SFmode); | |
745 | |
746 emit_insn (gen_vec_interleave_highv4sf (reg, operands[1], operands[1])); | |
747 emit_insn (gen_vsx_xvcvspdp (operands[0], reg)); | |
748 DONE; | |
749 }) | |
750 | |
751 (define_expand "vec_unpacks_lo_v4sf" | |
752 [(match_operand:V2DF 0 "vfloat_operand" "") | |
753 (match_operand:V4SF 1 "vfloat_operand" "")] | |
754 "VECTOR_UNIT_VSX_P (V2DFmode) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)" | |
755 { | |
756 rtx reg = gen_reg_rtx (V4SFmode); | |
757 | |
758 emit_insn (gen_vec_interleave_lowv4sf (reg, operands[1], operands[1])); | |
759 emit_insn (gen_vsx_xvcvspdp (operands[0], reg)); | |
760 DONE; | |
761 }) | |
762 | |
763 (define_expand "vec_unpacks_float_hi_v4si" | |
764 [(match_operand:V2DF 0 "vfloat_operand" "") | |
765 (match_operand:V4SI 1 "vint_operand" "")] | |
766 "VECTOR_UNIT_VSX_P (V2DFmode) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SImode)" | |
767 { | |
768 rtx reg = gen_reg_rtx (V4SImode); | |
769 | |
770 emit_insn (gen_vec_interleave_highv4si (reg, operands[1], operands[1])); | |
771 emit_insn (gen_vsx_xvcvsxwdp (operands[0], reg)); | |
772 DONE; | |
773 }) | |
774 | |
775 (define_expand "vec_unpacks_float_lo_v4si" | |
776 [(match_operand:V2DF 0 "vfloat_operand" "") | |
777 (match_operand:V4SI 1 "vint_operand" "")] | |
778 "VECTOR_UNIT_VSX_P (V2DFmode) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SImode)" | |
779 { | |
780 rtx reg = gen_reg_rtx (V4SImode); | |
781 | |
782 emit_insn (gen_vec_interleave_lowv4si (reg, operands[1], operands[1])); | |
783 emit_insn (gen_vsx_xvcvsxwdp (operands[0], reg)); | |
784 DONE; | |
785 }) | |
786 | |
787 (define_expand "vec_unpacku_float_hi_v4si" | |
788 [(match_operand:V2DF 0 "vfloat_operand" "") | |
789 (match_operand:V4SI 1 "vint_operand" "")] | |
790 "VECTOR_UNIT_VSX_P (V2DFmode) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SImode)" | |
791 { | |
792 rtx reg = gen_reg_rtx (V4SImode); | |
793 | |
794 emit_insn (gen_vec_interleave_highv4si (reg, operands[1], operands[1])); | |
795 emit_insn (gen_vsx_xvcvuxwdp (operands[0], reg)); | |
796 DONE; | |
797 }) | |
798 | |
799 (define_expand "vec_unpacku_float_lo_v4si" | |
800 [(match_operand:V2DF 0 "vfloat_operand" "") | |
801 (match_operand:V4SI 1 "vint_operand" "")] | |
802 "VECTOR_UNIT_VSX_P (V2DFmode) && VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SImode)" | |
803 { | |
804 rtx reg = gen_reg_rtx (V4SImode); | |
805 | |
806 emit_insn (gen_vec_interleave_lowv4si (reg, operands[1], operands[1])); | |
807 emit_insn (gen_vsx_xvcvuxwdp (operands[0], reg)); | |
808 DONE; | |
809 }) | |
810 | |
811 | |
812 ;; Align vector loads with a permute. | |
813 (define_expand "vec_realign_load_<mode>" | |
814 [(match_operand:VEC_K 0 "vlogical_operand" "") | |
815 (match_operand:VEC_K 1 "vlogical_operand" "") | |
816 (match_operand:VEC_K 2 "vlogical_operand" "") | |
817 (match_operand:V16QI 3 "vlogical_operand" "")] | |
818 "VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)" | |
819 { | |
820 emit_insn (gen_altivec_vperm_<mode> (operands[0], operands[1], operands[2], | |
821 operands[3])); | |
822 DONE; | |
823 }) | |
824 | |
825 ;; Under VSX, vectors of 4/8 byte alignments do not need to be aligned | |
826 ;; since the load already handles it. | |
827 (define_expand "movmisalign<mode>" | |
828 [(set (match_operand:VEC_N 0 "vfloat_operand" "") | |
829 (match_operand:VEC_N 1 "vfloat_operand" ""))] | |
830 "VECTOR_MEM_VSX_P (<MODE>mode) && TARGET_ALLOW_MOVMISALIGN" | |
831 "") | |
832 | |
833 | |
834 ;; Vector shift left in bits. Currently supported only for shift | |
835 ;; amounts that can be expressed as byte shifts (divisible by 8). | |
836 ;; General shift amounts can be supported using vslo + vsl. We're | |
837 ;; not expecting to see these yet (the vectorizer currently | |
838 ;; generates only shifts divisible by byte_size). | |
;; Shift a whole vector left by a constant number of bits.  Only
;; whole-byte shift amounts are handled; anything else FAILs so the
;; caller falls back to another strategy.
(define_expand "vec_shl_<mode>"
  [(match_operand:VEC_L 0 "vlogical_operand" "")
   (match_operand:VEC_L 1 "vlogical_operand" "")
   (match_operand:QI 2 "reg_or_short_operand" "")]
  "TARGET_ALTIVEC"
  "
{
  rtx bitshift = operands[2];
  rtx shift;
  rtx insn;
  HOST_WIDE_INT bitshift_val;
  HOST_WIDE_INT byteshift_val;

  /* Only constant shift amounts divisible by 8 (whole bytes) are
     supported.  NOTE(review): CONSTANT_P accepts constants other than
     CONST_INT, for which INTVAL would be invalid -- confirm callers
     only pass CONST_INT or a register.  */
  if (! CONSTANT_P (bitshift))
    FAIL;
  bitshift_val = INTVAL (bitshift);
  if (bitshift_val & 0x7)
    FAIL;
  byteshift_val = bitshift_val >> 3;
  if (TARGET_VSX && (byteshift_val & 0x3) == 0)
    {
      /* Whole-word shift on VSX: xxsldwi takes a word count (0..3).
	 Use GEN_INT; CONST_INTs are always VOIDmode.  */
      shift = GEN_INT (byteshift_val >> 2);
      insn = gen_vsx_xxsldwi_<mode> (operands[0], operands[1], operands[1],
				     shift);
    }
  else
    {
      /* Byte shift via vsldoi, which takes a byte count (0..15).  */
      shift = GEN_INT (byteshift_val);
      insn = gen_altivec_vsldoi_<mode> (operands[0], operands[1], operands[1],
					shift);
    }

  emit_insn (insn);
  DONE;
}")
874 | |
;; Vector shift right in bits.  Currently supported only for shift
;; amounts that can be expressed as byte shifts (divisible by 8).
;; General shift amounts can be supported using vsro + vsr.  We're
;; not expecting to see these yet (the vectorizer currently
;; generates only shifts divisible by byte_size).
;; Shift a whole vector right by a constant number of bits, implemented
;; as a left shift by (16 bytes - amount) of the value concatenated with
;; itself.  Only whole-byte shift amounts are handled; anything else
;; FAILs so the caller falls back to another strategy.
(define_expand "vec_shr_<mode>"
  [(match_operand:VEC_L 0 "vlogical_operand" "")
   (match_operand:VEC_L 1 "vlogical_operand" "")
   (match_operand:QI 2 "reg_or_short_operand" "")]
  "TARGET_ALTIVEC"
  "
{
  rtx bitshift = operands[2];
  rtx shift;
  rtx insn;
  HOST_WIDE_INT bitshift_val;
  HOST_WIDE_INT byteshift_val;

  /* Only constant shift amounts divisible by 8 (whole bytes) are
     supported.  NOTE(review): CONSTANT_P accepts constants other than
     CONST_INT, for which INTVAL would be invalid -- confirm callers
     only pass CONST_INT or a register.  */
  if (! CONSTANT_P (bitshift))
    FAIL;
  bitshift_val = INTVAL (bitshift);
  if (bitshift_val & 0x7)
    FAIL;
  byteshift_val = 16 - (bitshift_val >> 3);

  /* A zero-bit shift maps to a 16-byte left shift, which neither
     vsldoi (0..15 bytes) nor xxsldwi (0..3 words) can encode; it is
     simply a copy.  */
  if (byteshift_val == 16)
    {
      emit_move_insn (operands[0], operands[1]);
      DONE;
    }

  if (TARGET_VSX && (byteshift_val & 0x3) == 0)
    {
      /* Whole-word shift on VSX: xxsldwi takes a word count (0..3).
	 Use GEN_INT; CONST_INTs are always VOIDmode.  */
      shift = GEN_INT (byteshift_val >> 2);
      insn = gen_vsx_xxsldwi_<mode> (operands[0], operands[1], operands[1],
				     shift);
    }
  else
    {
      /* Byte shift via vsldoi, which takes a byte count (0..15).  */
      shift = GEN_INT (byteshift_val);
      insn = gen_altivec_vsldoi_<mode> (operands[0], operands[1], operands[1],
					shift);
    }

  emit_insn (insn);
  DONE;
}")
915 | |
916 ;; Expanders for rotate each element in a vector | |
;; Element-wise rotate left.  The expansion is the pattern itself; a
;; matching define_insn (presumably in altivec.md) implements it.
(define_expand "vrotl<mode>3"
  [(set (match_operand:VEC_I 0 "vint_operand" "")
	(rotate:VEC_I (match_operand:VEC_I 1 "vint_operand" "")
		      (match_operand:VEC_I 2 "vint_operand" "")))]
  "TARGET_ALTIVEC"
  "")
923 | |
924 ;; Expanders for arithmetic shift left on each vector element | |
;; Element-wise shift left.  The expansion is the pattern itself; a
;; matching define_insn (presumably in altivec.md) implements it.
(define_expand "vashl<mode>3"
  [(set (match_operand:VEC_I 0 "vint_operand" "")
	(ashift:VEC_I (match_operand:VEC_I 1 "vint_operand" "")
		      (match_operand:VEC_I 2 "vint_operand" "")))]
  "TARGET_ALTIVEC"
  "")
931 | |
932 ;; Expanders for logical shift right on each vector element | |
;; Element-wise logical shift right.  The expansion is the pattern
;; itself; a matching define_insn (presumably in altivec.md) implements it.
(define_expand "vlshr<mode>3"
  [(set (match_operand:VEC_I 0 "vint_operand" "")
	(lshiftrt:VEC_I (match_operand:VEC_I 1 "vint_operand" "")
			(match_operand:VEC_I 2 "vint_operand" "")))]
  "TARGET_ALTIVEC"
  "")
939 | |
940 ;; Expanders for arithmetic shift right on each vector element | |
;; Element-wise arithmetic shift right.  The expansion is the pattern
;; itself; a matching define_insn (presumably in altivec.md) implements it.
(define_expand "vashr<mode>3"
  [(set (match_operand:VEC_I 0 "vint_operand" "")
	(ashiftrt:VEC_I (match_operand:VEC_I 1 "vint_operand" "")
			(match_operand:VEC_I 2 "vint_operand" "")))]
  "TARGET_ALTIVEC"
  "")
947 | |
948 ;;; Expanders for vector insn patterns shared between the SPE and TARGET_PAIRED systems. | |
949 | |
;; Paired-single absolute value; handled directly by a matching insn on
;; both paired float and SPE (no SPEFSCR clobber needed for abs).
(define_expand "absv2sf2"
  [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
	(abs:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")))]
  "TARGET_PAIRED_FLOAT || TARGET_SPE"
  "")
955 | |
;; Paired-single negation; handled directly by a matching insn on both
;; paired float and SPE (no SPEFSCR clobber needed for neg).
(define_expand "negv2sf2"
  [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
	(neg:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")))]
  "TARGET_PAIRED_FLOAT || TARGET_SPE"
  "")
961 | |
;; Paired-single addition.  On SPE, vector FP arithmetic also modifies
;; the SPEFSCR status register, so we hand-build a PARALLEL of the SET
;; plus an explicit SPEFSCR clobber; on paired float we fall through
;; and emit the plain pattern.
(define_expand "addv2sf3"
  [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
	(plus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")
		   (match_operand:V2SF 2 "gpc_reg_operand" "")))]
  "TARGET_PAIRED_FLOAT || TARGET_SPE"
  "
{
  if (TARGET_SPE)
    {
      /* We need to make a note that we clobber SPEFSCR.  */
      rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));

      XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0],
		gen_rtx_PLUS (V2SFmode, operands[1], operands[2]));
      XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode,
		gen_rtx_REG (SImode, SPEFSCR_REGNO));
      emit_insn (par);
      DONE;
    }
}")
981 | |
;; Paired-single subtraction.  On SPE, vector FP arithmetic also
;; modifies the SPEFSCR status register, so we hand-build a PARALLEL of
;; the SET plus an explicit SPEFSCR clobber; on paired float we fall
;; through and emit the plain pattern.
(define_expand "subv2sf3"
  [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
	(minus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")
		    (match_operand:V2SF 2 "gpc_reg_operand" "")))]
  "TARGET_PAIRED_FLOAT || TARGET_SPE"
  "
{
  if (TARGET_SPE)
    {
      /* We need to make a note that we clobber SPEFSCR.  */
      rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));

      XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0],
		gen_rtx_MINUS (V2SFmode, operands[1], operands[2]));
      XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode,
		gen_rtx_REG (SImode, SPEFSCR_REGNO));
      emit_insn (par);
      DONE;
    }
}")
1001 | |
;; Paired-single multiplication.  On SPE, vector FP arithmetic also
;; modifies the SPEFSCR status register, so we hand-build a PARALLEL of
;; the SET plus an explicit SPEFSCR clobber; on paired float we fall
;; through and emit the plain pattern.
(define_expand "mulv2sf3"
  [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
	(mult:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")
		   (match_operand:V2SF 2 "gpc_reg_operand" "")))]
  "TARGET_PAIRED_FLOAT || TARGET_SPE"
  "
{
  if (TARGET_SPE)
    {
      /* We need to make a note that we clobber SPEFSCR.  */
      rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));

      XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0],
		gen_rtx_MULT (V2SFmode, operands[1], operands[2]));
      XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode,
		gen_rtx_REG (SImode, SPEFSCR_REGNO));
      emit_insn (par);
      DONE;
    }
}")
1021 | |
;; Paired-single division.  On SPE, vector FP arithmetic also modifies
;; the SPEFSCR status register, so we hand-build a PARALLEL of the SET
;; plus an explicit SPEFSCR clobber; on paired float we fall through
;; and emit the plain pattern.
(define_expand "divv2sf3"
  [(set (match_operand:V2SF 0 "gpc_reg_operand" "")
	(div:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "")
		  (match_operand:V2SF 2 "gpc_reg_operand" "")))]
  "TARGET_PAIRED_FLOAT || TARGET_SPE"
  "
{
  if (TARGET_SPE)
    {
      /* We need to make a note that we clobber SPEFSCR.  */
      rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));

      XVECEXP (par, 0, 0) = gen_rtx_SET (VOIDmode, operands[0],
		gen_rtx_DIV (V2SFmode, operands[1], operands[2]));
      XVECEXP (par, 0, 1) = gen_rtx_CLOBBER (VOIDmode,
		gen_rtx_REG (SImode, SPEFSCR_REGNO));
      emit_insn (par);
      DONE;
    }
}")