comparison gcc/config/vax/vax.md @ 0:a06113de4d67

first commit
author kent <kent@cr.ie.u-ryukyu.ac.jp>
date Fri, 17 Jul 2009 14:47:48 +0900
parents
children 77e2b8dfacca
comparison
equal deleted inserted replaced
-1:000000000000 0:a06113de4d67
1 ;; Machine description for GNU compiler, VAX Version
2 ;; Copyright (C) 1987, 1988, 1991, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
3 ;; 2002, 2004, 2005, 2007 Free Software Foundation, Inc.
4
5 ;; This file is part of GCC.
6
7 ;; GCC is free software; you can redistribute it and/or modify
8 ;; it under the terms of the GNU General Public License as published by
9 ;; the Free Software Foundation; either version 3, or (at your option)
10 ;; any later version.
11
12 ;; GCC is distributed in the hope that it will be useful,
13 ;; but WITHOUT ANY WARRANTY; without even the implied warranty of
14 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 ;; GNU General Public License for more details.
16
17 ;; You should have received a copy of the GNU General Public License
18 ;; along with GCC; see the file COPYING3. If not see
19 ;; <http://www.gnu.org/licenses/>.
20
21
22 ;;- Instruction patterns. When multiple patterns apply,
23 ;;- the first one in the file is chosen.
24 ;;-
25 ;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
26 ;;-
27 ;;- cpp macro #define NOTICE_UPDATE_CC in file tm.h handles condition code
28 ;;- updates for most instructions.
29
30 ;; UNSPEC_VOLATILE usage:
31
;; Symbolic names for the unspec_volatile codes used by this file and for
;; the VAX's four dedicated register numbers (AP/FP/SP/PC).
32 (define_constants
33 [(VUNSPEC_BLOCKAGE 0) ; `blockage' insn to prevent scheduling across an
34 ; insn in the code.
35 (VUNSPEC_SYNC_ISTREAM 1) ; sequence of insns to sync the I-stream
36 (VAX_AP_REGNUM 12) ; Register 12 contains the argument pointer
37 (VAX_FP_REGNUM 13) ; Register 13 contains the frame pointer
38 (VAX_SP_REGNUM 14) ; Register 14 contains the stack pointer
39 (VAX_PC_REGNUM 15) ; Register 15 contains the program counter
40 ]
41 )
42
43 ;; Integer modes supported on VAX, with a mapping from machine mode
44 ;; to mnemonic suffix. DImode is always a special case.
45 (define_mode_iterator VAXint [QI HI SI])
46 (define_mode_attr isfx [(QI "b") (HI "w") (SI "l")])
47
48 ;; Similar for float modes supported on VAX.
49 (define_mode_iterator VAXfp [SF DF])
;; NOTE(review): "%#" is an output-template escape rather than a literal
;; suffix — presumably it expands to `d' or `g' depending on which DFmode
;; float format is in use; confirm against print_operand in vax.c.
50 (define_mode_attr fsfx [(SF "f") (DF "%#")])
51
52 ;; Some output patterns want integer immediates with a prefix...
;; (These B/H/N prefixes are consumed by the bic-with-constant patterns
;; below via %<VAXint:iprefx>2.)
53 (define_mode_attr iprefx [(QI "B") (HI "H") (SI "N")])
54
55 ;; We don't want to allow a constant operand for test insns because
56 ;; (set (cc0) (const_int foo)) has no mode information. Such insns will
57 ;; be folded while optimizing anyway.
58
;; Compare an integer operand against zero, setting the condition codes.
59 (define_insn "tst<mode>"
60 [(set (cc0)
61 (match_operand:VAXint 0 "nonimmediate_operand" "g"))]
62 ""
63 "tst<VAXint:isfx> %0")
64
;; Floating-point test against zero; `F' also allows FP constants here.
65 (define_insn "tst<mode>"
66 [(set (cc0)
67 (match_operand:VAXfp 0 "general_operand" "gF"))]
68 ""
69 "tst<VAXfp:fsfx> %0")
70
71 (define_insn "cmp<mode>"
72 [(set (cc0)
73 (compare (match_operand:VAXint 0 "nonimmediate_operand" "g")
74 (match_operand:VAXint 1 "general_operand" "g")))]
75 ""
76 "cmp<VAXint:isfx> %0,%1")
77
;; FP compare.  Alternative 0 matches a zero second operand (`G'), which
;; degenerates to a plain tst of operand 0.
78 (define_insn "cmp<mode>"
79 [(set (cc0)
80 (compare (match_operand:VAXfp 0 "general_operand" "gF,gF")
81 (match_operand:VAXfp 1 "general_operand" "G,gF")))]
82 ""
83 "@
84 tst<VAXfp:fsfx> %0
85 cmp<VAXfp:fsfx> %0,%1")
86
;; Set the condition codes from an AND without storing the result
;; (the VAX `bit' test instruction).
87 (define_insn "*bit<mode>"
88 [(set (cc0)
89 (and:VAXint (match_operand:VAXint 0 "general_operand" "g")
90 (match_operand:VAXint 1 "general_operand" "g")))]
91 ""
92 "bit<VAXint:isfx> %0,%1")
93
94 ;; The VAX has no sCOND insns. It does have add/subtract with carry
95 ;; which could be used to implement the sltu and sgeu patterns. However,
96 ;; to do this properly requires a complete rewrite of the compare insns
97 ;; to keep them together with the sltu/sgeu insns until after the
98 ;; reload pass is complete. The previous implementation didn't do this
99 ;; and has been deleted.
100
101
;; Floating-point move.  A zero source (constraint `G') is emitted as a
;; clear instead of a move.
102 (define_insn "mov<mode>"
103 [(set (match_operand:VAXfp 0 "nonimmediate_operand" "=g,g")
104 (match_operand:VAXfp 1 "general_operand" "G,gF"))]
105 ""
106 "@
107 clr<VAXfp:fsfx> %0
108 mov<VAXfp:fsfx> %1,%0")
109
110 ;; Some VAXen don't support this instruction.
111 ;;(define_insn "movti"
112 ;; [(set (match_operand:TI 0 "general_operand" "=g")
113 ;; (match_operand:TI 1 "general_operand" "g"))]
114 ;; ""
115 ;; "movh %1,%0")
116
;; DImode move.  The `I' alternative is emitted as clrq, so `I' presumably
;; matches the constant zero — confirm against the constraint letters in
;; vax.h.  %D1 is an output modifier for printing the DImode operand.
117 (define_insn "movdi"
118 [(set (match_operand:DI 0 "nonimmediate_operand" "=g,g")
119 (match_operand:DI 1 "general_operand" "I,g"))]
120 ""
121 "@
122 clrq %0
123 movq %D1,%0")
124
125 ;; The VAX move instructions have space-time tradeoffs. On a MicroVAX
126 ;; register-register mov instructions take 3 bytes and 2 CPU cycles. clrl
127 ;; takes 2 bytes and 3 cycles. mov from constant to register takes 2 cycles
128 ;; if the constant is smaller than 4 bytes, 3 cycles for a longword
129 ;; constant. movz, mneg, and mcom are as fast as mov, so movzwl is faster
130 ;; than movl for positive constants that fit in 16 bits but not 6 bits. cvt
131 ;; instructions take 4 cycles. inc takes 3 cycles. The machine description
132 ;; is willing to trade 1 byte for 1 cycle (clrl instead of movl $0; cvtwl
133 ;; instead of movl).
134
135 ;; Cycle counts for other models may vary (on a VAX 750 they are similar,
136 ;; but on a VAX 9000 most move and add instructions with one constant
137 ;; operand take 1 cycle).
138
139 ;; Loads of constants between 64 and 128 used to be done with
140 ;; "addl3 $63,#,dst" but this is slower than movzbl and takes as much space.
141
;; QI/HI/SI moves.  All the space/time tradeoffs described in the comment
;; block above are implemented in vax_output_int_move (vax.c).
142 (define_insn "mov<mode>"
143 [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g")
144 (match_operand:VAXint 1 "general_operand" "g"))]
145 ""
146 "* return vax_output_int_move (insn, operands, <MODE>mode);")
147
;; Store into the low 16 bits of a register, leaving the rest alone.
;; Constant sources pick the cheapest encoding: clrw for 0, short-literal
;; movw for 0..63, mcomw (move complemented) when ~i fits a 6-bit literal,
;; movzbw for 0..255.  NOTE(review): %H1 presumably prints the complemented
;; constant truncated to 16 bits — confirm in vax.c print_operand.
148 (define_insn "movstricthi"
149 [(set (strict_low_part (match_operand:HI 0 "register_operand" "+g"))
150 (match_operand:HI 1 "general_operand" "g"))]
151 ""
152 "*
153 {
154 if (CONST_INT_P (operands[1]))
155 {
156 int i = INTVAL (operands[1]);
157 if (i == 0)
158 return \"clrw %0\";
159 else if ((unsigned int)i < 64)
160 return \"movw %1,%0\";
161 else if ((unsigned int)~i < 64)
162 return \"mcomw %H1,%0\";
163 else if ((unsigned int)i < 256)
164 return \"movzbw %1,%0\";
165 }
166 return \"movw %1,%0\";
167 }")
168
;; Same idea for the low byte; only the clear and move-complemented
;; special cases apply at 8 bits.
169 (define_insn "movstrictqi"
170 [(set (strict_low_part (match_operand:QI 0 "register_operand" "+g"))
171 (match_operand:QI 1 "general_operand" "g"))]
172 ""
173 "*
174 {
175 if (CONST_INT_P (operands[1]))
176 {
177 int i = INTVAL (operands[1]);
178 if (i == 0)
179 return \"clrb %0\";
180 else if ((unsigned int)~i < 64)
181 return \"mcomb %B1,%0\";
182 }
183 return \"movb %1,%0\";
184 }")
185
186 ;; This is here to accept 4 arguments and pass the first 3 along
187 ;; to the movmemhi1 pattern that really does the work.
;; (Operand 3 is the alignment argument of the movmem standard pattern;
;; it is accepted and ignored here.)
188 (define_expand "movmemhi"
189 [(set (match_operand:BLK 0 "general_operand" "=g")
190 (match_operand:BLK 1 "general_operand" "g"))
191 (use (match_operand:HI 2 "general_operand" "g"))
192 (match_operand 3 "" "")]
193 ""
194 "
195 emit_insn (gen_movmemhi1 (operands[0], operands[1], operands[2]));
196 DONE;
197 ")
198
199 ;; The definition of this insn does not really explain what it does,
200 ;; but it should suffice
201 ;; that anything generated as this insn will be recognized as one
202 ;; and that it won't successfully combine with anything.
;; Block copy via movc3; the explicit clobbers of r0-r5 model the
;; registers the movc3 string instruction writes.
203 (define_insn "movmemhi1"
204 [(set (match_operand:BLK 0 "memory_operand" "=m")
205 (match_operand:BLK 1 "memory_operand" "m"))
206 (use (match_operand:HI 2 "general_operand" "g"))
207 (clobber (reg:SI 0))
208 (clobber (reg:SI 1))
209 (clobber (reg:SI 2))
210 (clobber (reg:SI 3))
211 (clobber (reg:SI 4))
212 (clobber (reg:SI 5))]
213 ""
214 "movc3 %2,%1,%0")
215
216 ;; Extension and truncation insns.
;; Integer truncations and sign extensions all map onto the cvt family;
;; zero extensions use movz.  Each also sets the condition codes.
217
218 (define_insn "truncsiqi2"
219 [(set (match_operand:QI 0 "nonimmediate_operand" "=g")
220 (truncate:QI (match_operand:SI 1 "nonimmediate_operand" "g")))]
221 ""
222 "cvtlb %1,%0")
223
224 (define_insn "truncsihi2"
225 [(set (match_operand:HI 0 "nonimmediate_operand" "=g")
226 (truncate:HI (match_operand:SI 1 "nonimmediate_operand" "g")))]
227 ""
228 "cvtlw %1,%0")
229
230 (define_insn "trunchiqi2"
231 [(set (match_operand:QI 0 "nonimmediate_operand" "=g")
232 (truncate:QI (match_operand:HI 1 "nonimmediate_operand" "g")))]
233 ""
234 "cvtwb %1,%0")
235
236 (define_insn "extendhisi2"
237 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
238 (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "g")))]
239 ""
240 "cvtwl %1,%0")
241
242 (define_insn "extendqihi2"
243 [(set (match_operand:HI 0 "nonimmediate_operand" "=g")
244 (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "g")))]
245 ""
246 "cvtbw %1,%0")
247
248 (define_insn "extendqisi2"
249 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
250 (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "g")))]
251 ""
252 "cvtbl %1,%0")
253
;; SF <-> DF conversions; %# expands to the double-format suffix (see the
;; fsfx attribute above).
254 (define_insn "extendsfdf2"
255 [(set (match_operand:DF 0 "nonimmediate_operand" "=g")
256 (float_extend:DF (match_operand:SF 1 "general_operand" "gF")))]
257 ""
258 "cvtf%# %1,%0")
259
260 (define_insn "truncdfsf2"
261 [(set (match_operand:SF 0 "nonimmediate_operand" "=g")
262 (float_truncate:SF (match_operand:DF 1 "general_operand" "gF")))]
263 ""
264 "cvt%#f %1,%0")
265
266 (define_insn "zero_extendhisi2"
267 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
268 (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "g")))]
269 ""
270 "movzwl %1,%0")
271
272 (define_insn "zero_extendqihi2"
273 [(set (match_operand:HI 0 "nonimmediate_operand" "=g")
274 (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "g")))]
275 ""
276 "movzbw %1,%0")
277
278 (define_insn "zero_extendqisi2"
279 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
280 (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "g")))]
281 ""
282 "movzbl %1,%0")
283
284 ;; Fix-to-float conversion insns.
;; The two iterators cross-multiply: one pattern per (QI,HI,SI) x (SF,DF)
;; pair, all emitted as cvt<int-suffix><fp-suffix>.
285
286 (define_insn "float<VAXint:mode><VAXfp:mode>2"
287 [(set (match_operand:VAXfp 0 "nonimmediate_operand" "=g")
288 (float:VAXfp (match_operand:VAXint 1 "nonimmediate_operand" "g")))]
289 ""
290 "cvt<VAXint:isfx><VAXfp:fsfx> %1,%0")
291
292 ;; Float-to-fix conversion insns.
;; (fix (fix ...)) is the RTL idiom for truncating float->int conversion.
293
294 (define_insn "fix_trunc<VAXfp:mode><VAXint:mode>2"
295 [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g")
296 (fix:VAXint (fix:VAXfp (match_operand:VAXfp 1 "general_operand" "gF"))))]
297 ""
298 "cvt<VAXfp:fsfx><VAXint:isfx> %1,%0")
299
300 ;;- All kinds of add instructions.
301
;; FP add: two-operand add2 when either input is tied to the output,
;; three-operand add3 otherwise.
302 (define_insn "add<mode>3"
303 [(set (match_operand:VAXfp 0 "nonimmediate_operand" "=g,g,g")
304 (plus:VAXfp (match_operand:VAXfp 1 "general_operand" "0,gF,gF")
305 (match_operand:VAXfp 2 "general_operand" "gF,0,gF")))]
306 ""
307 "@
308 add<VAXfp:fsfx>2 %2,%0
309 add<VAXfp:fsfx>2 %1,%0
310 add<VAXfp:fsfx>3 %1,%2,%0")
311
;; Integer add; instruction selection (incl/addl2/addl3/moval tricks)
;; lives in vax_output_int_add (vax.c).
312 (define_insn "add<mode>3"
313 [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g")
314 (plus:VAXint (match_operand:VAXint 1 "general_operand" "g")
315 (match_operand:VAXint 2 "general_operand" "g")))]
316 ""
317 "* return vax_output_int_add (insn, operands, <MODE>mode);")
318
319 ;; The add-with-carry (adwc) instruction only accepts two operands.
;; 64-bit add: add the low halves (which sets the carry condition bit),
;; then combine the high halves with adwc.  CARRY tracks whether the
;; low-part sequence actually produced a meaningful carry; when it did
;; not, the high part is a plain SImode add and we reuse the addsi3
;; template for it.
320 (define_insn "adddi3"
321 [(set (match_operand:DI 0 "nonimmediate_operand" "=ro>,ro>")
322 (plus:DI (match_operand:DI 1 "general_operand" "%0,ro>")
323 (match_operand:DI 2 "general_operand" "Fro,F")))]
324 ""
325 "*
326 {
327 rtx low[3];
328 const char *pattern;
329 int carry = 1;
330
331 split_quadword_operands (operands, low, 3);
332 /* Add low parts. */
333 if (rtx_equal_p (operands[0], operands[1]))
334 {
335 if (low[2] == const0_rtx)
336 /* Should examine operand, punt if not POST_INC. */
337 pattern = \"tstl %0\", carry = 0;
338 else if (low[2] == const1_rtx)
339 pattern = \"incl %0\";
340 else
341 pattern = \"addl2 %2,%0\";
342 }
343 else
344 {
345 if (low[2] == const0_rtx)
346 pattern = \"movl %1,%0\", carry = 0;
347 else
348 pattern = \"addl3 %2,%1,%0\";
349 }
350 if (pattern)
351 output_asm_insn (pattern, low);
352 if (!carry)
353 /* If CARRY is 0, we don't have any carry value to worry about. */
354 return get_insn_template (CODE_FOR_addsi3, insn);
355 /* %0 = C + %1 + %2 */
356 if (!rtx_equal_p (operands[0], operands[1]))
357 output_asm_insn ((operands[1] == const0_rtx
358 ? \"clrl %0\"
359 : \"movl %1,%0\"), operands);
360 return \"adwc %2,%0\";
361 }")
362
363 ;;- All kinds of subtract instructions.
364
;; FP subtract: sub2 when the minuend is tied to the output, sub3 otherwise.
365 (define_insn "sub<mode>3"
366 [(set (match_operand:VAXfp 0 "nonimmediate_operand" "=g,g")
367 (minus:VAXfp (match_operand:VAXfp 1 "general_operand" "0,gF")
368 (match_operand:VAXfp 2 "general_operand" "gF,gF")))]
369 ""
370 "@
371 sub<VAXfp:fsfx>2 %2,%0
372 sub<VAXfp:fsfx>3 %2,%1,%0")
373
374 (define_insn "sub<mode>3"
375 [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g,g")
376 (minus:VAXint (match_operand:VAXint 1 "general_operand" "0,g")
377 (match_operand:VAXint 2 "general_operand" "g,g")))]
378 ""
379 "@
380 sub<VAXint:isfx>2 %2,%0
381 sub<VAXint:isfx>3 %2,%1,%0")
382
383 ;; The subtract-with-carry (sbwc) instruction only takes two operands.
;; 64-bit subtract, mirroring adddi3 above: subtract the low halves
;; (setting the borrow), then sbwc the high halves.  When the low-part
;; subtraction is degenerate (subtrahend zero), CARRY is cleared and the
;; high part falls back to the plain subsi3 template.
384 (define_insn "subdi3"
385 [(set (match_operand:DI 0 "nonimmediate_operand" "=or>,or>")
386 (minus:DI (match_operand:DI 1 "general_operand" "0,or>")
387 (match_operand:DI 2 "general_operand" "For,F")))]
388 ""
389 "*
390 {
391 rtx low[3];
392 const char *pattern;
393 int carry = 1;
394
395 split_quadword_operands (operands, low, 3);
396 /* Subtract low parts. */
397 if (rtx_equal_p (operands[0], operands[1]))
398 {
399 if (low[2] == const0_rtx)
400 pattern = 0, carry = 0;
401 else if (low[2] == constm1_rtx)
402 pattern = \"decl %0\";
403 else
404 pattern = \"subl2 %2,%0\";
405 }
406 else
407 {
408 if (low[2] == constm1_rtx)
409 pattern = \"decl %0\";
410 else if (low[2] == const0_rtx)
411 pattern = get_insn_template (CODE_FOR_movsi, insn), carry = 0;
412 else
413 pattern = \"subl3 %2,%1,%0\";
414 }
415 if (pattern)
416 output_asm_insn (pattern, low);
417 if (carry)
418 {
419 if (!rtx_equal_p (operands[0], operands[1]))
420 return \"movl %1,%0\;sbwc %2,%0\";
421 return \"sbwc %2,%0\";
422 /* %0 = %2 - %1 - C */
423 }
424 return get_insn_template (CODE_FOR_subsi3, insn);
425 }")
426
427 ;;- Multiply instructions.
428
;; FP multiply: mul2 when an input is tied to the output, mul3 otherwise.
429 (define_insn "mul<mode>3"
430 [(set (match_operand:VAXfp 0 "nonimmediate_operand" "=g,g,g")
431 (mult:VAXfp (match_operand:VAXfp 1 "general_operand" "0,gF,gF")
432 (match_operand:VAXfp 2 "general_operand" "gF,0,gF")))]
433 ""
434 "@
435 mul<VAXfp:fsfx>2 %2,%0
436 mul<VAXfp:fsfx>2 %1,%0
437 mul<VAXfp:fsfx>3 %1,%2,%0")
438
439 (define_insn "mul<mode>3"
440 [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g,g,g")
441 (mult:VAXint (match_operand:VAXint 1 "general_operand" "0,g,g")
442 (match_operand:VAXint 2 "general_operand" "g,0,g")))]
443 ""
444 "@
445 mul<VAXint:isfx>2 %2,%0
446 mul<VAXint:isfx>2 %1,%0
447 mul<VAXint:isfx>3 %1,%2,%0")
448
;; Widening 32x32->64 multiply via emul; the third (addend) operand of
;; emul is forced to literal zero here.
449 (define_insn "mulsidi3"
450 [(set (match_operand:DI 0 "nonimmediate_operand" "=g")
451 (mult:DI (sign_extend:DI
452 (match_operand:SI 1 "nonimmediate_operand" "g"))
453 (sign_extend:DI
454 (match_operand:SI 2 "nonimmediate_operand" "g"))))]
455 ""
456 "emul %1,%2,$0,%0")
457
;; Fused multiply-add: emul's addend slot takes the sign-extended SImode
;; operand 3 directly.
458 (define_insn ""
459 [(set (match_operand:DI 0 "nonimmediate_operand" "=g")
460 (plus:DI
461 (mult:DI (sign_extend:DI
462 (match_operand:SI 1 "nonimmediate_operand" "g"))
463 (sign_extend:DI
464 (match_operand:SI 2 "nonimmediate_operand" "g")))
465 (sign_extend:DI (match_operand:SI 3 "nonimmediate_operand" "g"))))]
466 ""
467 "emul %1,%2,%3,%0")
468
469 ;; 'F' constraint means type CONST_DOUBLE
;; Same multiply-add but with a 64-bit constant addend.  The insn
;; condition requires the high word to be the sign extension of the low
;; word, so the addend can be re-emitted as a 32-bit immediate.
470 (define_insn ""
471 [(set (match_operand:DI 0 "nonimmediate_operand" "=g")
472 (plus:DI
473 (mult:DI (sign_extend:DI
474 (match_operand:SI 1 "nonimmediate_operand" "g"))
475 (sign_extend:DI
476 (match_operand:SI 2 "nonimmediate_operand" "g")))
477 (match_operand:DI 3 "immediate_operand" "F")))]
478 "GET_CODE (operands[3]) == CONST_DOUBLE
479 && CONST_DOUBLE_HIGH (operands[3]) == (CONST_DOUBLE_LOW (operands[3]) >> 31)"
480 "*
481 {
482 if (CONST_DOUBLE_HIGH (operands[3]))
483 operands[3] = GEN_INT (CONST_DOUBLE_LOW (operands[3]));
484 return \"emul %1,%2,%3,%0\";
485 }")
486
487 ;;- Divide instructions.
488
;; FP divide: div2 when the dividend is tied to the output, div3 otherwise.
489 (define_insn "div<mode>3"
490 [(set (match_operand:VAXfp 0 "nonimmediate_operand" "=g,g")
491 (div:VAXfp (match_operand:VAXfp 1 "general_operand" "0,gF")
492 (match_operand:VAXfp 2 "general_operand" "gF,gF")))]
493 ""
494 "@
495 div<VAXfp:fsfx>2 %2,%0
496 div<VAXfp:fsfx>3 %2,%1,%0")
497
498 (define_insn "div<mode>3"
499 [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g,g")
500 (div:VAXint (match_operand:VAXint 1 "general_operand" "0,g")
501 (match_operand:VAXint 2 "general_operand" "g,g")))]
502 ""
503 "@
504 div<VAXint:isfx>2 %2,%0
505 div<VAXint:isfx>3 %2,%1,%0")
506
507 ;This is left out because it is very slow;
508 ;we are better off programming around the "lack" of this insn.
509 ;(define_insn "divmoddisi4"
510 ; [(set (match_operand:SI 0 "general_operand" "=g")
511 ; (div:SI (match_operand:DI 1 "general_operand" "g")
512 ; (match_operand:SI 2 "general_operand" "g")))
513 ; (set (match_operand:SI 3 "general_operand" "=g")
514 ; (mod:SI (match_operand:DI 1 "general_operand" "g")
515 ; (match_operand:SI 2 "general_operand" "g")))]
516 ; ""
517 ; "ediv %2,%1,%0,%3")
518
519 ;; Bit-and on the VAX is done with a clear-bits insn.
;; The expander canonicalizes (and a b) into (and (not a') b) so it can
;; match the bic insn below: a constant operand is moved into slot 1 and
;; complemented at compile time; otherwise an explicit one's-complement
;; is expanded.
520 (define_expand "and<mode>3"
521 [(set (match_operand:VAXint 0 "nonimmediate_operand" "")
522 (and:VAXint (not:VAXint (match_operand:VAXint 1 "general_operand" ""))
523 (match_operand:VAXint 2 "general_operand" "")))]
524 ""
525 "
526 {
527 rtx op1 = operands[1];
528
529 /* If there is a constant argument, complement that one. */
530 if (CONST_INT_P (operands[2]) && !CONST_INT_P (op1))
531 {
532 operands[1] = operands[2];
533 operands[2] = op1;
534 op1 = operands[1];
535 }
536
537 if (CONST_INT_P (op1))
538 operands[1] = GEN_INT (~INTVAL (op1));
539 else
540 operands[1] = expand_unop (<MODE>mode, one_cmpl_optab, op1, 0, 1);
541 }")
542
;; bic (bit clear) implements %0 = %2 & ~%1.
543 (define_insn "*and<mode>"
544 [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g,g")
545 (and:VAXint (not:VAXint (match_operand:VAXint 1 "general_operand" "g,g"))
546 (match_operand:VAXint 2 "general_operand" "0,g")))]
547 ""
548 "@
549 bic<VAXint:isfx>2 %1,%0
550 bic<VAXint:isfx>3 %1,%2,%0")
551
552 ;; The following used to be needed because constant propagation can
553 ;; create them starting from the bic insn patterns above. This is no
554 ;; longer a problem. However, having these patterns allows optimization
555 ;; opportunities in combine.c.
556
;; Plain AND with a constant mask; the iprefx modifier letter (B/H/N,
;; defined above) tells the output routine how to print the complemented
;; mask for bic.
557 (define_insn "*and<mode>_const_int"
558 [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g,g")
559 (and:VAXint (match_operand:VAXint 1 "general_operand" "0,g")
560 (match_operand:VAXint 2 "const_int_operand" "n,n")))]
561 ""
562 "@
563 bic<VAXint:isfx>2 %<VAXint:iprefx>2,%0
564 bic<VAXint:isfx>3 %<VAXint:iprefx>2,%1,%0")
565
566
567 ;;- Bit set instructions.
568
;; Inclusive-or via bis (bit set); 2-operand form when an input is tied
;; to the output.
569 (define_insn "ior<mode>3"
570 [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g,g,g")
571 (ior:VAXint (match_operand:VAXint 1 "general_operand" "0,g,g")
572 (match_operand:VAXint 2 "general_operand" "g,0,g")))]
573 ""
574 "@
575 bis<VAXint:isfx>2 %2,%0
576 bis<VAXint:isfx>2 %1,%0
577 bis<VAXint:isfx>3 %2,%1,%0")
578
579 ;;- xor instructions.
580
581 (define_insn "xor<mode>3"
582 [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g,g,g")
583 (xor:VAXint (match_operand:VAXint 1 "general_operand" "0,g,g")
584 (match_operand:VAXint 2 "general_operand" "g,0,g")))]
585 ""
586 "@
587 xor<VAXint:isfx>2 %2,%0
588 xor<VAXint:isfx>2 %1,%0
589 xor<VAXint:isfx>3 %2,%1,%0")
590
591
;; Negation: mneg = move negated.
592 (define_insn "neg<mode>2"
593 [(set (match_operand:VAXfp 0 "nonimmediate_operand" "=g")
594 (neg:VAXfp (match_operand:VAXfp 1 "general_operand" "gF")))]
595 ""
596 "mneg<VAXfp:fsfx> %1,%0")
597
598 (define_insn "neg<mode>2"
599 [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g")
600 (neg:VAXint (match_operand:VAXint 1 "general_operand" "g")))]
601 ""
602 "mneg<VAXint:isfx> %1,%0")
603
;; One's complement: mcom = move complemented.
604 (define_insn "one_cmpl<mode>2"
605 [(set (match_operand:VAXint 0 "nonimmediate_operand" "=g")
606 (not:VAXint (match_operand:VAXint 1 "general_operand" "g")))]
607 ""
608 "mcom<VAXint:isfx> %1,%0")
609
610
611 ;; Arithmetic right shift on the VAX works by negating the shift count,
612 ;; then emitting a right shift with the shift count negated. This means
613 ;; that all actual shift counts in the RTL will be positive. This
614 ;; prevents converting shifts to ZERO_EXTRACTs with negative positions,
615 ;; which isn't valid.
616 (define_expand "ashrsi3"
617 [(set (match_operand:SI 0 "general_operand" "=g")
618 (ashiftrt:SI (match_operand:SI 1 "general_operand" "g")
619 (match_operand:QI 2 "general_operand" "g")))]
620 ""
621 "
622 {
623 if (!CONST_INT_P (operands[2]))
624 operands[2] = gen_rtx_NEG (QImode, negate_rtx (QImode, operands[2]));
625 }")
626
;; Constant-count arithmetic right shift: ashl with the count printed
;; negated (%n2).
627 (define_insn ""
628 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
629 (ashiftrt:SI (match_operand:SI 1 "general_operand" "g")
630 (match_operand:QI 2 "const_int_operand" "n")))]
631 ""
632 "ashl $%n2,%1,%0")
633
;; Variable-count right shift; the (neg ...) produced by the expander
;; above cancels against ashl's left-shift sense, so the raw register
;; count is passed through.
634 (define_insn ""
635 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
636 (ashiftrt:SI (match_operand:SI 1 "general_operand" "g")
637 (neg:QI (match_operand:QI 2 "general_operand" "g"))))]
638 ""
639 "ashl %2,%1,%0")
640
;; Left shift.  Small constant shifts of a register are strength-reduced:
;; <<1 becomes an add; <<2 and <<3 use address-scaling moves (moval
;; indexes longwords, movad quadwords, so 0[%1] scales the register by
;; 4 or 8 without an actual shift).
641 (define_insn "ashlsi3"
642 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
643 (ashift:SI (match_operand:SI 1 "general_operand" "g")
644 (match_operand:QI 2 "general_operand" "g")))]
645 ""
646 "*
647 {
648 if (operands[2] == const1_rtx && rtx_equal_p (operands[0], operands[1]))
649 return \"addl2 %0,%0\";
650 if (REG_P (operands[1])
651 && CONST_INT_P (operands[2]))
652 {
653 int i = INTVAL (operands[2]);
654 if (i == 1)
655 return \"addl3 %1,%1,%0\";
656 if (i == 2)
657 return \"moval 0[%1],%0\";
658 if (i == 3)
659 return \"movad 0[%1],%0\";
660 }
661 return \"ashl %2,%1,%0\";
662 }")
663
664 ;; Arithmetic right shift on the VAX works by negating the shift count.
665 (define_expand "ashrdi3"
666 [(set (match_operand:DI 0 "general_operand" "=g")
667 (ashiftrt:DI (match_operand:DI 1 "general_operand" "g")
668 (match_operand:QI 2 "general_operand" "g")))]
669 ""
670 "
671 {
672 operands[2] = gen_rtx_NEG (QImode, negate_rtx (QImode, operands[2]));
673 }")
674
;; 64-bit shifts via ashq.
675 (define_insn "ashldi3"
676 [(set (match_operand:DI 0 "nonimmediate_operand" "=g")
677 (ashift:DI (match_operand:DI 1 "general_operand" "g")
678 (match_operand:QI 2 "general_operand" "g")))]
679 ""
680 "ashq %2,%1,%0")
681
682 (define_insn ""
683 [(set (match_operand:DI 0 "nonimmediate_operand" "=g")
684 (ashiftrt:DI (match_operand:DI 1 "general_operand" "g")
685 (neg:QI (match_operand:QI 2 "general_operand" "g"))))]
686 ""
687 "ashq %2,%1,%0")
688
689 ;; We used to have expand_shift handle logical right shifts by using extzv,
690 ;; but this make it very difficult to do lshrdi3. Since the VAX is the
691 ;; only machine with this kludge, it's better to just do this with a
692 ;; define_expand and remove that case from expand_shift.
693
;; Logical right shift by N is expressed as extracting the top 32-N bits:
;; operand 3 holds the computed width (32 - count).
694 (define_expand "lshrsi3"
695 [(set (match_dup 3)
696 (minus:QI (const_int 32)
697 (match_dup 4)))
698 (set (match_operand:SI 0 "general_operand" "=g")
699 (zero_extract:SI (match_operand:SI 1 "register_operand" "r")
700 (match_dup 3)
701 (match_operand:SI 2 "register_operand" "g")))]
702 ""
703 "
704 {
705 operands[3] = gen_reg_rtx (QImode);
706 operands[4] = gen_lowpart (QImode, operands[2]);
707 }")
708
709 ;; Rotate right on the VAX works by negating the shift count.
710 (define_expand "rotrsi3"
711 [(set (match_operand:SI 0 "general_operand" "=g")
712 (rotatert:SI (match_operand:SI 1 "general_operand" "g")
713 (match_operand:QI 2 "general_operand" "g")))]
714 ""
715 "
716 {
717 if (!CONST_INT_P (operands[2]))
718 operands[2] = gen_rtx_NEG (QImode, negate_rtx (QImode, operands[2]));
719 }")
720
721 (define_insn "rotlsi3"
722 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
723 (rotate:SI (match_operand:SI 1 "general_operand" "g")
724 (match_operand:QI 2 "general_operand" "g")))]
725 ""
726 "rotl %2,%1,%0")
727
;; Constant rotate right as a left rotate.  NOTE(review): %R2 is an output
;; modifier — presumably it prints 32 minus the constant; confirm in
;; vax.c print_operand.
728 (define_insn ""
729 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
730 (rotatert:SI (match_operand:SI 1 "general_operand" "g")
731 (match_operand:QI 2 "const_int_operand" "n")))]
732 ""
733 "rotl %R2,%1,%0")
734
;; Variable rotate right: the expander's (neg ...) cancels rotl's
;; left-rotate sense, as with the shifts above.
735 (define_insn ""
736 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
737 (rotatert:SI (match_operand:SI 1 "general_operand" "g")
738 (neg:QI (match_operand:QI 2 "general_operand" "g"))))]
739 ""
740 "rotl %2,%1,%0")
741
742 ;This insn is probably slower than a multiply and an add.
743 ;(define_insn ""
744 ; [(set (match_operand:SI 0 "general_operand" "=g")
745 ; (mult:SI (plus:SI (match_operand:SI 1 "general_operand" "g")
746 ; (match_operand:SI 2 "general_operand" "g"))
747 ; (match_operand:SI 3 "general_operand" "g")))]
748 ; ""
749 ; "index %1,$0x80000000,$0x7fffffff,%3,%2,%0")
750
751 ;; Special cases of bit-field insns which we should
752 ;; recognize in preference to the general case.
753 ;; These handle aligned 8-bit and 16-bit fields,
754 ;; which can usually be done with move instructions.
755
;; Store into an aligned byte/word field.  A register destination at a
;; nonzero bit position still needs insv; otherwise the field is
;; re-addressed as a QImode/HImode memory location and written with a
;; plain move (CC_STATUS_INIT because the move clobbers the cc state
;; NOTICE_UPDATE_CC would otherwise assume).
756 (define_insn ""
757 [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+ro")
758 (match_operand:QI 1 "const_int_operand" "n")
759 (match_operand:SI 2 "const_int_operand" "n"))
760 (match_operand:SI 3 "general_operand" "g"))]
761 "(INTVAL (operands[1]) == 8 || INTVAL (operands[1]) == 16)
762 && INTVAL (operands[2]) % INTVAL (operands[1]) == 0
763 && (REG_P (operands[0])
764 || !mode_dependent_address_p (XEXP (operands[0], 0)))"
765 "*
766 {
767 if (REG_P (operands[0]))
768 {
769 if (INTVAL (operands[2]) != 0)
770 return \"insv %3,%2,%1,%0\";
771 }
772 else
773 operands[0]
774 = adjust_address (operands[0],
775 INTVAL (operands[1]) == 8 ? QImode : HImode,
776 INTVAL (operands[2]) / 8);
777
778 CC_STATUS_INIT;
779 if (INTVAL (operands[1]) == 8)
780 return \"movb %3,%0\";
781 return \"movw %3,%0\";
782 }")
783
;; Zero-extend an aligned byte/word field: extzv for an offset register
;; field, movzbl/movzwl on the re-addressed memory otherwise.  The
;; earlyclobber (&) keeps the destination from overlapping the source.
784 (define_insn ""
785 [(set (match_operand:SI 0 "nonimmediate_operand" "=&g")
786 (zero_extract:SI (match_operand:SI 1 "register_operand" "ro")
787 (match_operand:QI 2 "const_int_operand" "n")
788 (match_operand:SI 3 "const_int_operand" "n")))]
789 "(INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16)
790 && INTVAL (operands[3]) % INTVAL (operands[2]) == 0
791 && (REG_P (operands[1])
792 || !mode_dependent_address_p (XEXP (operands[1], 0)))"
793 "*
794 {
795 if (REG_P (operands[1]))
796 {
797 if (INTVAL (operands[3]) != 0)
798 return \"extzv %3,%2,%1,%0\";
799 }
800 else
801 operands[1]
802 = adjust_address (operands[1],
803 INTVAL (operands[2]) == 8 ? QImode : HImode,
804 INTVAL (operands[3]) / 8);
805
806 if (INTVAL (operands[2]) == 8)
807 return \"movzbl %1,%0\";
808 return \"movzwl %1,%0\";
809 }")
810
;; Sign-extend an aligned byte/word field: extv, or cvtbl/cvtwl on the
;; re-addressed memory.
811 (define_insn ""
812 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
813 (sign_extract:SI (match_operand:SI 1 "register_operand" "ro")
814 (match_operand:QI 2 "const_int_operand" "n")
815 (match_operand:SI 3 "const_int_operand" "n")))]
816 "(INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16)
817 && INTVAL (operands[3]) % INTVAL (operands[2]) == 0
818 && (REG_P (operands[1])
819 || !mode_dependent_address_p (XEXP (operands[1], 0)))"
820 "*
821 {
822 if (REG_P (operands[1]))
823 {
824 if (INTVAL (operands[3]) != 0)
825 return \"extv %3,%2,%1,%0\";
826 }
827 else
828 operands[1]
829 = adjust_address (operands[1],
830 INTVAL (operands[2]) == 8 ? QImode : HImode,
831 INTVAL (operands[3]) / 8);
832
833 if (INTVAL (operands[2]) == 8)
834 return \"cvtbl %1,%0\";
835 return \"cvtwl %1,%0\";
836 }")
837
838 ;; Register-only SImode cases of bit-field insns.
839
;; Compare a signed field of a register against operand 3 (cmpv takes
;; position, size, base, comparand).
840 (define_insn ""
841 [(set (cc0)
842 (compare
843 (sign_extract:SI (match_operand:SI 0 "register_operand" "r")
844 (match_operand:QI 1 "general_operand" "g")
845 (match_operand:SI 2 "general_operand" "g"))
846 (match_operand:SI 3 "general_operand" "g")))]
847 ""
848 "cmpv %2,%1,%0,%3")
849
;; Same for an unsigned field.
850 (define_insn ""
851 [(set (cc0)
852 (compare
853 (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
854 (match_operand:QI 1 "general_operand" "g")
855 (match_operand:SI 2 "general_operand" "g"))
856 (match_operand:SI 3 "general_operand" "g")))]
857 ""
858 "cmpzv %2,%1,%0,%3")
859
860 ;; When the field position and size are constant and the destination
861 ;; is a register, extv and extzv are much slower than a rotate followed
862 ;; by a bicl or sign extension. Because we might end up choosing ext[z]v
863 ;; anyway, we can't allow immediate values for the primary source operand.
864
;; Sign extract from a register: rotate the field down to bit 0
;; (%R3 prints the rotate count), then sign-extend with cvtbl/cvtwl;
;; fall back to extv for the general case.
865 (define_insn ""
866 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
867 (sign_extract:SI (match_operand:SI 1 "register_operand" "ro")
868 (match_operand:QI 2 "general_operand" "g")
869 (match_operand:SI 3 "general_operand" "g")))]
870 ""
871 "*
872 {
873 if (!CONST_INT_P (operands[3])
874 || !CONST_INT_P (operands[2])
875 || !REG_P (operands[0])
876 || (INTVAL (operands[2]) != 8 && INTVAL (operands[2]) != 16))
877 return \"extv %3,%2,%1,%0\";
878 if (INTVAL (operands[2]) == 8)
879 return \"rotl %R3,%1,%0\;cvtbl %0,%0\";
880 return \"rotl %R3,%1,%0\;cvtwl %0,%0\";
881 }")
882
;; Zero extract from a register: rotate + movz for 8/16-bit fields,
;; rotate + bicl (NOTE(review): %M2 presumably prints a mask derived
;; from the width — confirm in vax.c) for other widths, with the rotate
;; skipped when the field already starts at a longword boundary.
883 (define_insn ""
884 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
885 (zero_extract:SI (match_operand:SI 1 "register_operand" "ro")
886 (match_operand:QI 2 "general_operand" "g")
887 (match_operand:SI 3 "general_operand" "g")))]
888 ""
889 "*
890 {
891 if (!CONST_INT_P (operands[3])
892 || !CONST_INT_P (operands[2])
893 || !REG_P (operands[0]))
894 return \"extzv %3,%2,%1,%0\";
895 if (INTVAL (operands[2]) == 8)
896 return \"rotl %R3,%1,%0\;movzbl %0,%0\";
897 if (INTVAL (operands[2]) == 16)
898 return \"rotl %R3,%1,%0\;movzwl %0,%0\";
899 if (INTVAL (operands[3]) & 31)
900 return \"rotl %R3,%1,%0\;bicl2 %M2,%0\";
901 if (rtx_equal_p (operands[0], operands[1]))
902 return \"bicl2 %M2,%0\";
903 return \"bicl3 %M2,%1,%0\";
904 }")
905
906 ;; Non-register cases.
907 ;; nonimmediate_operand is used to make sure that mode-ambiguous cases
908 ;; don't match these (and therefore match the cases above instead).
909
910 (define_insn ""
911 [(set (cc0)
912 (compare
913 (sign_extract:SI (match_operand:QI 0 "memory_operand" "m")
914 (match_operand:QI 1 "general_operand" "g")
915 (match_operand:SI 2 "general_operand" "g"))
916 (match_operand:SI 3 "general_operand" "g")))]
917 ""
918 "cmpv %2,%1,%0,%3")
919
920 (define_insn ""
921 [(set (cc0)
922 (compare
923 (zero_extract:SI (match_operand:QI 0 "nonimmediate_operand" "rm")
924 (match_operand:QI 1 "general_operand" "g")
925 (match_operand:SI 2 "general_operand" "g"))
926 (match_operand:SI 3 "general_operand" "g")))]
927 ""
928 "cmpzv %2,%1,%0,%3")
929
930 (define_insn "extv"
931 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
932 (sign_extract:SI (match_operand:QI 1 "memory_operand" "m")
933 (match_operand:QI 2 "general_operand" "g")
934 (match_operand:SI 3 "general_operand" "g")))]
935 ""
936 "*
937 {
938 if (!REG_P (operands[0])
939 || !CONST_INT_P (operands[2])
940 || !CONST_INT_P (operands[3])
941 || (INTVAL (operands[2]) != 8 && INTVAL (operands[2]) != 16)
942 || INTVAL (operands[2]) + INTVAL (operands[3]) > 32
943 || side_effects_p (operands[1])
944 || (MEM_P (operands[1])
945 && mode_dependent_address_p (XEXP (operands[1], 0))))
946 return \"extv %3,%2,%1,%0\";
947 if (INTVAL (operands[2]) == 8)
948 return \"rotl %R3,%1,%0\;cvtbl %0,%0\";
949 return \"rotl %R3,%1,%0\;cvtwl %0,%0\";
950 }")
951
;; Unsigned bit-field extract ("extzv" standard pattern).  The expander
;; accepts anything; the insn below does the work.
952 (define_expand "extzv"
953 [(set (match_operand:SI 0 "general_operand" "")
954 (zero_extract:SI (match_operand:SI 1 "general_operand" "")
955 (match_operand:QI 2 "general_operand" "")
956 (match_operand:SI 3 "general_operand" "")))]
957 ""
958 "")
959
;; As for extv above: fall back to the general extzv instruction unless
;; a rotl followed by a zero-extension (movzbl/movzwl) or mask (bicl2)
;; can be used on a register destination.
960 (define_insn ""
961 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
962 (zero_extract:SI (match_operand:QI 1 "memory_operand" "m")
963 (match_operand:QI 2 "general_operand" "g")
964 (match_operand:SI 3 "general_operand" "g")))]
965 ""
966 "*
967 {
  /* Need a register destination, constant size and position, a field
     contained in 32 bits, and a safe (side-effect-free, not
     mode-dependent) memory source for the rotl sequence.  */
968 if (!REG_P (operands[0])
969 || !CONST_INT_P (operands[2])
970 || !CONST_INT_P (operands[3])
971 || INTVAL (operands[2]) + INTVAL (operands[3]) > 32
972 || side_effects_p (operands[1])
973 || (MEM_P (operands[1])
974 && mode_dependent_address_p (XEXP (operands[1], 0))))
975 return \"extzv %3,%2,%1,%0\";
  /* Rotate field to bit 0, then clear the bits above it.  */
976 if (INTVAL (operands[2]) == 8)
977 return \"rotl %R3,%1,%0\;movzbl %0,%0\";
978 if (INTVAL (operands[2]) == 16)
979 return \"rotl %R3,%1,%0\;movzwl %0,%0\";
980 return \"rotl %R3,%1,%0\;bicl2 %M2,%0\";
981 }")
982
;; Bit-field store ("insv" standard pattern): insert operand 3 into the
;; field of operand 0 described by size (1) and position (2).
983 (define_expand "insv"
984 [(set (zero_extract:SI (match_operand:SI 0 "general_operand" "")
985 (match_operand:QI 1 "general_operand" "")
986 (match_operand:SI 2 "general_operand" ""))
987 (match_operand:SI 3 "general_operand" ""))]
988 ""
989 "")
990
;; insv with the field base in memory (QImode is used for memory bases;
;; see the jbs/jbc comment further down).
991 (define_insn ""
992 [(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+g")
993 (match_operand:QI 1 "general_operand" "g")
994 (match_operand:SI 2 "general_operand" "g"))
995 (match_operand:SI 3 "general_operand" "g"))]
996 ""
997 "insv %3,%2,%1,%0")
998
;; insv with the field base in a register (SImode).
999 (define_insn ""
1000 [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r")
1001 (match_operand:QI 1 "general_operand" "g")
1002 (match_operand:SI 2 "general_operand" "g"))
1003 (match_operand:SI 3 "general_operand" "g"))]
1004 ""
1005 "insv %3,%2,%1,%0")
1006
1007 ;; Unconditional jump
;; "jbr" is the assembler's span-dependent branch; it is relaxed to a
;; longer form automatically if the label is out of byte-displacement
;; range.
1008 (define_insn "jump"
1009 [(set (pc)
1010 (label_ref (match_operand 0 "" "")))]
1011 ""
1012 "jbr %l0")
1013
1014 ;; Conditional jumps
;; One pattern per comparison code, generated via a code iterator; the
;; actual mnemonic is chosen in vax_output_conditional_branch.
1015 (define_code_iterator any_cond [eq ne gt lt gtu ltu ge le geu leu])
1016
1017 (define_insn "b<code>"
1018 [(set (pc)
1019 (if_then_else (any_cond (cc0)
1020 (const_int 0))
1021 (label_ref (match_operand 0 "" ""))
1022 (pc)))]
1023 ""
1024 "* return vax_output_conditional_branch (<CODE>);")
1025
1026 ;; Recognize reversed jumps.
;; The branch taken when the condition is FALSE: emit the negated
;; condition code via %C0.
1027 (define_insn ""
1028 [(set (pc)
1029 (if_then_else (match_operator 0 "comparison_operator"
1030 [(cc0)
1031 (const_int 0)])
1032 (pc)
1033 (label_ref (match_operand 1 "" ""))))]
1034 ""
1035 "j%C0 %l1") ; %C0 negates condition
1036
1037 ;; Recognize jbs, jlbs, jbc and jlbc instructions. Note that the operand
1038 ;; of jlbs and jlbc insns are SImode in the hardware. However, if it is
1039 ;; memory, we use QImode in the insn. So we can't use those instructions
1040 ;; for mode-dependent addresses.
1041
;; Branch if a single bit of a memory operand is SET.  First alternative
;; ("Q"/"I"): low-bit test via jlbs; second: general bit test via jbs.
1042 (define_insn ""
1043 [(set (pc)
1044 (if_then_else
1045 (ne (zero_extract:SI (match_operand:QI 0 "memory_operand" "Q,g")
1046 (const_int 1)
1047 (match_operand:SI 1 "general_operand" "I,g"))
1048 (const_int 0))
1049 (label_ref (match_operand 2 "" ""))
1050 (pc)))]
1051 ""
1052 "@
1053 jlbs %0,%l2
1054 jbs %1,%0,%l2")
1055
;; Branch if a single bit of a memory operand is CLEAR (jlbc/jbc).
1056 (define_insn ""
1057 [(set (pc)
1058 (if_then_else
1059 (eq (zero_extract:SI (match_operand:QI 0 "memory_operand" "Q,g")
1060 (const_int 1)
1061 (match_operand:SI 1 "general_operand" "I,g"))
1062 (const_int 0))
1063 (label_ref (match_operand 2 "" ""))
1064 (pc)))]
1065 ""
1066 "@
1067 jlbc %0,%l2
1068 jbc %1,%0,%l2")
1069
;; Branch if a single bit of a REGISTER is set.
1070 (define_insn ""
1071 [(set (pc)
1072 (if_then_else
1073 (ne (zero_extract:SI (match_operand:SI 0 "register_operand" "r,r")
1074 (const_int 1)
1075 (match_operand:SI 1 "general_operand" "I,g"))
1076 (const_int 0))
1077 (label_ref (match_operand 2 "" ""))
1078 (pc)))]
1079 ""
1080 "@
1081 jlbs %0,%l2
1082 jbs %1,%0,%l2")
1083
;; Branch if a single bit of a REGISTER is clear.
1084 (define_insn ""
1085 [(set (pc)
1086 (if_then_else
1087 (eq (zero_extract:SI (match_operand:SI 0 "register_operand" "r,r")
1088 (const_int 1)
1089 (match_operand:SI 1 "general_operand" "I,g"))
1090 (const_int 0))
1091 (label_ref (match_operand 2 "" ""))
1092 (pc)))]
1093 ""
1094 "@
1095 jlbc %0,%l2
1096 jbc %1,%0,%l2")
1097
1098 ;; Subtract-and-jump and Add-and-jump insns.
1099 ;; These are not used when output is for the Unix assembler
1100 ;; because it does not know how to modify them to reach far.
1101
1102 ;; Normal sob insns.
1103
;; Decrement operand 0 and branch if the result is still > 0
;; (jsobgtr: "subtract one and branch greater").
1104 (define_insn ""
1105 [(set (pc)
1106 (if_then_else
1107 (gt (plus:SI (match_operand:SI 0 "nonimmediate_operand" "+g")
1108 (const_int -1))
1109 (const_int 0))
1110 (label_ref (match_operand 1 "" ""))
1111 (pc)))
1112 (set (match_dup 0)
1113 (plus:SI (match_dup 0)
1114 (const_int -1)))]
1115 "!TARGET_UNIX_ASM"
1116 "jsobgtr %0,%l1")
1117
;; Same, but branch while the decremented value is >= 0 (jsobgeq).
1118 (define_insn ""
1119 [(set (pc)
1120 (if_then_else
1121 (ge (plus:SI (match_operand:SI 0 "nonimmediate_operand" "+g")
1122 (const_int -1))
1123 (const_int 0))
1124 (label_ref (match_operand 1 "" ""))
1125 (pc)))
1126 (set (match_dup 0)
1127 (plus:SI (match_dup 0)
1128 (const_int -1)))]
1129 "!TARGET_UNIX_ASM"
1130 "jsobgeq %0,%l1")
1131
1132 ;; Normal aob insns. Define a version for when operands[1] is a constant.
;; Increment operand 0 and branch while it is still < limit (jaoblss).
1133 (define_insn ""
1134 [(set (pc)
1135 (if_then_else
1136 (lt (plus:SI (match_operand:SI 0 "nonimmediate_operand" "+g")
1137 (const_int 1))
1138 (match_operand:SI 1 "general_operand" "g"))
1139 (label_ref (match_operand 2 "" ""))
1140 (pc)))
1141 (set (match_dup 0)
1142 (plus:SI (match_dup 0)
1143 (const_int 1)))]
1144 "!TARGET_UNIX_ASM"
1145 "jaoblss %1,%0,%l2")
1146
;; Variant matched when the comparison is against the PRE-incremented
;; value and the limit is a constant; %P1 prints the adjusted limit.
1147 (define_insn ""
1148 [(set (pc)
1149 (if_then_else
1150 (lt (match_operand:SI 0 "nonimmediate_operand" "+g")
1151 (match_operand:SI 1 "general_operand" "g"))
1152 (label_ref (match_operand 2 "" ""))
1153 (pc)))
1154 (set (match_dup 0)
1155 (plus:SI (match_dup 0)
1156 (const_int 1)))]
1157 "!TARGET_UNIX_ASM && CONST_INT_P (operands[1])"
1158 "jaoblss %P1,%0,%l2")
1159
;; As above but branch while <= limit (jaobleq).
1160 (define_insn ""
1161 [(set (pc)
1162 (if_then_else
1163 (le (plus:SI (match_operand:SI 0 "nonimmediate_operand" "+g")
1164 (const_int 1))
1165 (match_operand:SI 1 "general_operand" "g"))
1166 (label_ref (match_operand 2 "" ""))
1167 (pc)))
1168 (set (match_dup 0)
1169 (plus:SI (match_dup 0)
1170 (const_int 1)))]
1171 "!TARGET_UNIX_ASM"
1172 "jaobleq %1,%0,%l2")
1173
;; jaobleq with a constant limit compared against the pre-incremented
;; value; %P1 prints the adjusted limit.
1174 (define_insn ""
1175 [(set (pc)
1176 (if_then_else
1177 (le (match_operand:SI 0 "nonimmediate_operand" "+g")
1178 (match_operand:SI 1 "general_operand" "g"))
1179 (label_ref (match_operand 2 "" ""))
1180 (pc)))
1181 (set (match_dup 0)
1182 (plus:SI (match_dup 0)
1183 (const_int 1)))]
1184 "!TARGET_UNIX_ASM && CONST_INT_P (operands[1])"
1185 "jaobleq %P1,%0,%l2")
1186
1187 ;; Something like a sob insn, but compares against -1.
1188 ;; This finds `while (foo--)' which was changed to `while (--foo != -1)'.
1189
;; Decrement, then branch if the ORIGINAL value was nonzero; the
;; decl/jgequ pair relies on the condition codes set by decl.
1190 (define_insn ""
1191 [(set (pc)
1192 (if_then_else
1193 (ne (match_operand:SI 0 "nonimmediate_operand" "+g")
1194 (const_int 0))
1195 (label_ref (match_operand 1 "" ""))
1196 (pc)))
1197 (set (match_dup 0)
1198 (plus:SI (match_dup 0)
1199 (const_int -1)))]
1200 ""
1201 "decl %0\;jgequ %l1")
1202
;; Call with stack pop.  The VAX "calls" instruction itself pops the
;; arguments (plus the argument count longword), so operand 3 is
;; limited to the 255-argument encoding of calls.
1203 (define_expand "call_pop"
1204 [(parallel [(call (match_operand:QI 0 "memory_operand" "")
1205 (match_operand:SI 1 "const_int_operand" ""))
1206 (set (reg:SI VAX_SP_REGNUM)
1207 (plus:SI (reg:SI VAX_SP_REGNUM)
1208 (match_operand:SI 3 "immediate_operand" "")))])]
1209 ""
1210 {
1211 gcc_assert (INTVAL (operands[3]) <= 255 * 4 && INTVAL (operands[3]) % 4 == 0);
1212
1213 /* Operand 1 is the number of bytes to be popped by DW_CFA_GNU_args_size
1214 during EH unwinding. We must include the argument count pushed by
1215 the calls instruction. */
1216 operands[1] = GEN_INT (INTVAL (operands[3]) + 4);
1217 })
1218
;; Matching insn: convert the byte count back to the argument count
;; that the calls instruction encodes.
1219 (define_insn "*call_pop"
1220 [(call (match_operand:QI 0 "memory_operand" "m")
1221 (match_operand:SI 1 "const_int_operand" "n"))
1222 (set (reg:SI VAX_SP_REGNUM) (plus:SI (reg:SI VAX_SP_REGNUM)
1223 (match_operand:SI 2 "immediate_operand" "i")))]
1224 ""
1225 {
1226 operands[1] = GEN_INT ((INTVAL (operands[1]) - 4) / 4);
1227 return "calls %1,%0";
1228 })
1229
;; Value-returning variant of call_pop; same argument-count bookkeeping.
1230 (define_expand "call_value_pop"
1231 [(parallel [(set (match_operand 0 "" "")
1232 (call (match_operand:QI 1 "memory_operand" "")
1233 (match_operand:SI 2 "const_int_operand" "")))
1234 (set (reg:SI VAX_SP_REGNUM)
1235 (plus:SI (reg:SI VAX_SP_REGNUM)
1236 (match_operand:SI 4 "immediate_operand" "")))])]
1237 ""
1238 {
1239 gcc_assert (INTVAL (operands[4]) <= 255 * 4 && INTVAL (operands[4]) % 4 == 0);
1240
1241 /* Operand 2 is the number of bytes to be popped by DW_CFA_GNU_args_size
1242 during EH unwinding. We must include the argument count pushed by
1243 the calls instruction. */
1244 operands[2] = GEN_INT (INTVAL (operands[4]) + 4);
1245 })
1246
;; Matching insn; recover the calls argument count from the byte count.
1247 (define_insn "*call_value_pop"
1248 [(set (match_operand 0 "" "")
1249 (call (match_operand:QI 1 "memory_operand" "m")
1250 (match_operand:SI 2 "const_int_operand" "n")))
1251 (set (reg:SI VAX_SP_REGNUM) (plus:SI (reg:SI VAX_SP_REGNUM)
1252 (match_operand:SI 3 "immediate_operand" "i")))]
1253 ""
1254 "*
1255 {
1256 operands[2] = GEN_INT ((INTVAL (operands[2]) - 4) / 4);
1257 return \"calls %2,%1\";
1258 }")
1259
;; Plain call (no value).  Emitted as "calls $0,addr": zero arguments
;; popped by the instruction; the unwinder byte count still includes
;; the argument-count longword calls pushes.
1260 (define_expand "call"
1261 [(call (match_operand:QI 0 "memory_operand" "")
1262 (match_operand:SI 1 "const_int_operand" ""))]
1263 ""
1264 "
1265 {
1266 /* Operand 1 is the number of bytes to be popped by DW_CFA_GNU_args_size
1267 during EH unwinding. We must include the argument count pushed by
1268 the calls instruction. */
1269 operands[1] = GEN_INT (INTVAL (operands[1]) + 4);
1270 }")
1271
1272 (define_insn "*call"
1273 [(call (match_operand:QI 0 "memory_operand" "m")
1274 (match_operand:SI 1 "const_int_operand" ""))]
1275 ""
1276 "calls $0,%0")
1277
;; Value-returning plain call; mirrors "call" above.
1278 (define_expand "call_value"
1279 [(set (match_operand 0 "" "")
1280 (call (match_operand:QI 1 "memory_operand" "")
1281 (match_operand:SI 2 "const_int_operand" "")))]
1282 ""
1283 "
1284 {
1285 /* Operand 2 is the number of bytes to be popped by DW_CFA_GNU_args_size
1286 during EH unwinding. We must include the argument count pushed by
1287 the calls instruction. */
1288 operands[2] = GEN_INT (INTVAL (operands[2]) + 4);
1289 }")
1290
1291 (define_insn "*call_value"
1292 [(set (match_operand 0 "" "")
1293 (call (match_operand:QI 1 "memory_operand" "m")
1294 (match_operand:SI 2 "const_int_operand" "")))]
1295 ""
1296 "calls $0,%1")
1297
1298 ;; Call subroutine returning any type.
1299
;; Operand 0 = function, operand 1 = result block, operand 2 = vector
;; of (set (result-slot) (hard-reg)) moves to perform after the call.
1300 (define_expand "untyped_call"
1301 [(parallel [(call (match_operand 0 "" "")
1302 (const_int 0))
1303 (match_operand 1 "" "")
1304 (match_operand 2 "" "")])]
1305 ""
1306 "
1307 {
1308 int i;
1309
1310 emit_call_insn (gen_call_pop (operands[0], const0_rtx, NULL, const0_rtx));
1311
  /* Copy each returned hard register into its result-block slot.  */
1312 for (i = 0; i < XVECLEN (operands[2], 0); i++)
1313 {
1314 rtx set = XVECEXP (operands[2], 0, i);
1315 emit_move_insn (SET_DEST (set), SET_SRC (set));
1316 }
1317
1318 /* The optimizer does not know that the call sets the function value
1319 registers we stored in the result block. We avoid problems by
1320 claiming that all hard registers are used and clobbered at this
1321 point. */
1322 emit_insn (gen_blockage ());
1323
1324 DONE;
1325 }")
1326
1327 ;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
1328 ;; all of memory. This blocks insns from being moved across this point.
1329
;; Emits no code; exists purely as a scheduling/motion barrier.
1330 (define_insn "blockage"
1331 [(unspec_volatile [(const_int 0)] VUNSPEC_BLOCKAGE)]
1332 ""
1333 "")
1334
;; Function return; "ret" undoes the calls frame.
1335 (define_insn "return"
1336 [(return)]
1337 ""
1338 "ret")
1339
;; Epilogue is just a return; calls/ret manage the frame on VAX.
1340 (define_expand "epilogue"
1341 [(return)]
1342 ""
1343 "
1344 {
1345 emit_jump_insn (gen_return ());
1346 DONE;
1347 }")
1348
;; Standard no-operation insn.
1349 (define_insn "nop"
1350 [(const_int 0)]
1351 ""
1352 "nop")
1353
1354 ;; This had a wider constraint once, and it had trouble.
1355 ;; If you are tempted to try `g', please don't--it's not worth
1356 ;; the risk we will reopen the same bug.
;; Jump to the address held in a register.
1357 (define_insn "indirect_jump"
1358 [(set (pc) (match_operand:SI 0 "register_operand" "r"))]
1359 ""
1360 "jmp (%0)")
1361
1362 ;; This is here to accept 5 arguments (as passed by expand_end_case)
1363 ;; and pass the first 4 along to the casesi1 pattern that really does
1364 ;; the actual casesi work. We emit a jump here to the default label
1365 ;; _before_ the casesi so that we can be sure that the casesi never
1366 ;; drops through.
1367 ;; This is suboptimal perhaps, but so is much of the rest of this
1368 ;; machine description. For what it's worth, HPPA uses the same trick.
1369 ;;
1370 ;; operand 0 is index
1371 ;; operand 1 is the minimum bound (a const_int)
1372 ;; operand 2 is the maximum bound - minimum bound + 1 (also a const_int)
1373 ;; operand 3 is CODE_LABEL for the table;
1374 ;; operand 4 is the CODE_LABEL to go to if index out of range (ie. default).
1375 ;;
1376 ;; We emit:
1377 ;; i = index - minimum_bound
1378 ;; if (i > (maximum_bound - minimum_bound + 1)) goto default;
1379 ;; casesi (i, 0, table);
1380 ;;
1381 (define_expand "casesi"
1382 [(match_operand:SI 0 "general_operand" "")
1383 (match_operand:SI 1 "general_operand" "")
1384 (match_operand:SI 2 "general_operand" "")
1385 (match_operand 3 "" "")
1386 (match_operand 4 "" "")]
1387 ""
1388 {
1389 /* i = index - minimum_bound;
1390 But only if the lower bound is not already zero. */
1391 if (operands[1] != const0_rtx)
1392 {
1393 rtx index = gen_reg_rtx (SImode);
1394 emit_insn (gen_addsi3 (index,
1395 operands[0],
1396 GEN_INT (-INTVAL (operands[1]))));
1397 operands[0] = index;
1398 }
1399
1400 /* if (i > (maximum_bound - minimum_bound + 1)) goto default; */
1401 emit_insn (gen_cmpsi (operands[0], operands[2]));
1402 emit_jump_insn (gen_bgtu (operands[4]));
1403
1404 /* casesi (i, 0, table); */
1405 emit_jump_insn (gen_casesi1 (operands[0], operands[2], operands[3]));
1406 DONE;
1407 })
1408
1409 ;; This insn is a bit of a liar. It actually falls through if no case
1410 ;; matches. But, we prevent that from ever happening by emitting a jump
1411 ;; before this, see the define_expand above.
;; VAX "casel" dispatch: indexes a table of HImode (word) displacements
;; located just after the instruction (pc-relative).
1412 (define_insn "casesi1"
1413 [(match_operand:SI 1 "const_int_operand" "n")
1414 (set (pc)
1415 (plus:SI (sign_extend:SI
1416 (mem:HI (plus:SI (mult:SI (match_operand:SI 0 "general_operand" "g")
1417 (const_int 2))
1418 (pc))))
1419 (label_ref:SI (match_operand 2 "" ""))))]
1420 ""
1421 "casel %0,$0,%1")
1422
1423 ;;- load or push effective address
1424 ;; These come after the move and add/sub patterns
1425 ;; because we don't want pushl $1 turned into pushad 1.
1426 ;; or addl3 r1,r2,r3 turned into movab 0(r1)[r2],r3.
1427
1428 ;; It does not work to use constraints to distinguish pushes from moves,
1429 ;; because < matches any autodecrement, not just a push.
1430
;; One pattern per mode of the addressed object; the mode only selects
;; the instruction suffix (b/w/l/q/f/d), since what is moved is always
;; the 32-bit address itself.  Each checks at output time whether the
;; destination is a push onto the stack.
;; QImode address: pushab / movab.
1431 (define_insn ""
1432 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
1433 (match_operand:QI 1 "address_operand" "p"))]
1434 ""
1435 "*
1436 {
1437 if (push_operand (operands[0], SImode))
1438 return \"pushab %a1\";
1439 else
1440 return \"movab %a1,%0\";
1441 }")
1442
;; HImode address: pushaw / movaw.
1443 (define_insn ""
1444 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
1445 (match_operand:HI 1 "address_operand" "p"))]
1446 ""
1447 "*
1448 {
1449 if (push_operand (operands[0], SImode))
1450 return \"pushaw %a1\";
1451 else
1452 return \"movaw %a1,%0\";
1453 }")
1454
;; SImode address: pushal / moval.
1455 (define_insn ""
1456 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
1457 (match_operand:SI 1 "address_operand" "p"))]
1458 ""
1459 "*
1460 {
1461 if (push_operand (operands[0], SImode))
1462 return \"pushal %a1\";
1463 else
1464 return \"moval %a1,%0\";
1465 }")
1466
;; DImode address: pushaq / movaq.
1467 (define_insn ""
1468 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
1469 (match_operand:DI 1 "address_operand" "p"))]
1470 ""
1471 "*
1472 {
1473 if (push_operand (operands[0], SImode))
1474 return \"pushaq %a1\";
1475 else
1476 return \"movaq %a1,%0\";
1477 }")
1478
;; SFmode address: pushaf / movaf.
1479 (define_insn ""
1480 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
1481 (match_operand:SF 1 "address_operand" "p"))]
1482 ""
1483 "*
1484 {
1485 if (push_operand (operands[0], SImode))
1486 return \"pushaf %a1\";
1487 else
1488 return \"movaf %a1,%0\";
1489 }")
1490
;; DFmode address: pushad / movad.
1491 (define_insn ""
1492 [(set (match_operand:SI 0 "nonimmediate_operand" "=g")
1493 (match_operand:DF 1 "address_operand" "p"))]
1494 ""
1495 "*
1496 {
1497 if (push_operand (operands[0], SImode))
1498 return \"pushad %a1\";
1499 else
1500 return \"movad %a1,%0\";
1501 }")
1502
1503 ;; These used to be peepholes, but it is more straightforward to do them
1504 ;; as single insns. However, we must force the output to be a register
1505 ;; if it is not an offsettable address so that we know that we can assign
1506 ;; to it twice.
1507
1508 ;; If we had a good way of evaluating the relative costs, these could be
1509 ;; machine-independent.
1510
1511 ;; Optimize extzv ...,z; andl2 ...,z
1512 ;; or ashl ...,z; andl2 ...,z
1513 ;; with other operands constant. This is what the combiner converts the
1514 ;; above sequences to before attempting to recognize the new insn.
1515
;; Right-shift then mask: the insn condition ensures the mask keeps
;; only bits that survive the shift, so rotl + bicl2 is equivalent.
1516 (define_insn ""
1517 [(set (match_operand:SI 0 "nonimmediate_operand" "=ro")
1518 (and:SI (ashiftrt:SI (match_operand:SI 1 "general_operand" "g")
1519 (match_operand:QI 2 "const_int_operand" "n"))
1520 (match_operand:SI 3 "const_int_operand" "n")))]
1521 "(INTVAL (operands[3]) & ~((1 << (32 - INTVAL (operands[2]))) - 1)) == 0"
1522 "*
1523 {
  /* Narrow the mask to the bits the shift leaves defined before
     printing its complement for bicl2.  */
1524 unsigned long mask1 = INTVAL (operands[3]);
1525 unsigned long mask2 = (1 << (32 - INTVAL (operands[2]))) - 1;
1526
1527 if ((mask1 & mask2) != mask1)
1528 operands[3] = GEN_INT (mask1 & mask2);
1529
1530 return \"rotl %R2,%1,%0\;bicl2 %N3,%0\";
1531 }")
1532
1533 ;; left-shift and mask
1534 ;; The only case where `ashl' is better is if the mask only turns off
1535 ;; bits that the ashl would anyways, in which case it should have been
1536 ;; optimized away.
1537
;; Left-shift then mask, again as rotl + bicl2; the low bits that the
;; shift would have cleared are folded into the mask here.
1538 (define_insn ""
1539 [(set (match_operand:SI 0 "nonimmediate_operand" "=ro")
1540 (and:SI (ashift:SI (match_operand:SI 1 "general_operand" "g")
1541 (match_operand:QI 2 "const_int_operand" "n"))
1542 (match_operand:SI 3 "const_int_operand" "n")))]
1543 ""
1544 "*
1545 {
1546 operands[3]
1547 = GEN_INT (INTVAL (operands[3]) & ~((1 << INTVAL (operands[2])) - 1));
1548 return \"rotl %2,%1,%0\;bicl2 %N3,%0\";
1549 }")
1550
1551 ;; Instruction sequence to sync the VAX instruction stream.
;; Push the PSL and a return address, then REI; REI serializes the
;; processor, flushing any prefetched (stale) instructions.
1552 (define_insn "sync_istream"
1553 [(unspec_volatile [(const_int 0)] VUNSPEC_SYNC_ISTREAM)]
1554 ""
1555 "movpsl -(%|sp)\;pushal 1(%|pc)\;rei")