Mercurial > hg > CbC > CbC_gcc
annotate gcc/config/mcore/mcore.md @ 67:f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
author | nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp> |
---|---|
date | Tue, 22 Mar 2011 17:18:12 +0900 |
parents | 77e2b8dfacca |
children | 04ced10e8804 |
rev | line source |
---|---|
0 | 1 ;; Machine description for the Motorola MCore |
67
f6334be47118
update gcc from gcc-4.6-20100522 to gcc-4.6-20110318
nobuyasu <dimolto@cr.ie.u-ryukyu.ac.jp>
parents:
55
diff
changeset
|
2 ;; Copyright (C) 1993, 1999, 2000, 2004, 2005, 2007, 2009, 2010 |
0 | 3 ;; Free Software Foundation, Inc. |
4 ;; Contributed by Motorola. | |
5 | |
6 ;; This file is part of GCC. | |
7 | |
8 ;; GCC is free software; you can redistribute it and/or modify | |
9 ;; it under the terms of the GNU General Public License as published by | |
10 ;; the Free Software Foundation; either version 3, or (at your option) | |
11 ;; any later version. | |
12 | |
13 ;; GCC is distributed in the hope that it will be useful, | |
14 ;; but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 ;; GNU General Public License for more details. | |
17 | |
18 ;; You should have received a copy of the GNU General Public License | |
19 ;; along with GCC; see the file COPYING3. If not see | |
20 ;; <http://www.gnu.org/licenses/>. | |
21 | |
22 ;;- See file "rtl.def" for documentation on define_insn, match_*, et. al. | |
23 | |
24 | |
25 | |
26 ;; ------------------------------------------------------------------------- | |
27 ;; Attributes | |
28 ;; ------------------------------------------------------------------------- | |
29 | |
; Target CPU.

;; Classify insns for the scheduler below; any pattern that does not
;; override "type" is treated as a generic ALU operation.
(define_attr "type" "brcond,branch,jmp,load,store,move,alu,shift"
  (const_string "alu"))

;; If a branch destination is within -2048..2047 bytes away from the
;; instruction it can be 2 bytes long.  All other conditional branches
;; are 10 bytes long, and all other unconditional branches are 8 bytes.
;;
;; The assembler handles the long-branch span case for us if we use
;; the "jb*" mnemonics for jumps/branches.  This pushes the span
;; calculations and the literal table placement into the assembler,
;; where their interactions can be managed in a single place.

;; All MCORE instructions are two bytes long.

(define_attr "length" "" (const_int 2))

;; Scheduling.  We only model a simple load latency: loads take 2
;; cycles, everything else takes 1.
(define_insn_reservation "any_insn" 1
			 (eq_attr "type" "!load")
			 "nothing")
(define_insn_reservation "memory" 2
			 (eq_attr "type" "load")
			 "nothing")

(include "predicates.md")
(include "constraints.md")
0 | 58 |
;; -------------------------------------------------------------------------
;; Test and bit test
;; -------------------------------------------------------------------------

;; Set the condition register (hard reg 17) from one bit of operand 0;
;; the bit number is the literal operand 1.  The sign_extract and
;; zero_extract forms both emit btsti -- NOTE(review): presumably only
;; the (non)zeroness of the extracted bit is ever observed via reg 17.
(define_insn ""
  [(set (reg:SI 17)
	(sign_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			 (const_int 1)
			 (match_operand:SI 1 "mcore_literal_K_operand" "K")))]
  ""
  "btsti %0,%1"
  [(set_attr "type" "shift")])

(define_insn ""
  [(set (reg:SI 17)
	(zero_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			 (const_int 1)
			 (match_operand:SI 1 "mcore_literal_K_operand" "K")))]
  ""
  "btsti %0,%1"
  [(set_attr "type" "shift")])

;;; This is created by combine.
(define_insn ""
  [(set (reg:CC 17)
	(ne:CC (zero_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
				(const_int 1)
				(match_operand:SI 1 "mcore_literal_K_operand" "K"))
	       (const_int 0)))]
  ""
  "btsti %0,%1"
  [(set_attr "type" "shift")])


;; Created by combine from conditional patterns below (see sextb/btsti rx,31)

;; A logical shift right by 7 of a QImode value isolates its sign bit,
;; so testing bit 7 directly yields the same condition.
(define_insn ""
  [(set (reg:CC 17)
	(ne:CC (lshiftrt:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			    (const_int 7))
	       (const_int 0)))]
  "GET_CODE(operands[0]) == SUBREG &&
      GET_MODE(SUBREG_REG(operands[0])) == QImode"
  "btsti %0,7"
  [(set_attr "type" "shift")])

;; Same trick for HImode: bit 15 is the sign bit.
(define_insn ""
  [(set (reg:CC 17)
	(ne:CC (lshiftrt:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			    (const_int 15))
	       (const_int 0)))]
  "GET_CODE(operands[0]) == SUBREG &&
      GET_MODE(SUBREG_REG(operands[0])) == HImode"
  "btsti %0,15"
  [(set_attr "type" "shift")])

;; Split "branch if (bit == 0) != 0" (i.e. branch when the bit is
;; clear) into a bit test that loads reg 17 followed by a branch on
;; reg 17 == 0.
(define_split
  [(set (pc)
	(if_then_else (ne (eq:CC (zero_extract:SI
				  (match_operand:SI 0 "mcore_arith_reg_operand" "")
				  (const_int 1)
				  (match_operand:SI 1 "mcore_literal_K_operand" ""))
				 (const_int 0))
			  (const_int 0))
		      (label_ref (match_operand 2 "" ""))
		      (pc)))]
  ""
  [(set (reg:CC 17)
	(zero_extract:SI (match_dup 0) (const_int 1) (match_dup 1)))
   (set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
			   (label_ref (match_dup 2))
			   (pc)))]
  "")

;; Mirror split for the (eq (ne ...) 0) spelling of the same
;; condition; it produces the identical two-insn replacement.
(define_split
  [(set (pc)
	(if_then_else (eq (ne:CC (zero_extract:SI
				  (match_operand:SI 0 "mcore_arith_reg_operand" "")
				  (const_int 1)
				  (match_operand:SI 1 "mcore_literal_K_operand" ""))
				 (const_int 0))
			  (const_int 0))
		      (label_ref (match_operand 2 "" ""))
		      (pc)))]
  ""
  [(set (reg:CC 17)
	(zero_extract:SI (match_dup 0) (const_int 1) (match_dup 1)))
   (set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
			   (label_ref (match_dup 2))
			   (pc)))]
  "")
150 | |
;; XXX - disabled by nickc because it fails on libiberty/fnmatch.c
;;
;; ; Experimental - relax immediates for and, andn, or, and tst to allow
;; ; any immediate value (or an immediate at all -- or, andn, & tst).
;; ; This is done to allow bit field masks to fold together in combine.
;; ; The reload phase will force the immediate into a register at the
;; ; very end.  This helps in some cases, but hurts in others: we'd
;; ; really like to cse these immediates.  However, there is a phase
;; ; ordering problem here.  cse picks up individual masks and cse's
;; ; those, but not folded masks (cse happens before combine).  It's
;; ; not clear what the best solution is because we really want cse
;; ; before combine (leaving the bit field masks alone).  To pick up
;; ; relaxed immediates use -mrelax-immediates.  It might take some
;; ; experimenting to see which does better (i.e. regular imms vs.
;; ; arbitrary imms) for a particular code.  BRC
;;
;; (define_insn ""
;;   [(set (reg:CC 17)
;; 	(ne:CC (and:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
;; 		       (match_operand:SI 1 "mcore_arith_any_imm_operand" "rI"))
;; 	       (const_int 0)))]
;;   "TARGET_RELAX_IMM"
;;   "tst %0,%1")
;;
;; (define_insn ""
;;   [(set (reg:CC 17)
;; 	(ne:CC (and:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
;; 		       (match_operand:SI 1 "mcore_arith_M_operand" "r"))
;; 	       (const_int 0)))]
;;   "!TARGET_RELAX_IMM"
;;   "tst %0,%1")

;; Set the condition register from "(%0 & %1) != 0".  The M predicate
;; admits certain immediates, but the "r" constraint means reload will
;; always supply a register for operand 1.
(define_insn ""
  [(set (reg:CC 17)
	(ne:CC (and:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
		       (match_operand:SI 1 "mcore_arith_M_operand" "r"))
	       (const_int 0)))]
  ""
  "tst %0,%1")
190 | |
191 | |
;; Split a combined "((x leu y) != 0) != 0" test of the condition
;; register into two successive compares that each write reg 17; the
;; scratch operand 2 is discarded by the replacement.
;; NOTE(review): the replacement sets reg 17 first in SImode then in
;; CCmode -- confirm this mode mixing is intentional.
(define_split
  [(parallel[
      (set (reg:CC 17)
	   (ne:CC (ne:SI (leu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "")
				 (match_operand:SI 1 "mcore_arith_reg_operand" ""))
			 (const_int 0))
		  (const_int 0)))
      (clobber (match_operand:CC 2 "mcore_arith_reg_operand" ""))])]
  ""
  [(set (reg:CC 17) (ne:SI (match_dup 0) (const_int 0)))
   (set (reg:CC 17) (leu:CC (match_dup 0) (match_dup 1)))])
203 | |
;; -------------------------------------------------------------------------
;; SImode signed integer comparisons
;; -------------------------------------------------------------------------

;; Decrement operand 0 and set the condition register to
;; "result != 0".
(define_insn "decne_t"
  [(set (reg:CC 17) (ne:CC (plus:SI (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
				    (const_int -1))
			   (const_int 0)))
   (set (match_dup 0)
	(plus:SI (match_dup 0)
		 (const_int -1)))]
  ""
  "decne %0")

;; The combiner seems to prefer the following to the former:
;; "(x - 1) != 0" rewritten as "x != 1".
(define_insn ""
  [(set (reg:CC 17) (ne:CC (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
			   (const_int 1)))
   (set (match_dup 0)
	(plus:SI (match_dup 0)
		 (const_int -1)))]
  ""
  "decne %0")

(define_insn "cmpnesi_t"
  [(set (reg:CC 17) (ne:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			   (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
  ""
  "cmpne %0,%1")

(define_insn "cmpneisi_t"
  [(set (reg:CC 17) (ne:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			   (match_operand:SI 1 "mcore_arith_K_operand" "K")))]
  ""
  "cmpnei %0,%1")

;; "x > y" is emitted as "y < x": the comparison is done with cmplt
;; and swapped operands.
(define_insn "cmpgtsi_t"
  [(set (reg:CC 17) (gt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			   (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
  ""
  "cmplt %1,%0")

;; Decrement operand 0 and set the condition register to
;; "result > 0".
(define_insn ""
  [(set (reg:CC 17) (gt:CC (plus:SI
			    (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
			    (const_int -1))
			   (const_int 0)))
   (set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))]
  ""
  "decgt %0")

(define_insn "cmpltsi_t"
  [(set (reg:CC 17) (lt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			   (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
  ""
  "cmplt %0,%1")

; cmplti is 1-32
(define_insn "cmpltisi_t"
  [(set (reg:CC 17) (lt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			   (match_operand:SI 1 "mcore_arith_J_operand" "J")))]
  ""
  "cmplti %0,%1")

; covers cmplti x,0: a signed "x < 0" is just the sign bit, so test
; bit 31 directly.
(define_insn ""
  [(set (reg:CC 17) (lt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			   (const_int 0)))]
  ""
  "btsti %0,31")

;; Decrement operand 0 and set the condition register to
;; "result < 0".
(define_insn ""
  [(set (reg:CC 17) (lt:CC (plus:SI
			    (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
			    (const_int -1))
			   (const_int 0)))
   (set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))]
  ""
  "declt %0")
284 | |
;; -------------------------------------------------------------------------
;; SImode unsigned integer comparisons
;; -------------------------------------------------------------------------

(define_insn "cmpgeusi_t"
  [(set (reg:CC 17) (geu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			    (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
  ""
  "cmphs %0,%1")

;; Unsigned "x >= 0".  NOTE(review): this condition is always true,
;; yet the emitted cmpnei presumably sets the condition from
;; "x != 0" -- confirm no user of this pattern relies on the x == 0
;; case.
(define_insn "cmpgeusi_0"
  [(set (reg:CC 17) (geu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			    (const_int 0)))]
  ""
  "cmpnei %0, 0")

;; "x <= y" unsigned is emitted as "y >= x" with operands swapped.
(define_insn "cmpleusi_t"
  [(set (reg:CC 17) (leu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
			    (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
  ""
  "cmphs %1,%0")
306 | |
;; -------------------------------------------------------------------------
;; Logical operations
;; -------------------------------------------------------------------------

;; Logical AND clearing a single bit.  andsi3 knows that we have this
;; pattern and allows the constant literal pass through.
;;

;; RBE 2/97: don't need this pattern any longer...
;; RBE: I don't think we need both "S" and exact_log2() clauses.
;;(define_insn ""
;;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
;;	(and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
;;		(match_operand:SI 2 "const_int_operand" "S")))]
;;  "mcore_arith_S_operand (operands[2])"
;;  "bclri %0,%Q2")
;;

;; %0 = %2 & ~%1, with operand 2 tied to the destination register.
(define_insn "andnsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(and:SI (not:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))
		(match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
  ""
  "andn %0,%1")

;; Expand SImode AND.  A negative mask whose complement is an easily
;; loaded constant (I/M/N) is rewritten as andn with the complemented
;; constant placed in a register; any remaining awkward immediate is
;; simply forced into a register.
(define_expand "andsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		(match_operand:SI 2 "nonmemory_operand" "")))]
  ""
  "
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0
      && ! mcore_arith_S_operand (operands[2]))
    {
      HOST_WIDE_INT not_value = ~ INTVAL (operands[2]);

      if (   CONST_OK_FOR_I (not_value)
	  || CONST_OK_FOR_M (not_value)
	  || CONST_OK_FOR_N (not_value))
	{
	  operands[2] = copy_to_mode_reg (SImode, GEN_INT (not_value));
	  emit_insn (gen_andnsi3 (operands[0], operands[2], operands[1]));
	  DONE;
	}
    }

  if (! mcore_arith_K_S_operand (operands[2], SImode))
    operands[2] = copy_to_mode_reg (SImode, operands[2]);
}")

;; AND with relaxed immediates: register, K immediate, commuted
;; register form, or an S mask handled by mcore_output_bclri.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0,r,0")
		(match_operand:SI 2 "mcore_arith_any_imm_operand" "r,K,0,S")))]
  "TARGET_RELAX_IMM"
  "*
{
   switch (which_alternative)
     {
     case 0: return \"and %0,%2\";
     case 1: return \"andi %0,%2\";
     case 2: return \"and %0,%1\";
     /* case -1: return \"bclri %0,%Q2\"; will not happen */
     case 3: return mcore_output_bclri (operands[0], INTVAL (operands[2]));
     default: gcc_unreachable ();
     }
}")

;; This was the old "S" which was "!(2^n)" */
;; case -1: return \"bclri %0,%Q2\"; will not happen */

;; Same alternatives as above, restricted to K/S immediates when
;; immediate relaxation is disabled.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0,r,0")
		(match_operand:SI 2 "mcore_arith_K_S_operand" "r,K,0,S")))]
  "!TARGET_RELAX_IMM"
  "*
{
   switch (which_alternative)
     {
     case 0: return \"and %0,%2\";
     case 1: return \"andi %0,%2\";
     case 2: return \"and %0,%1\";
     case 3: return mcore_output_bclri (operands[0], INTVAL (operands[2]));
     default: gcc_unreachable ();
     }
}")
395 | |
;(define_insn "iorsi3"
; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
;	(ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
;		(match_operand:SI 2 "mcore_arith_reg_operand" "r")))]
;  ""
;  "or %0,%2")

; need an expand to resolve ambiguity betw. the two iors below.
;; Force any immediate that neither ior alternative can handle into a
;; register.
(define_expand "iorsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		(match_operand:SI 2 "nonmemory_operand" "")))]
  ""
  "
{
  if (! mcore_arith_M_operand (operands[2], SImode))
    operands[2] = copy_to_mode_reg (SImode, operands[2]);
}")

;; IOR with relaxed immediates: register, a single-bit M immediate
;; (bseti), or a T immediate handled by mcore_output_bseti.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
	(ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
		(match_operand:SI 2 "mcore_arith_any_imm_operand" "r,M,T")))]
  "TARGET_RELAX_IMM"
  "*
{
   switch (which_alternative)
     {
     case 0: return \"or %0,%2\";
     case 1: return \"bseti %0,%P2\";
     case 2: return mcore_output_bseti (operands[0], INTVAL (operands[2]));
     default: gcc_unreachable ();
     }
}")

;; Same alternatives, restricted to M immediates when immediate
;; relaxation is disabled.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
	(ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
		(match_operand:SI 2 "mcore_arith_M_operand" "r,M,T")))]
  "!TARGET_RELAX_IMM"
  "*
{
   switch (which_alternative)
     {
     case 0: return \"or %0,%2\";
     case 1: return \"bseti %0,%P2\";
     case 2: return mcore_output_bseti (operands[0], INTVAL (operands[2]));
     default: gcc_unreachable ();
     }
}")

;(define_insn ""
;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
;	(ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
;		(match_operand:SI 2 "const_int_operand" "M")))]
;  "exact_log2 (INTVAL (operands[2])) >= 0"
;  "bseti %0,%P2")

;(define_insn ""
;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
;	(ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
;		(match_operand:SI 2 "const_int_operand" "i")))]
;  "mcore_num_ones (INTVAL (operands[2])) < 3"
;  "* return mcore_output_bseti (operands[0], INTVAL (operands[2]));")

(define_insn "xorsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(xor:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
		(match_operand:SI 2 "mcore_arith_reg_operand" "r")))]
  ""
  "xor %0,%2")
467 | |
; These patterns give better code than what gcc invents if
; left to its own devices.

;; 64-bit logical ops: one SImode insn per 32-bit half.
;; NOTE(review): %R0/%R2 presumably select the other register of the
;; DI pair -- confirm against the %R case in print_operand.
(define_insn "anddi3"
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
	(and:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
		(match_operand:DI 2 "mcore_arith_reg_operand" "r")))]
  ""
  "and %0,%2\;and %R0,%R2"
  [(set_attr "length" "4")])

(define_insn "iordi3"
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
	(ior:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
		(match_operand:DI 2 "mcore_arith_reg_operand" "r")))]
  ""
  "or %0,%2\;or %R0,%R2"
  [(set_attr "length" "4")])

(define_insn "xordi3"
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
	(xor:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
		(match_operand:DI 2 "mcore_arith_reg_operand" "r")))]
  ""
  "xor %0,%2\;xor %R0,%R2"
  [(set_attr "length" "4")])
494 | |
;; -------------------------------------------------------------------------
;; Shifts and rotates
;; -------------------------------------------------------------------------

;; Only allow these if the shift count is a convenient constant;
;; otherwise FAIL so the generic expander takes over.
(define_expand "rotlsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(rotate:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		   (match_operand:SI 2 "nonmemory_operand" "")))]
  ""
  "if (! mcore_literal_K_operand (operands[2], SImode))
     FAIL;
  ")

;; We can only do constant rotates, which is what this pattern provides.
;; The combiner will put it together for us when we do:
;; (x << N) | (x >> (32 - N))
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(rotate:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
		   (match_operand:SI 2 "mcore_literal_K_operand" "K")))]
  ""
  "rotli %0,%2"
  [(set_attr "type" "shift")])

;; Left shift by a register or a non-zero K immediate.
(define_insn "ashlsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
	(ashift:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0")
		   (match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))]
  ""
  "@
	lsl %0,%2
	lsli %0,%2"
  [(set_attr "type" "shift")])

;; "1 << n" has its own insn: bgenr generates the single set bit
;; directly instead of shifting a loaded 1.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(ashift:SI (const_int 1)
		   (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
  ""
  "bgenr %0,%1"
  [(set_attr "type" "shift")])

;; Arithmetic right shift by a register or a non-zero K immediate.
(define_insn "ashrsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
	(ashiftrt:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0")
		     (match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))]
  ""
  "@
	asr %0,%2
	asri %0,%2"
  [(set_attr "type" "shift")])

;; Logical right shift by a register or a non-zero K immediate.
(define_insn "lshrsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
	(lshiftrt:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0")
		     (match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))]
  ""
  "@
	lsr %0,%2
	lsri %0,%2"
  [(set_attr "type" "shift")])

;(define_expand "ashldi3"
;  [(parallel[(set (match_operand:DI 0 "mcore_arith_reg_operand" "")
;		  (ashift:DI (match_operand:DI 1 "mcore_arith_reg_operand" "")
;			     (match_operand:DI 2 "immediate_operand" "")))
;
;	     (clobber (reg:CC 17))])]
;
;  ""
;  "
;{
;  if (GET_CODE (operands[2]) != CONST_INT
;      || INTVAL (operands[2]) != 1)
;    FAIL;
;}")
;
;(define_insn ""
;  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
;	(ashift:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
;		   (const_int 1)))
;   (clobber (reg:CC 17))]
;  ""
;  "lsli %R0,0\;rotli %0,0"
;  [(set_attr "length" "4") (set_attr "type" "shift")])
581 | |
;; -------------------------------------------------------------------------
;; Index instructions
;; -------------------------------------------------------------------------
;; The second of each set of patterns is borrowed from the alpha.md file.
;; These variants of the above insns can occur if the second operand
;; is the frame pointer.  This is a kludge, but there doesn't
;; seem to be a way around it.  Only recognize them while reloading.

;; We must use reload_operand for some operands in case frame pointer
;; elimination put a MEM with invalid address there.  Otherwise,
;; the result of the substitution will not match this pattern, and reload
;; will not be able to correctly fix the result.

;; indexing longlongs or doubles (8 bytes)

;; %0 = %1 * 8 + %2, with %2 tied to %0.  Two ixw's each add %1 * 4.
;; When %1 is the same register as %0/%2 that sequence would rescale
;; the already-updated base, so two ixh's are used instead: each one
;; triples the register (r + 2r), and 3 * 3 = 9 = 8 + 1, which is
;; exactly %1 * 8 + %2 for the aliased case.
(define_insn "indexdi_t"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(plus:SI (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
			  (const_int 8))
		 (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
  ""
  "*
    if (! mcore_is_same_reg (operands[1], operands[2]))
      {
        output_asm_insn (\"ixw\\t%0,%1\", operands);
        output_asm_insn (\"ixw\\t%0,%1\", operands);
      }
    else
      {
        output_asm_insn (\"ixh\\t%0,%1\", operands);
        output_asm_insn (\"ixh\\t%0,%1\", operands);
      }
    return \"\";
  "
;; if operands[1] == operands[2], the first option above is wrong! -- dac
;; was this... -- dac
;; ixw %0,%1\;ixw %0,%1"

  [(set_attr "length" "4")])

;; Reload-time variant: the same x8 indexing with an extra constant
;; or register displacement folded in.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_reload_operand" "=r,r,r")
	(plus:SI (plus:SI (mult:SI (match_operand:SI 1 "mcore_reload_operand" "r,r,r")
				   (const_int 8))
			  (match_operand:SI 2 "mcore_arith_reg_operand" "0,0,0"))
		 (match_operand:SI 3 "mcore_addsub_operand" "r,J,L")))]
  "reload_in_progress"
  "@
	ixw %0,%1\;ixw %0,%1\;addu %0,%3
	ixw %0,%1\;ixw %0,%1\;addi %0,%3
	ixw %0,%1\;ixw %0,%1\;subi %0,%M3"
  [(set_attr "length" "6")])

;; indexing longs (4 bytes)

;; %0 = %1 * 4 + %2: a single ixw.
(define_insn "indexsi_t"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(plus:SI (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
			  (const_int 4))
		 (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
  ""
  "ixw %0,%1")

(define_insn ""
  [(set (match_operand:SI 0 "mcore_reload_operand" "=r,r,r")
	(plus:SI (plus:SI (mult:SI (match_operand:SI 1 "mcore_reload_operand" "r,r,r")
				   (const_int 4))
			  (match_operand:SI 2 "mcore_arith_reg_operand" "0,0,0"))
		 (match_operand:SI 3 "mcore_addsub_operand" "r,J,L")))]
  "reload_in_progress"
  "@
	ixw %0,%1\;addu %0,%3
	ixw %0,%1\;addi %0,%3
	ixw %0,%1\;subi %0,%M3"
  [(set_attr "length" "4")])

;; indexing shorts (2 bytes)

;; %0 = %1 * 2 + %2: a single ixh.
(define_insn "indexhi_t"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(plus:SI (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
			  (const_int 2))
		 (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
  ""
  "ixh %0,%1")

(define_insn ""
  [(set (match_operand:SI 0 "mcore_reload_operand" "=r,r,r")
	(plus:SI (plus:SI (mult:SI (match_operand:SI 1 "mcore_reload_operand" "r,r,r")
				   (const_int 2))
			  (match_operand:SI 2 "mcore_arith_reg_operand" "0,0,0"))
		 (match_operand:SI 3 "mcore_addsub_operand" "r,J,L")))]
  "reload_in_progress"
  "@
	ixh %0,%1\;addu %0,%3
	ixh %0,%1\;addi %0,%3
	ixh %0,%1\;subi %0,%M3"
  [(set_attr "length" "4")])

;;
;; Other sizes may be handy for indexing.
;; the tradeoffs to consider when adding these are
;;	code size, execution time [vs. mul it is easy to win],
;;	and register pressure -- these patterns don't use an extra
;;	register to build the offset from the base
;;	and whether the compiler will not come up with some other idiom.
;;
689 | |
;; -------------------------------------------------------------------------
;; Addition, Subtraction instructions
;; -------------------------------------------------------------------------

;; Expand SImode add.  Frame-pointer adds are accepted as-is so the
;; fp/sp offset can be folded in later; large negative constants
;; become subtracts when the negated value is cheaper to load;
;; anything else awkward is forced into a register.
(define_expand "addsi3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		 (match_operand:SI 2 "nonmemory_operand" "")))]
  ""
  "
{
  /* If this is an add to the frame pointer, then accept it as is so
     that we can later fold in the fp/sp offset from frame pointer
     elimination.  */
  if (flag_omit_frame_pointer
      && GET_CODE (operands[1]) == REG
      && (REGNO (operands[1]) == VIRTUAL_STACK_VARS_REGNUM
	  || REGNO (operands[1]) == FRAME_POINTER_REGNUM))
    {
      emit_insn (gen_addsi3_fp (operands[0], operands[1], operands[2]));
      DONE;
    }

  /* Convert adds to subtracts if this makes loading the constant cheaper.
     But only if we are allowed to generate new pseudos.  */
  if (! (reload_in_progress || reload_completed)
      && GET_CODE (operands[2]) == CONST_INT
      && INTVAL (operands[2]) < -32)
    {
      HOST_WIDE_INT neg_value = - INTVAL (operands[2]);

      if (   CONST_OK_FOR_I (neg_value)
	  || CONST_OK_FOR_M (neg_value)
	  || CONST_OK_FOR_N (neg_value))
	{
	  operands[2] = copy_to_mode_reg (SImode, GEN_INT (neg_value));
	  emit_insn (gen_subsi3 (operands[0], operands[1], operands[2]));
	  DONE;
	}
    }

  if (! mcore_addsub_operand (operands[2], SImode))
    operands[2] = copy_to_mode_reg (SImode, operands[2]);
}")
734 | |
;; RBE: for some constants which are not in the range which allows
;; us to do a single operation, we will try a paired addi/addi instead
;; of a movi/addi.  This relieves some register pressure at the expense
;; of giving away some potential constant reuse.
;;
;; RBE 6/17/97: this didn't buy us anything, but I keep the pattern
;; for later reference
;;
;; (define_insn "addsi3_i2"
;;   [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
;; 	(plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
;; 		 (match_operand:SI 2 "const_int_operand" "g")))]
;;   "GET_CODE(operands[2]) == CONST_INT
;;    && ((INTVAL (operands[2]) > 32 && INTVAL(operands[2]) <= 64)
;;        || (INTVAL (operands[2]) < -32 && INTVAL(operands[2]) >= -64))"
;;   "*
;; {
;;    HOST_WIDE_INT n = INTVAL(operands[2]);
;;    if (n > 0)
;;      {
;;        operands[2] = GEN_INT(n - 32);
;;        return \"addi\\t%0,32\;addi\\t%0,%2\";
;;      }
;;    else
;;      {
;;        n = (-n);
;;        operands[2] = GEN_INT(n - 32);
;;        return \"subi\\t%0,32\;subi\\t%0,%2\";
;;      }
;; }"
;;  [(set_attr "length" "4")])

;; Three alternatives: register add, small positive immediate (J ->
;; addi), and a small negative immediate printed as a subtract
;; (L -> subi; %M presumably prints the negated value -- confirm
;; against print_operand).
(define_insn "addsi3_i"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
	(plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
		 (match_operand:SI 2 "mcore_addsub_operand" "r,J,L")))]
  ""
  "@
	addu %0,%2
	addi %0,%2
	subi %0,%M2")

;; This exists so that address computations based on the frame pointer
;; can be folded in when frame pointer elimination occurs.  Ordinarily
;; this would be bad because it allows insns which would require reloading,
;; but without it, we get multiple adds where one would do.
(define_insn "addsi3_fp"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
	(plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
		 (match_operand:SI 2 "immediate_operand" "r,J,L")))]
  "flag_omit_frame_pointer
   && (reload_in_progress || reload_completed || REGNO (operands[1]) == FRAME_POINTER_REGNUM)"
  "@
	addu %0,%2
	addi %0,%2
	subi %0,%M2")
792 | |
793 ;; RBE: for some constants which are not in the range which allows | |
794 ;; us to do a single operation, we will try a paired addi/addi instead | |
795 ;; of a movi/addi. This relieves some register pressure at the expense | |
796 ;; of giving away some potential constant reuse. | |
797 ;; | |
798 ;; RBE 6/17/97: this didn't buy us anything, but I keep the pattern | |
799 ;; for later reference | |
800 ;; | |
801 ;; (define_insn "subsi3_i2" | |
802 ;; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") | |
803 ;; (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0") | |
804 ;; (match_operand:SI 2 "const_int_operand" "g")))] | |
805 ;; "TARGET_RBETEST && GET_CODE(operands[2]) == CONST_INT | |
806 ;; && ((INTVAL (operands[2]) > 32 && INTVAL(operands[2]) <= 64) | |
807 ;; || (INTVAL (operands[2]) < -32 && INTVAL(operands[2]) >= -64))" | |
808 ;; "* | |
809 ;; { | |
810 ;; HOST_WIDE_INT n = INTVAL(operands[2]); | |
811 ;; if ( n > 0) | |
812 ;; { | |
813 ;; operands[2] = GEN_INT( n - 32); | |
814 ;; return \"subi\\t%0,32\;subi\\t%0,%2\"; | |
815 ;; } | |
816 ;; else | |
817 ;; { | |
818 ;; n = (-n); | |
819 ;; operands[2] = GEN_INT(n - 32); | |
820 ;; return \"addi\\t%0,32\;addi\\t%0,%2\"; | |
821 ;; } | |
822 ;; }" | |
823 ;; [(set_attr "length" "4")]) | |
824 | |
825 ;(define_insn "subsi3" | |
826 ; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r") | |
827 ; (minus:SI (match_operand:SI 1 "mcore_arith_K_operand" "0,0,r,K") | |
828 ; (match_operand:SI 2 "mcore_arith_J_operand" "r,J,0,0")))] | |
829 ; "" | |
830 ; "@ | |
831 ; sub %0,%2 | |
832 ; subi %0,%2 | |
833 ; rsub %0,%1 | |
834 ; rsubi %0,%1") | |
835 | |
;; SImode subtract.  Two-operand machine, so operand 1 or operand 2 must be
;; tied to the destination: subu/subi when op1 == op0, rsub (reverse
;; subtract) when op2 == op0.
836 (define_insn "subsi3" | |
837 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r") | |
838 (minus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0,r") | |
839 (match_operand:SI 2 "mcore_arith_J_operand" "r,J,0")))] | |
840 "" | |
841 "@ | |
842 subu %0,%2 | |
843 subi %0,%2 | |
844 rsub %0,%1") | |
845 | |
;; Reverse subtract immediate: constant (K-class literal) minus register.
846 (define_insn "" | |
847 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") | |
848 (minus:SI (match_operand:SI 1 "mcore_literal_K_operand" "K") | |
849 (match_operand:SI 2 "mcore_arith_reg_operand" "0")))] | |
850 "" | |
851 "rsubi %0,%1") | |
852 | |
;; DImode add.  The carry is propagated through the C bit (reg:CC 17),
;; which is why the pattern clobbers it; the initial cmplt of a register
;; with itself establishes the carry state before the addc chain
;; (presumably clearing C -- confirm against the MCore ISA manual).
;; %0/%2 vs. %R0/%R2 select the word halves, swapped by endianness.
853 (define_insn "adddi3" | |
854 [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r") | |
855 (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0") | |
856 (match_operand:DI 2 "mcore_arith_reg_operand" "r"))) | |
857 (clobber (reg:CC 17))] | |
858 "" | |
859 "* | |
860 { | |
861 if (TARGET_LITTLE_END) | |
862 return \"cmplt %0,%0\;addc %0,%2\;addc %R0,%R2\"; | |
863 return \"cmplt %R0,%R0\;addc %R0,%R2\;addc %0,%2\"; | |
864 }" | |
865 [(set_attr "length" "6")]) | |
866 | |
867 ;; special case for "longlong += 1" | |
;; Increment low word, then incf (increment-on-false) bumps the high word
;; only when the low word wrapped to zero (cmpnei leaves C false on zero).
868 (define_insn "" | |
869 [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r") | |
870 (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0") | |
871 (const_int 1))) | |
872 (clobber (reg:CC 17))] | |
873 "" | |
874 "* | |
875 { | |
876 if (TARGET_LITTLE_END) | |
877 return \"addi %0,1\;cmpnei %0,0\;incf %R0\"; | |
878 return \"addi %R0,1\;cmpnei %R0,0\;incf %0\"; | |
879 }" | |
880 [(set_attr "length" "6")]) | |
881 | |
882 ;; special case for "longlong -= 1" | |
;; Mirror of the above: decf borrows from the high word only when the low
;; word is about to underflow past zero.
883 (define_insn "" | |
884 [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r") | |
885 (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0") | |
886 (const_int -1))) | |
887 (clobber (reg:CC 17))] | |
888 "" | |
889 "* | |
890 { | |
891 if (TARGET_LITTLE_END) | |
892 return \"cmpnei %0,0\;decf %R0\;subi %0,1\"; | |
893 return \"cmpnei %R0,0\;decf %0\;subi %R0,1\"; | |
894 }" | |
895 [(set_attr "length" "6")]) | |
896 | |
897 ;; special case for "longlong += const_int" | |
898 ;; we have to use a register for the const_int because we don't | |
899 ;; have an unsigned compare immediate... only +/- 1 get to | |
900 ;; play the no-extra register game because they compare with 0. | |
901 ;; This winds up working out for any literal that is synthesized | |
902 ;; with a single instruction. The more complicated ones look | |
903 ;; like they get broken into subreg's to get initialized too soon | |
904 ;; for us to catch here. -- RBE 4/25/96 | |
905 ;; only allow for-sure positive values. | |
906 | |
;; Carry-out is detected with an unsigned compare: after addu, result <
;; addend (cmphs false) iff the add wrapped, so incf bumps the high word.
;; The condition restricts the constant to known-positive 31-bit values.
907 (define_insn "" | |
908 [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r") | |
909 (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0") | |
910 (match_operand:SI 2 "const_int_operand" "r"))) | |
911 (clobber (reg:CC 17))] | |
912 "GET_CODE (operands[2]) == CONST_INT | |
913 && INTVAL (operands[2]) > 0 && ! (INTVAL (operands[2]) & 0x80000000)" | |
914 "* | |
915 { | |
916 gcc_assert (GET_MODE (operands[2]) == SImode); | |
917 if (TARGET_LITTLE_END) | |
918 return \"addu %0,%2\;cmphs %0,%2\;incf %R0\"; | |
919 return \"addu %R0,%2\;cmphs %R0,%2\;incf %0\"; | |
920 }" | |
921 [(set_attr "length" "6")]) | |
922 | |
923 ;; optimize "long long" + "unsigned long" | |
924 ;; won't trigger because of how the extension is expanded upstream. | |
925 ;; (define_insn "" | |
926 ;; [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r") | |
927 ;; (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0") | |
928 ;; (zero_extend:DI (match_operand:SI 2 "mcore_arith_reg_operand" "r")))) | |
929 ;; (clobber (reg:CC 17))] | |
930 ;; "0" | |
931 ;; "cmplt %R0,%R0\;addc %R0,%2\;inct %0" | |
932 ;; [(set_attr "length" "6")]) | |
933 | |
934 ;; optimize "long long" + "signed long" | |
935 ;; won't trigger because of how the extension is expanded upstream. | |
936 ;; (define_insn "" | |
937 ;; [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r") | |
938 ;; (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0") | |
939 ;; (sign_extend:DI (match_operand:SI 2 "mcore_arith_reg_operand" "r")))) | |
940 ;; (clobber (reg:CC 17))] | |
941 ;; "0" | |
942 ;; "cmplt %R0,%R0\;addc %R0,%2\;inct %0\;btsti %2,31\;dect %0" | |
943 ;; [(set_attr "length" "6")]) | |
944 | |
;; DImode subtract: borrow propagated via the C bit, analogous to adddi3
;; above, using cmphs to establish the initial carry state before the
;; subc chain.  Word order again depends on TARGET_LITTLE_END.
945 (define_insn "subdi3" | |
946 [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r") | |
947 (minus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0") | |
948 (match_operand:DI 2 "mcore_arith_reg_operand" "r"))) | |
949 (clobber (reg:CC 17))] | |
950 "" | |
951 "* | |
952 { | |
953 if (TARGET_LITTLE_END) | |
954 return \"cmphs %0,%0\;subc %0,%2\;subc %R0,%R2\"; | |
955 return \"cmphs %R0,%R0\;subc %R0,%R2\;subc %0,%2\"; | |
956 }" | |
957 [(set_attr "length" "6")]) | |
958 | |
959 ;; ------------------------------------------------------------------------- | |
960 ;; Multiplication instructions | |
961 ;; ------------------------------------------------------------------------- | |
962 | |
;; 32x32 -> 32 multiply; mult is two-operand so operand 1 is tied to the
;; destination and the operation is commutative (% on operand 1).
963 (define_insn "mulsi3" | |
964 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") | |
965 (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0") | |
966 (match_operand:SI 2 "mcore_arith_reg_operand" "r")))] | |
967 "" | |
968 "mult %0,%2") | |
969 | |
970 ;; | |
971 ;; 32/32 signed division -- added to the MCORE instruction set spring 1997 | |
972 ;; | |
973 ;; Different constraints based on the architecture revision... | |
974 ;; | |
;; Expander only gates on TARGET_DIV; the matching insn below imposes the
;; revision-specific register constraint.
975 (define_expand "divsi3" | |
976 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") | |
977 (div:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") | |
978 (match_operand:SI 2 "mcore_arith_reg_operand" "")))] | |
979 "TARGET_DIV" | |
980 "") | |
981 | |
982 ;; MCORE Revision 1.50: restricts the divisor to be in r1. (6/97) | |
983 ;; | |
;; (constraint "b" is the divisor register class enforcing that restriction)
984 (define_insn "" | |
985 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") | |
986 (div:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0") | |
987 (match_operand:SI 2 "mcore_arith_reg_operand" "b")))] | |
988 "TARGET_DIV" | |
989 "divs %0,%2") | |
990 | |
991 ;; | |
992 ;; 32/32 unsigned division -- added to the MCORE instruction set spring 1997 | |
993 ;; | |
994 ;; Different constraints based on the architecture revision... | |
995 ;; | |
996 (define_expand "udivsi3" | |
997 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") | |
998 (udiv:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") | |
999 (match_operand:SI 2 "mcore_arith_reg_operand" "")))] | |
1000 "TARGET_DIV" | |
1001 "") | |
1002 | |
1003 ;; MCORE Revision 1.50: restricts the divisor to be in r1. | |
1004 (define_insn "" | |
1005 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") | |
1006 (udiv:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0") | |
1007 (match_operand:SI 2 "mcore_arith_reg_operand" "b")))] | |
1008 "TARGET_DIV" | |
1009 "divu %0,%2") | |
1010 | |
1011 ;; ------------------------------------------------------------------------- | |
1012 ;; Unary arithmetic | |
1013 ;; ------------------------------------------------------------------------- | |
1014 | |
;; SImode negate: no dedicated neg instruction, so emit reverse-subtract
;; from the immediate 0 (0 - reg).
1015 (define_insn "negsi2" | |
1016 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") | |
1017 (neg:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")))] | |
1018 "" | |
1019 "* | |
1020 { | |
1021 return \"rsubi %0,0\"; | |
1022 }") | |
1023 | |
1024 | |
;; Absolute value has direct hardware support.
1025 (define_insn "abssi2" | |
1026 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") | |
1027 (abs:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")))] | |
1028 "" | |
1029 "abs %0") | |
1030 | |
;; DImode negate: negate the low word, complement the high word, and use
;; incf to add the borrow conditionally (cmpnei sets up the condition on
;; whether the low word was zero).  Clobbers the C bit.
1031 (define_insn "negdi2" | |
1032 [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r") | |
1033 (neg:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0"))) | |
1034 (clobber (reg:CC 17))] | |
1035 "" | |
1036 "* | |
1037 { | |
1038 if (TARGET_LITTLE_END) | |
1039 return \"cmpnei %0,0\\n\\trsubi %0,0\\n\\tnot %R0\\n\\tincf %R0\"; | |
1040 return \"cmpnei %R0,0\\n\\trsubi %R0,0\\n\\tnot %0\\n\\tincf %0\"; | |
1041 }" | |
1042 [(set_attr "length" "8")]) | |
1043 | |
;; Bitwise complement.
1044 (define_insn "one_cmplsi2" | |
1045 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") | |
1046 (not:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")))] | |
1047 "" | |
1048 "not %0") | |
1049 | |
1050 ;; ------------------------------------------------------------------------- | |
1051 ;; Zero extension instructions | |
1052 ;; ------------------------------------------------------------------------- | |
1053 | |
;; HI -> SI zero extension: zexth for a register source, ld.h for memory
;; (the load zero-extends for free -- see the combiner patterns below).
1054 (define_expand "zero_extendhisi2" | |
1055 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") | |
1056 (zero_extend:SI (match_operand:HI 1 "mcore_arith_reg_operand" "")))] | |
1057 "" | |
1058 "") | |
1059 | |
1060 (define_insn "" | |
1061 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r") | |
1062 (zero_extend:SI (match_operand:HI 1 "general_operand" "0,m")))] | |
1063 "" | |
1064 "@ | |
1065 zexth %0 | |
1066 ld.h %0,%1" | |
1067 [(set_attr "type" "shift,load")]) | |
1068 | |
1069 ;; ldh gives us a free zero-extension. The combiner picks up on this. | |
1070 (define_insn "" | |
1071 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") | |
1072 (zero_extend:SI (mem:HI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))))] | |
1073 "" | |
1074 "ld.h %0,(%1)" | |
1075 [(set_attr "type" "load")]) | |
1076 | |
;; Register+offset form; the condition limits the offset to the encodable
;; displacement range for ld.h (even, 0..30).
1077 (define_insn "" | |
1078 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") | |
1079 (zero_extend:SI (mem:HI (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") | |
1080 (match_operand:SI 2 "const_int_operand" "")))))] | |
1081 "(INTVAL (operands[2]) >= 0) && | |
1082 (INTVAL (operands[2]) < 32) && | |
1083 ((INTVAL (operands[2])&1) == 0)" | |
1084 "ld.h %0,(%1,%2)" | |
1085 [(set_attr "type" "load")]) | |
1086 | |
;; QI -> SI zero extension; same structure as the HImode patterns above.
1087 (define_expand "zero_extendqisi2" | |
1088 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") | |
1089 (zero_extend:SI (match_operand:QI 1 "general_operand" "")))] | |
1090 "" | |
1091 "") | |
1092 | |
1093 ;; RBE: XXX: we don't recognize that the xtrb3 kills the CC register. | |
1094 (define_insn "" | |
1095 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b,r") | |
1096 (zero_extend:SI (match_operand:QI 1 "general_operand" "0,r,m")))] | |
1097 "" | |
1098 "@ | |
1099 zextb %0 | |
1100 xtrb3 %0,%1 | |
1101 ld.b %0,%1" | |
1102 [(set_attr "type" "shift,shift,load")]) | |
1103 | |
1104 ;; ldb gives us a free zero-extension. The combiner picks up on this. | |
1105 (define_insn "" | |
1106 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") | |
1107 (zero_extend:SI (mem:QI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))))] | |
1108 "" | |
1109 "ld.b %0,(%1)" | |
1110 [(set_attr "type" "load")]) | |
1111 | |
;; Register+offset byte load; offset range is 0..15 for ld.b.
1112 (define_insn "" | |
1113 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") | |
1114 (zero_extend:SI (mem:QI (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") | |
1115 (match_operand:SI 2 "const_int_operand" "")))))] | |
1116 "(INTVAL (operands[2]) >= 0) && | |
1117 (INTVAL (operands[2]) < 16)" | |
1118 "ld.b %0,(%1,%2)" | |
1119 [(set_attr "type" "load")]) | |
1120 | |
;; QI -> HI zero extension; the same byte instructions serve since the
;; extension is performed in a full register anyway.
1121 (define_expand "zero_extendqihi2" | |
1122 [(set (match_operand:HI 0 "mcore_arith_reg_operand" "") | |
1123 (zero_extend:HI (match_operand:QI 1 "general_operand" "")))] | |
1124 "" | |
1125 "") | |
1126 | |
1127 ;; RBE: XXX: we don't recognize that the xtrb3 kills the CC register. | |
1128 (define_insn "" | |
1129 [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r,b,r") | |
1130 (zero_extend:HI (match_operand:QI 1 "general_operand" "0,r,m")))] | |
1131 "" | |
1132 "@ | |
1133 zextb %0 | |
1134 xtrb3 %0,%1 | |
1135 ld.b %0,%1" | |
1136 [(set_attr "type" "shift,shift,load")]) | |
1137 | |
1138 ;; ldb gives us a free zero-extension. The combiner picks up on this. | |
1139 ;; this doesn't catch references that are into a structure. | |
1140 ;; note that normally the compiler uses the above insn, unless it turns | |
1141 ;; out that we're dealing with a volatile... | |
1142 (define_insn "" | |
1143 [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r") | |
1144 (zero_extend:HI (mem:QI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))))] | |
1145 "" | |
1146 "ld.b %0,(%1)" | |
1147 [(set_attr "type" "load")]) | |
1148 | |
;; Register+offset variant of the HImode byte load.
1149 (define_insn "" | |
1150 [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r") | |
1151 (zero_extend:HI (mem:QI (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") | |
1152 (match_operand:SI 2 "const_int_operand" "")))))] | |
1153 "(INTVAL (operands[2]) >= 0) && | |
1154 (INTVAL (operands[2]) < 16)" | |
1155 "ld.b %0,(%1,%2)" | |
1156 [(set_attr "type" "load")]) | |
1157 | |
1158 | |
1159 ;; ------------------------------------------------------------------------- | |
1160 ;; Sign extension instructions | |
1161 ;; ------------------------------------------------------------------------- | |
1162 | |
;; SI -> DI sign extension, expanded as two SImode sets: copy the source
;; into the low word, then fill the high word with the low word shifted
;; right arithmetically by 31 (all sign bits).  Byte offsets of the low
;; and high subregs are swapped according to endianness.
1163 (define_expand "extendsidi2" | |
1164 [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r") | |
1165 (match_operand:SI 1 "mcore_arith_reg_operand" "r"))] | |
1166 "" | |
1167 " | |
1168 { | |
1169 int low, high; | |
1170 | |
1171 if (TARGET_LITTLE_END) | |
1172 low = 0, high = 4; | |
1173 else | |
1174 low = 4, high = 0; | |
1175 | |
1176 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], low), | |
1177 operands[1])); | |
1178 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], high), | |
1179 gen_rtx_ASHIFTRT (SImode, | |
1180 gen_rtx_SUBREG (SImode, operands[0], low), | |
1181 GEN_INT (31)))); | |
1182 DONE; | |
1183 }" | |
1184 ) | |
1185 | |
;; In-register sign extensions: direct hardware support (sexth/sextb).
1186 (define_insn "extendhisi2" | |
1187 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") | |
1188 (sign_extend:SI (match_operand:HI 1 "mcore_arith_reg_operand" "0")))] | |
1189 "" | |
1190 "sexth %0") | |
1191 | |
1192 (define_insn "extendqisi2" | |
1193 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") | |
1194 (sign_extend:SI (match_operand:QI 1 "mcore_arith_reg_operand" "0")))] | |
1195 "" | |
1196 "sextb %0") | |
1197 | |
;; QI -> HI uses the same byte sign-extend instruction as QI -> SI.
1198 (define_insn "extendqihi2" | |
1199 [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r") | |
1200 (sign_extend:HI (match_operand:QI 1 "mcore_arith_reg_operand" "0")))] | |
1201 "" | |
1202 "sextb %0") | |
1203 | |
1204 ;; ------------------------------------------------------------------------- | |
1205 ;; Move instructions | |
1206 ;; ------------------------------------------------------------------------- | |
1207 | |
1208 ;; SImode | |
1209 | |
;; SImode move expander: stores require a register source (no mem-to-mem
;; or immediate-to-mem moves), so force the source into a register first.
1210 (define_expand "movsi" | |
1211 [(set (match_operand:SI 0 "general_operand" "") | |
1212 (match_operand:SI 1 "general_operand" ""))] | |
1213 "" | |
1214 " | |
1215 { | |
1216 if (GET_CODE (operands[0]) == MEM) | |
1217 operands[1] = force_reg (SImode, operands[1]); | |
1218 }") | |
1219 | |
;; Actual SImode move; one of the two operands must be a register.
;; Assembly output is produced by mcore_output_move in mcore.c.
1220 (define_insn "" | |
1221 [(set (match_operand:SI 0 "mcore_general_movdst_operand" "=r,r,a,r,a,r,m") | |
1222 (match_operand:SI 1 "mcore_general_movsrc_operand" "r,P,i,c,R,m,r"))] | |
1223 "(register_operand (operands[0], SImode) | |
1224 || register_operand (operands[1], SImode))" | |
1225 "* return mcore_output_move (insn, operands, SImode);" | |
1226 [(set_attr "type" "move,move,move,move,load,load,store")]) | |
1227 | |
1228 ;; | |
1229 ;; HImode | |
1230 ;; | |
1231 | |
;; HImode move expander.  Constants that no single instruction can
;; materialize (not I/M/N-class) are first built in a fresh SImode
;; register via movsi, then the low part is used -- but only before
;; reload, since no new pseudos may be created during/after reload.
1232 (define_expand "movhi" | |
1233 [(set (match_operand:HI 0 "general_operand" "") | |
1234 (match_operand:HI 1 "general_operand" ""))] | |
1235 "" | |
1236 " | |
1237 { | |
1238 if (GET_CODE (operands[0]) == MEM) | |
1239 operands[1] = force_reg (HImode, operands[1]); | |
1240 else if (CONSTANT_P (operands[1]) | |
1241 && (GET_CODE (operands[1]) != CONST_INT | |
1242 || (! CONST_OK_FOR_I (INTVAL (operands[1])) | |
1243 && ! CONST_OK_FOR_M (INTVAL (operands[1])) | |
1244 && ! CONST_OK_FOR_N (INTVAL (operands[1])))) | |
1245 && ! reload_completed && ! reload_in_progress) | |
1246 { | |
1247 rtx reg = gen_reg_rtx (SImode); | |
1248 emit_insn (gen_movsi (reg, operands[1])); | |
1249 operands[1] = gen_lowpart (HImode, reg); | |
1250 } | |
1251 }") | |
1252 | |
1253 (define_insn "" | |
1254 [(set (match_operand:HI 0 "mcore_general_movdst_operand" "=r,r,a,r,r,m") | |
1255 (match_operand:HI 1 "mcore_general_movsrc_operand" "r,P,i,c,m,r"))] | |
1256 "(register_operand (operands[0], HImode) | |
1257 || register_operand (operands[1], HImode))" | |
1258 "* return mcore_output_move (insn, operands, HImode);" | |
1259 [(set_attr "type" "move,move,move,move,load,store")]) | |
1260 | |
1261 ;; | |
1262 ;; QImode | |
1263 ;; | |
1264 | |
;; QImode move expander; identical strategy to movhi above.
1265 (define_expand "movqi" | |
1266 [(set (match_operand:QI 0 "general_operand" "") | |
1267 (match_operand:QI 1 "general_operand" ""))] | |
1268 "" | |
1269 " | |
1270 { | |
1271 if (GET_CODE (operands[0]) == MEM) | |
1272 operands[1] = force_reg (QImode, operands[1]); | |
1273 else if (CONSTANT_P (operands[1]) | |
1274 && (GET_CODE (operands[1]) != CONST_INT | |
1275 || (! CONST_OK_FOR_I (INTVAL (operands[1])) | |
1276 && ! CONST_OK_FOR_M (INTVAL (operands[1])) | |
1277 && ! CONST_OK_FOR_N (INTVAL (operands[1])))) | |
1278 && ! reload_completed && ! reload_in_progress) | |
1279 { | |
1280 rtx reg = gen_reg_rtx (SImode); | |
1281 emit_insn (gen_movsi (reg, operands[1])); | |
1282 operands[1] = gen_lowpart (QImode, reg); | |
1283 } | |
1284 }") | |
1285 | |
1286 (define_insn "" | |
1287 [(set (match_operand:QI 0 "mcore_general_movdst_operand" "=r,r,a,r,r,m") | |
1288 (match_operand:QI 1 "mcore_general_movsrc_operand" "r,P,i,c,m,r"))] | |
1289 "(register_operand (operands[0], QImode) | |
1290 || register_operand (operands[1], QImode))" | |
1291 "* return mcore_output_move (insn, operands, QImode);" | |
1292 [(set_attr "type" "move,move,move,move,load,store")]) | |
1293 | |
1294 | |
1295 ;; DImode | |
1296 | |
;; DImode move expander.  Hard constants that no single instruction can
;; load (not I/M/N-class) are split into two word-sized moves so each half
;; can be synthesized independently.
1297 (define_expand "movdi" | |
1298 [(set (match_operand:DI 0 "general_operand" "") | |
1299 (match_operand:DI 1 "general_operand" ""))] | |
1300 "" | |
1301 " | |
1302 { | |
1303 if (GET_CODE (operands[0]) == MEM) | |
1304 operands[1] = force_reg (DImode, operands[1]); | |
1305 else if (GET_CODE (operands[1]) == CONST_INT | |
1306 && ! CONST_OK_FOR_I (INTVAL (operands[1])) | |
1307 && ! CONST_OK_FOR_M (INTVAL (operands[1])) | |
1308 && ! CONST_OK_FOR_N (INTVAL (operands[1]))) | |
1309 { | |
1310 int i; | |
1311 for (i = 0; i < UNITS_PER_WORD * 2; i += UNITS_PER_WORD) | |
1312 emit_move_insn (simplify_gen_subreg (SImode, operands[0], DImode, i), | |
1313 simplify_gen_subreg (SImode, operands[1], DImode, i)); | |
1314 DONE; | |
1315 } | |
1316 }") | |
1317 | |
;; Double-word move; assembly is produced by mcore_output_movedouble.
;; "length" "4" = two 2-byte instructions, one per word.
1318 (define_insn "movdi_i" | |
1319 [(set (match_operand:DI 0 "general_operand" "=r,r,r,r,a,r,m") | |
1320 (match_operand:DI 1 "mcore_general_movsrc_operand" "I,M,N,r,R,m,r"))] | |
1321 "" | |
1322 "* return mcore_output_movedouble (operands, DImode);" | |
1323 [(set_attr "length" "4") (set_attr "type" "move,move,move,move,load,load,store")]) | |
1324 | |
1325 ;; SFmode | |
1326 | |
;; No FPU: SFmode values travel through the integer registers, so the
;; move patterns are plain word moves/loads/stores.
1327 (define_expand "movsf" | |
1328 [(set (match_operand:SF 0 "general_operand" "") | |
1329 (match_operand:SF 1 "general_operand" ""))] | |
1330 "" | |
1331 " | |
1332 { | |
1333 if (GET_CODE (operands[0]) == MEM) | |
1334 operands[1] = force_reg (SFmode, operands[1]); | |
1335 }") | |
1336 | |
1337 (define_insn "movsf_i" | |
1338 [(set (match_operand:SF 0 "general_operand" "=r,r,m") | |
1339 (match_operand:SF 1 "general_operand" "r,m,r"))] | |
1340 "" | |
1341 "@ | |
1342 mov %0,%1 | |
1343 ld.w %0,%1 | |
1344 st.w %1,%0" | |
1345 [(set_attr "type" "move,load,store")]) | |
1346 | |
1347 ;; DFmode | |
1348 | |
;; DFmode likewise uses integer register pairs; the double-word mover
;; emits the two word moves.
1349 (define_expand "movdf" | |
1350 [(set (match_operand:DF 0 "general_operand" "") | |
1351 (match_operand:DF 1 "general_operand" ""))] | |
1352 "" | |
1353 " | |
1354 { | |
1355 if (GET_CODE (operands[0]) == MEM) | |
1356 operands[1] = force_reg (DFmode, operands[1]); | |
1357 }") | |
1358 | |
1359 (define_insn "movdf_k" | |
1360 [(set (match_operand:DF 0 "general_operand" "=r,r,m") | |
1361 (match_operand:DF 1 "general_operand" "r,m,r"))] | |
1362 "" | |
1363 "* return mcore_output_movedouble (operands, DFmode);" | |
1364 [(set_attr "length" "4") (set_attr "type" "move,load,store")]) | |
1365 | |
1366 | |
1367 ;; Load/store multiple | |
1368 | |
1369 ;; ??? This is not currently used. | |
;; Quad-register load (TImode = 4 words) via ldq.  %U prints the register
;; range -- presumably r4-r7; confirm against mcore.c's print_operand.
;; ??? This is not currently used.
1370 (define_insn "ldm" | |
1371 [(set (match_operand:TI 0 "mcore_arith_reg_operand" "=r") | |
1372 (mem:TI (match_operand:SI 1 "mcore_arith_reg_operand" "r")))] | |
1373 "" | |
1374 "ldq %U0,(%1)") | |
1375 | |
1376 ;; ??? This is not currently used. | |
1377 (define_insn "stm" | |
1378 [(set (mem:TI (match_operand:SI 0 "mcore_arith_reg_operand" "r")) | |
1379 (match_operand:TI 1 "mcore_arith_reg_operand" "r"))] | |
1380 "" | |
1381 "stq %U1,(%0)") | |
1382 | |
;; Build a PARALLEL of word loads for the ldm instruction.  The hardware
;; form only handles a contiguous register block ending at r15, loaded
;; from the stack pointer, hence the strict FAIL conditions.
1383 (define_expand "load_multiple" | |
1384 [(match_par_dup 3 [(set (match_operand:SI 0 "" "") | |
1385 (match_operand:SI 1 "" "")) | |
1386 (use (match_operand:SI 2 "" ""))])] | |
1387 "" | |
1388 " | |
1389 { | |
1390 int regno, count, i; | |
1391 | |
1392 /* Support only loading a constant number of registers from memory and | |
1393 only if at least two registers. The last register must be r15. */ | |
1394 if (GET_CODE (operands[2]) != CONST_INT | |
1395 || INTVAL (operands[2]) < 2 | |
1396 || GET_CODE (operands[1]) != MEM | |
1397 || XEXP (operands[1], 0) != stack_pointer_rtx | |
1398 || GET_CODE (operands[0]) != REG | |
1399 || REGNO (operands[0]) + INTVAL (operands[2]) != 16) | |
1400 FAIL; | |
1401 | |
1402 count = INTVAL (operands[2]); | |
1403 regno = REGNO (operands[0]); | |
1404 | |
1405 operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count)); | |
1406 | |
1407 for (i = 0; i < count; i++) | |
1408 XVECEXP (operands[3], 0, i) | |
1409 = gen_rtx_SET (VOIDmode, | |
1410 gen_rtx_REG (SImode, regno + i), | |
1411 gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, | |
1412 i * 4))); | |
1413 }") | |
1414 | |
;; Matches the PARALLEL built above; verified by the
;; mcore_load_multiple_operation predicate.
1415 (define_insn "" | |
1416 [(match_parallel 0 "mcore_load_multiple_operation" | |
1417 [(set (match_operand:SI 1 "mcore_arith_reg_operand" "=r") | |
1418 (mem:SI (match_operand:SI 2 "register_operand" "r")))])] | |
1419 "GET_CODE (operands[2]) == REG && REGNO (operands[2]) == STACK_POINTER_REGNUM" | |
1420 "ldm %1-r15,(%2)") | |
1421 | |
;; Store-multiple mirror of load_multiple, for the stm instruction.
1422 (define_expand "store_multiple" | |
1423 [(match_par_dup 3 [(set (match_operand:SI 0 "" "") | |
1424 (match_operand:SI 1 "" "")) | |
1425 (use (match_operand:SI 2 "" ""))])] | |
1426 "" | |
1427 " | |
1428 { | |
1429 int regno, count, i; | |
1430 | |
1431 /* Support only storing a constant number of registers to memory and | |
1432 only if at least two registers. The last register must be r15. */ | |
1433 if (GET_CODE (operands[2]) != CONST_INT | |
1434 || INTVAL (operands[2]) < 2 | |
1435 || GET_CODE (operands[0]) != MEM | |
1436 || XEXP (operands[0], 0) != stack_pointer_rtx | |
1437 || GET_CODE (operands[1]) != REG | |
1438 || REGNO (operands[1]) + INTVAL (operands[2]) != 16) | |
1439 FAIL; | |
1440 | |
1441 count = INTVAL (operands[2]); | |
1442 regno = REGNO (operands[1]); | |
1443 | |
1444 operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count)); | |
1445 | |
1446 for (i = 0; i < count; i++) | |
1447 XVECEXP (operands[3], 0, i) | |
1448 = gen_rtx_SET (VOIDmode, | |
1449 gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, | |
1450 i * 4)), | |
1451 gen_rtx_REG (SImode, regno + i)); | |
1452 }") | |
1453 | |
1454 (define_insn "" | |
1455 [(match_parallel 0 "mcore_store_multiple_operation" | |
1456 [(set (mem:SI (match_operand:SI 2 "register_operand" "r")) | |
1457 (match_operand:SI 1 "mcore_arith_reg_operand" "r"))])] | |
1458 "GET_CODE (operands[2]) == REG && REGNO (operands[2]) == STACK_POINTER_REGNUM" | |
1459 "stm %1-r15,(%2)") | |
1460 | |
1461 ;; ------------------------------------------------------------------------ | |
1462 ;; Define the real conditional branch instructions. | |
1463 ;; ------------------------------------------------------------------------ | |
1464 | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1465 ;; At top-level, condition test are eq/ne, because we |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1466 ;; are comparing against the condition register (which |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1467 ;; has the result of the true relational test |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1468 |
;; Conditional branches test the single condition bit (reg:CC 17):
;; jbt branches when the bit is set, jbf when it is clear.
0 | 1469 (define_insn "branch_true" |
1470 [(set (pc) (if_then_else (ne (reg:CC 17) (const_int 0)) | |
1471 (label_ref (match_operand 0 "" "")) | |
1472 (pc)))] | |
1473 "" | |
1474 "jbt %l0" | |
1475 [(set_attr "type" "brcond")]) | |
1476 | |
1477 (define_insn "branch_false" | |
1478 [(set (pc) (if_then_else (eq (reg:CC 17) (const_int 0)) | |
1479 (label_ref (match_operand 0 "" "")) | |
1480 (pc)))] | |
1481 "" | |
1482 "jbf %l0" | |
1483 [(set_attr "type" "brcond")]) | |
1484 | |
;; "Inverse" forms: the label is the fall-through arm, so the emitted
;; branch tests the opposite sense of the condition bit.
1485 (define_insn "inverse_branch_true" | |
1486 [(set (pc) (if_then_else (ne (reg:CC 17) (const_int 0)) | |
1487 (pc) | |
1488 (label_ref (match_operand 0 "" ""))))] | |
1489 "" | |
1490 "jbf %l0" | |
1491 [(set_attr "type" "brcond")]) | |
1492 | |
1493 (define_insn "inverse_branch_false" | |
1494 [(set (pc) (if_then_else (eq (reg:CC 17) (const_int 0)) | |
1495 (pc) | |
1496 (label_ref (match_operand 0 "" ""))))] | |
1497 "" | |
1498 "jbt %l0" | |
1499 [(set_attr "type" "brcond")]) | |
1500 | |
1501 ;; Conditional branch insns | |
1502 | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1503 (define_expand "cbranchsi4" |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1504 [(set (pc) |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1505 (if_then_else (match_operator:SI 0 "ordered_comparison_operator" |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1506 [(match_operand:SI 1 "mcore_compare_operand") |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1507 (match_operand:SI 2 "nonmemory_operand")]) |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1508 (label_ref (match_operand 3 "")) |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1509 (pc)))] |
0 | 1510 "" |
1511 " | |
1512 { | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1513 bool invert; |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1514 invert = mcore_gen_compare (GET_CODE (operands[0]), |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1515 operands[1], operands[2]); |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1516 |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1517 if (invert) |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1518 emit_jump_insn (gen_branch_false (operands[3])); |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1519 else |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1520 emit_jump_insn (gen_branch_true (operands[3])); |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1521 DONE; |
0 | 1522 }") |
1523 | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
1524 |
0 | 1525 |
1526 ;; ------------------------------------------------------------------------ | |
1527 ;; Jump and linkage insns | |
1528 ;; ------------------------------------------------------------------------ | |
1529 | |
;; Unconditional PC-relative branch.
1530 (define_insn "jump_real" | |
1531 [(set (pc) | |
1532 (label_ref (match_operand 0 "" "")))] | |
1533 "" | |
1534 "jbr %l0" | |
1535 [(set_attr "type" "branch")]) | |
1536 | |
;; Standard "jump" pattern, routed through jump_real.
1537 (define_expand "jump" | |
1538 [(set (pc) (label_ref (match_operand 0 "" "")))] | |
1539 "" | |
1540 " | |
1541 { | |
1542 emit_jump_insn (gen_jump_real (operand0)); | |
1543 DONE; | |
1544 } | |
1545 ") | |
1546 | |
;; Jump to an address held in a register.
1547 (define_insn "indirect_jump" | |
1548 [(set (pc) | |
1549 (match_operand:SI 0 "mcore_arith_reg_operand" "r"))] | |
1550 "" | |
1551 "jmp %0" | |
1552 [(set_attr "type" "jmp")]) | |
1553 | |
;; Call expander.  r15 (the link register) is clobbered by the call.
;; A call address that is neither a register nor a symbolic address is
;; forced into a register so call_internal's "riR" constraint can match.
1554 (define_expand "call" | |
1555 [(parallel[(call (match_operand:SI 0 "" "") | |
1556 (match_operand 1 "" "")) | |
1557 (clobber (reg:SI 15))])] | |
1558 "" | |
1559 " | |
1560 { | |
1561 if (GET_CODE (operands[0]) == MEM | |
1562 && ! register_operand (XEXP (operands[0], 0), SImode) | |
1563 && ! mcore_symbolic_address_p (XEXP (operands[0], 0))) | |
1564 operands[0] = gen_rtx_MEM (GET_MODE (operands[0]), | |
1565 force_reg (Pmode, XEXP (operands[0], 0))); | |
1566 }") | |
1567 | |
;; Matching call insn; assembly emitted by mcore_output_call.
1568 (define_insn "call_internal" | |
1569 [(call (mem:SI (match_operand:SI 0 "mcore_call_address_operand" "riR")) | |
1570 (match_operand 1 "" "")) | |
1571 (clobber (reg:SI 15))] | |
1572 "" | |
1573 "* return mcore_output_call (operands, 0);") | |
1574 | |
;; Value-returning call expander; r15 (the link register) is clobbered.
;; NOTE(review): the original tested operands[0] here, copy-pasted from
;; the "call" expander above.  In call_value the called MEM is
;; operands[1]; operands[0] is the register_operand return value and can
;; never be a MEM, so the legitimization guard was dead code and bogus
;; call addresses were never forced into a register.  Test the same
;; operand that is rewritten, mirroring "call" which tests its own
;; address operand.
1575 (define_expand "call_value" | |
1576 [(parallel[(set (match_operand 0 "register_operand" "") | |
1577 (call (match_operand:SI 1 "" "") | |
1578 (match_operand 2 "" ""))) | |
1579 (clobber (reg:SI 15))])] | |
1580 "" | |
1581 " | |
1582 { | |
1583 if (GET_CODE (operands[1]) == MEM | |
1584 && ! register_operand (XEXP (operands[1], 0), SImode) | |
1585 && ! mcore_symbolic_address_p (XEXP (operands[1], 0))) | |
1586 operands[1] = gen_rtx_MEM (GET_MODE (operands[1]), | |
1587 force_reg (Pmode, XEXP (operands[1], 0))); | |
1588 }") | |
1589 | |
;; Value-returning call insn; assembly emitted by mcore_output_call.
1590 (define_insn "call_value_internal" | |
1591 [(set (match_operand 0 "register_operand" "=r") | |
1592 (call (mem:SI (match_operand:SI 1 "mcore_call_address_operand" "riR")) | |
1593 (match_operand 2 "" ""))) | |
1594 (clobber (reg:SI 15))] | |
1595 "" | |
1596 "* return mcore_output_call (operands, 1);") | |
1597 | |
;; Call returning an aggregate split across several registers: the
;; destination is a PARALLEL of (register, offset) pairs rather than a
;; single register.
1598 (define_insn "call_value_struct" | |
1599 [(parallel [(set (match_parallel 0 "" | |
1600 [(expr_list (match_operand 3 "register_operand" "") (match_operand 4 "immediate_operand" "")) | |
1601 (expr_list (match_operand 5 "register_operand" "") (match_operand 6 "immediate_operand" ""))]) | |
1602 (call (match_operand:SI 1 "" "") | |
1603 (match_operand 2 "" ""))) | |
1604 (clobber (reg:SI 15))])] | |
1605 "" | |
1606 "* return mcore_output_call (operands, 1);" | |
1607 ) | |
1608 | |
1609 | |
;; ------------------------------------------------------------------------
;; Misc insns
;; ------------------------------------------------------------------------

;; No-op: OR of r0 with itself changes nothing.
(define_insn "nop"
  [(const_int 0)]
  ""
  "or r0,r0")

;; Indirect jump through a dispatch-table entry already loaded into a
;; register; the label use keeps the jump table alive.
(define_insn "tablejump"
  [(set (pc)
	(match_operand:SI 0 "mcore_arith_reg_operand" "r"))
   (use (label_ref (match_operand 1 "" "")))]
  ""
  "jmp %0"
  [(set_attr "type" "jmp")])

;; Normal function return: jump through the link register.  Only valid
;; after reload, and never for naked functions, which must not have any
;; return sequence generated for them.
(define_insn "*return"
  [(return)]
  "reload_completed && ! mcore_naked_function_p ()"
  "jmp r15"
  [(set_attr "type" "jmp")])

;; Return for naked functions: emit nothing at all (length 0); the
;; user-supplied body is expected to contain the return.
(define_insn "*no_return"
  [(return)]
  "reload_completed && mcore_naked_function_p ()"
  ""
  [(set_attr "length" "0")]
)
1639 | |
;; Function prologue/epilogue are expanded entirely in C code
;; (mcore.c); the RTL templates here are placeholders.
(define_expand "prologue"
  [(const_int 0)]
  ""
  "mcore_expand_prolog (); DONE;")

(define_expand "epilogue"
  [(return)]
  ""
  "mcore_expand_epilog ();")
1649 | |
;; ------------------------------------------------------------------------
;; Scc instructions
;; ------------------------------------------------------------------------

;; Set operand 0 to 1 if the condition bit (reg 17) is set, else to 0.
(define_insn "mvc"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(ne:SI (reg:CC 17) (const_int 0)))]
  ""
  "mvc %0"
  [(set_attr "type" "move")])

;; Inverse of mvc: set operand 0 to 1 if the condition bit is clear.
(define_insn "mvcv"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(eq:SI (reg:CC 17) (const_int 0)))]
  ""
  "mvcv %0"
  [(set_attr "type" "move")])

; in 0.97 use (LE 0) with (LT 1) and complement c. BRC
;; Split "x > 0" into cmplti x,1 followed by mvcv: (x > 0) is the
;; complement of (x < 1), so test the latter and read the inverted bit.
(define_split
  [(parallel[
    (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	 (ne:SI (gt:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
		       (const_int 0))
		(const_int 0)))
    (clobber (reg:SI 17))])]
  ""
  [(set (reg:CC 17)
	(lt:CC (match_dup 1) (const_int 1)))
   (set (match_dup 0) (eq:SI (reg:CC 17) (const_int 0)))])
1680 | |
1681 | |
;; Expand an SImode compare-and-store (scc).  mcore_gen_compare emits
;; the comparison into the condition bit (reg 17), possibly rewriting
;; the relation to one the hardware supports; it returns true when the
;; resulting condition bit is the INVERSE of the requested relation, in
;; which case mvcv (rather than mvc) materializes the 0/1 result.
(define_expand "cstoresi4"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(match_operator:SI 1 "ordered_comparison_operator"
	 [(match_operand:SI 2 "mcore_compare_operand" "")
	  (match_operand:SI 3 "nonmemory_operand" "")]))]
  ""
  "
{
  bool invert;
  invert = mcore_gen_compare (GET_CODE (operands[1]),
			      operands[2], operands[3]);

  if (invert)
    emit_insn (gen_mvcv (operands[0]));
  else
    emit_insn (gen_mvc (operands[0]));
  DONE;
}")
1700 | |
;; Increment/decrement a register by the condition bit or its
;; complement.  These are two-address instructions, hence the "0"
;; matching constraint tying operand 1 to operand 0.

;; dst = src + 1 when the condition bit is set (inct).
(define_insn "incscc"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(plus:SI (ne (reg:CC 17) (const_int 0))
		 (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
  ""
  "inct %0")

;; dst = src + 1 when the condition bit is clear (incf).
(define_insn "incscc_false"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(plus:SI (eq (reg:CC 17) (const_int 0))
		 (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
  ""
  "incf %0")

;; dst = src - 1 when the condition bit is set (dect).
(define_insn "decscc"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(minus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
		  (ne (reg:CC 17) (const_int 0))))]
  ""
  "dect %0")

;; dst = src - 1 when the condition bit is clear (decf).
(define_insn "decscc_false"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(minus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
		  (eq (reg:CC 17) (const_int 0))))]
  ""
  "decf %0")
1728 | |
;; ------------------------------------------------------------------------
;; Conditional move patterns.
;; ------------------------------------------------------------------------

;; Signed max: compare with cmplt, then keep the larger value with a
;; conditional move on the cleared condition bit.
(define_expand "smaxsi3"
  [(set (reg:CC 17)
	(lt:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
	       (match_operand:SI 2 "mcore_arith_reg_operand" "")))
   (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(if_then_else:SI (eq (reg:CC 17) (const_int 0))
			 (match_dup 1) (match_dup 2)))]
  ""
  "")

;; Split a combiner-generated smax back into compare + cmov.
;; NOTE(review): the comparison is written lt:SI here but lt:CC in the
;; expander above -- confirm whether the mode difference is intentional.
(define_split
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(smax:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
  ""
  [(set (reg:CC 17)
	(lt:SI (match_dup 1) (match_dup 2)))
   (set (match_dup 0)
	(if_then_else:SI (eq (reg:CC 17) (const_int 0))
			 (match_dup 1) (match_dup 2)))]
  "")

; no tstgt in 0.97, so just use cmplti (btsti x,31) and reverse move
; condition BRC
;; Special case smax(x, 0): test the sign bit and clear on negative.
(define_split
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(smax:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		 (const_int 0)))]
  ""
  [(set (reg:CC 17)
	(lt:CC (match_dup 1) (const_int 0)))
   (set (match_dup 0)
	(if_then_else:SI (eq (reg:CC 17) (const_int 0))
			 (match_dup 1) (const_int 0)))]
  "")
1768 | |
;; Signed min: same compare as smaxsi3 but move on the SET condition
;; bit, selecting the smaller value.
(define_expand "sminsi3"
  [(set (reg:CC 17)
	(lt:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
	       (match_operand:SI 2 "mcore_arith_reg_operand" "")))
   (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(if_then_else:SI (ne (reg:CC 17) (const_int 0))
			 (match_dup 1) (match_dup 2)))]
  ""
  "")

;; Split a combiner-generated smin back into compare + cmov.
;; NOTE(review): lt:SI here vs lt:CC in the expander -- confirm the
;; mode difference is intentional.
(define_split
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(smin:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
  ""
  [(set (reg:CC 17)
	(lt:SI (match_dup 1) (match_dup 2)))
   (set (match_dup 0)
	(if_then_else:SI (ne (reg:CC 17) (const_int 0))
			 (match_dup 1) (match_dup 2)))]
  "")
1790 | |
1791 ;(define_split | |
1792 ; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") | |
1793 ; (smin:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") | |
1794 ; (const_int 0)))] | |
1795 ; "" | |
1796 ; [(set (reg:CC 17) | |
1797 ; (gt:CC (match_dup 1) (const_int 0))) | |
1798 ; (set (match_dup 0) | |
1799 ; (if_then_else:SI (eq (reg:CC 17) (const_int 0)) | |
1800 ; (match_dup 1) (const_int 0)))] | |
1801 ; "") | |
1802 | |
; changed these unsigned patterns to use geu instead of ltu. it appears
; that the c-torture & ssrl test suites didn't catch these! only showed
; up in friedman's clib work. BRC 7/7/95

;; Unsigned max: compare with cmphs (geu) and keep the larger operand.
(define_expand "umaxsi3"
  [(set (reg:CC 17)
	(geu:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
		(match_operand:SI 2 "mcore_arith_reg_operand" "")))
   (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(if_then_else:SI (eq (reg:CC 17) (const_int 0))
			 (match_dup 2) (match_dup 1)))]
  ""
  "")

;; Split a combiner-generated umax into compare + cmov.
;; NOTE(review): geu:SI here vs geu:CC in the expander -- confirm the
;; mode difference is intentional.
(define_split
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(umax:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
  ""
  [(set (reg:CC 17)
	(geu:SI (match_dup 1) (match_dup 2)))
   (set (match_dup 0)
	(if_then_else:SI (eq (reg:CC 17) (const_int 0))
			 (match_dup 2) (match_dup 1)))]
  "")

;; Unsigned min: as umaxsi3 but select on the SET condition bit.
(define_expand "uminsi3"
  [(set (reg:CC 17)
	(geu:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
		(match_operand:SI 2 "mcore_arith_reg_operand" "")))
   (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(if_then_else:SI (ne (reg:CC 17) (const_int 0))
			 (match_dup 2) (match_dup 1)))]
  ""
  "")

;; Split a combiner-generated umin into compare + cmov.
(define_split
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(umin:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
		 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
  ""
  [(set (reg:CC 17)
	(geu:SI (match_dup 1) (match_dup 2)))
   (set (match_dup 0)
	(if_then_else:SI (ne (reg:CC 17) (const_int 0))
			 (match_dup 2) (match_dup 1)))]
  "")
1850 | |
1851 ;; ------------------------------------------------------------------------ | |
1852 ;; conditional move patterns really start here | |
1853 ;; ------------------------------------------------------------------------ | |
1854 | |
1855 ;; the "movtK" patterns are experimental. they are intended to account for | |
1856 ;; gcc's mucking on code such as: | |
1857 ;; | |
1858 ;; free_ent = ((block_compress) ? 257 : 256 ); | |
1859 ;; | |
1860 ;; these patterns help to get a tstne/bgeni/inct (or equivalent) sequence | |
1861 ;; when both arms have constants that are +/- 1 of each other. | |
1862 ;; | |
1863 ;; note in the following patterns that the "movtK" ones should be the first | |
1864 ;; one defined in each sequence. this is because the general pattern also | |
1865 ;; matches, so use ordering to determine priority (it's easier this way than | |
1866 ;; adding conditions to the general patterns). BRC | |
1867 ;; | |
1868 ;; the U and Q constraints are necessary to ensure that reload does the | |
1869 ;; 'right thing'. U constrains the operand to 0 and Q to 1 for use in the | |
1870 ;; clrt & clrf and clrt/inct & clrf/incf patterns. BRC 6/26 | |
1871 ;; | |
1872 ;; ??? there appears to be some problems with these movtK patterns for ops | |
1873 ;; other than eq & ne. need to fix. 6/30 BRC | |
1874 | |
1875 ;; ------------------------------------------------------------------------ | |
1876 ;; ne | |
1877 ;; ------------------------------------------------------------------------ | |
1878 | |
; experimental conditional move with two constants +/- 1 BRC

;; Conditional move where both arms are constants differing by exactly
;; one: mcore_output_cmov emits a move of one constant plus an
;; inct/incf (or dect/decf) keyed on the condition bit, avoiding a
;; branch.  The insn condition enforces the +/-1 relationship.
(define_insn "movtK_1"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(if_then_else:SI
	 (ne (reg:CC 17) (const_int 0))
	 (match_operand:SI 1 "mcore_arith_O_operand" "O")
	 (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
  " GET_CODE (operands[1]) == CONST_INT
    && GET_CODE (operands[2]) == CONST_INT
    && ( (INTVAL (operands[1]) - INTVAL (operands[2]) == 1)
	|| (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
  "* return mcore_output_cmov (operands, 1, NULL);"
  [(set_attr "length" "4")])

;; General conditional move on the condition bit (ne form).  The U
;; alternatives tie one arm to constant zero so reload can pick the
;; single-instruction clrt/clrf forms.
(define_insn "movt0"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI
	 (ne (reg:CC 17) (const_int 0))
	 (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
	 (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
  ""
  "@
    movt %0,%1
    movf %0,%2
    clrt %0
    clrf %0")
1906 | |
;; ------------------------------------------------------------------------
;; eq
;; ------------------------------------------------------------------------

; experimental conditional move with two constants +/- 1 BRC
;; As movtK_1, but testing the condition bit clear (eq); the 0 passed
;; to mcore_output_cmov selects the inverted sequence.
(define_insn "movtK_2"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(if_then_else:SI
	 (eq (reg:CC 17) (const_int 0))
	 (match_operand:SI 1 "mcore_arith_O_operand" "O")
	 (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
  " GET_CODE (operands[1]) == CONST_INT
    && GET_CODE (operands[2]) == CONST_INT
    && ( (INTVAL (operands[1]) - INTVAL (operands[2]) == 1)
	|| (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
  "* return mcore_output_cmov (operands, 0, NULL);"
  [(set_attr "length" "4")])

;; General conditional move on the cleared condition bit (eq form).
(define_insn "movf0"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI
	 (eq (reg:CC 17) (const_int 0))
	 (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
	 (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
  ""
  "@
    movf %0,%1
    movt %0,%2
    clrf %0
    clrt %0")
1937 | |
; turns lsli rx,imm/btsti rx,31 into btsti rx,imm. not done by a peephole
; because the instructions are not adjacent (peepholes are related by posn -
; not by dataflow). BRC

;; Conditional move on a single bit of a register (eq form): test the
;; bit with btsti, then use the matching movf/movt/clrf/clrt.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI (eq (zero_extract:SI
			      (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
			      (const_int 1)
			      (match_operand:SI 2 "mcore_literal_K_operand" "K,K,K,K"))
			     (const_int 0))
			 (match_operand:SI 3 "mcore_arith_imm_operand" "r,0,U,0")
			 (match_operand:SI 4 "mcore_arith_imm_operand" "0,r,0,U")))]
  ""
  "@
    btsti %1,%2\;movf %0,%3
    btsti %1,%2\;movt %0,%4
    btsti %1,%2\;clrf %0
    btsti %1,%2\;clrt %0"
  [(set_attr "length" "4")])

; turns sextb rx/btsti rx,31 into btsti rx,7. must be QImode to be safe. BRC

;; Sign-bit test of a byte value (eq form): only matches when operand 1
;; is a SUBREG of a QImode object, so bit 7 is the byte's sign bit.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI (eq (lshiftrt:SI
			      (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
			      (const_int 7))
			     (const_int 0))
			 (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
			 (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
  "GET_CODE (operands[1]) == SUBREG &&
      GET_MODE (SUBREG_REG (operands[1])) == QImode"
  "@
    btsti %1,7\;movf %0,%2
    btsti %1,7\;movt %0,%3
    btsti %1,7\;clrf %0
    btsti %1,7\;clrt %0"
  [(set_attr "length" "4")])
1977 | |
1978 | |
;; ------------------------------------------------------------------------
;; ne
;; ------------------------------------------------------------------------

;; Combine creates this from an andn instruction in a scc sequence.
;; We must recognize it to get conditional moves generated.

; experimental conditional move with two constants +/- 1 BRC
;; Conditional move keyed on a register being nonzero, with both arms
;; constants differing by one.  The operands are reshuffled so that
;; mcore_output_cmov sees (dest, const1, const2, tested-reg) and can
;; prepend the cmpnei that sets the condition bit.
(define_insn "movtK_3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(if_then_else:SI
	 (ne (match_operand:SI 1 "mcore_arith_reg_operand" "r")
	     (const_int 0))
	 (match_operand:SI 2 "mcore_arith_O_operand" "O")
	 (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
  " GET_CODE (operands[2]) == CONST_INT
    && GET_CODE (operands[3]) == CONST_INT
    && ( (INTVAL (operands[2]) - INTVAL (operands[3]) == 1)
	|| (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
  "*
{
  rtx out_operands[4];
  out_operands[0] = operands[0];
  out_operands[1] = operands[2];
  out_operands[2] = operands[3];
  out_operands[3] = operands[1];

  return mcore_output_cmov (out_operands, 1, \"cmpnei %3,0\");

}"
  [(set_attr "length" "6")])

;; Compare a register against zero, then conditionally move (ne form).
(define_insn "movt2"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI (ne (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
			     (const_int 0))
			 (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
			 (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
  ""
  "@
    cmpnei %1,0\;movt %0,%2
    cmpnei %1,0\;movf %0,%3
    cmpnei %1,0\;clrt %0
    cmpnei %1,0\;clrf %0"
  [(set_attr "length" "4")])
2024 | |
; turns lsli rx,imm/btsti rx,31 into btsti rx,imm. not done by a peephole
; because the instructions are not adjacent (peepholes are related by posn -
; not by dataflow). BRC

;; Conditional move on a single bit of a register (ne form): test the
;; bit with btsti, then use the matching movt/movf/clrt/clrf.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI (ne (zero_extract:SI
			      (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
			      (const_int 1)
			      (match_operand:SI 2 "mcore_literal_K_operand" "K,K,K,K"))
			     (const_int 0))
			 (match_operand:SI 3 "mcore_arith_imm_operand" "r,0,U,0")
			 (match_operand:SI 4 "mcore_arith_imm_operand" "0,r,0,U")))]
  ""
  "@
    btsti %1,%2\;movt %0,%3
    btsti %1,%2\;movf %0,%4
    btsti %1,%2\;clrt %0
    btsti %1,%2\;clrf %0"
  [(set_attr "length" "4")])

; turns sextb rx/btsti rx,31 into btsti rx,7. must be QImode to be safe. BRC

;; Sign-bit test of a byte value (ne form): only matches when operand 1
;; is a SUBREG of a QImode object, so bit 7 is the byte's sign bit.
(define_insn ""
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI (ne (lshiftrt:SI
			      (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
			      (const_int 7))
			     (const_int 0))
			 (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
			 (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
  "GET_CODE (operands[1]) == SUBREG &&
      GET_MODE (SUBREG_REG (operands[1])) == QImode"
  "@
    btsti %1,7\;movt %0,%2
    btsti %1,7\;movf %0,%3
    btsti %1,7\;clrt %0
    btsti %1,7\;clrf %0"
  [(set_attr "length" "4")])
2064 | |
;; ------------------------------------------------------------------------
;; eq/eq
;; ------------------------------------------------------------------------

; experimental conditional move with two constants +/- 1 BRC
;; (eq (eq cc 0) 0) collapses to plain "condition bit set"; hence the
;; non-inverted (1) sequence from mcore_output_cmov.
(define_insn "movtK_4"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(if_then_else:SI
	 (eq (eq:SI (reg:CC 17) (const_int 0)) (const_int 0))
	 (match_operand:SI 1 "mcore_arith_O_operand" "O")
	 (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
  "GET_CODE (operands[1]) == CONST_INT &&
   GET_CODE (operands[2]) == CONST_INT &&
   ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) ||
    (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
  "* return mcore_output_cmov(operands, 1, NULL);"
  [(set_attr "length" "4")])

;; General cmov for the doubly-negated (eq (eq cc 0) 0) form.
(define_insn "movt3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI
	 (eq (eq:SI (reg:CC 17) (const_int 0)) (const_int 0))
	 (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
	 (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
  ""
  "@
    movt %0,%1
    movf %0,%2
    clrt %0
    clrf %0")
2095 | |
;; ------------------------------------------------------------------------
;; eq/ne
;; ------------------------------------------------------------------------

; experimental conditional move with two constants +/- 1 BRC
;; (eq (ne cc 0) 0) collapses to "condition bit clear"; hence the
;; inverted (0) sequence from mcore_output_cmov.
(define_insn "movtK_5"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(if_then_else:SI
	 (eq (ne:SI (reg:CC 17) (const_int 0)) (const_int 0))
	 (match_operand:SI 1 "mcore_arith_O_operand" "O")
	 (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
  "GET_CODE (operands[1]) == CONST_INT &&
   GET_CODE (operands[2]) == CONST_INT &&
   ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) ||
    (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
  "* return mcore_output_cmov (operands, 0, NULL);"
  [(set_attr "length" "4")])

;; General cmov for the (eq (ne cc 0) 0) form.
(define_insn "movf1"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI
	 (eq (ne:SI (reg:CC 17) (const_int 0)) (const_int 0))
	 (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
	 (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
  ""
  "@
    movf %0,%1
    movt %0,%2
    clrf %0
    clrt %0")
2126 | |
;; ------------------------------------------------------------------------
;; eq
;; ------------------------------------------------------------------------

;; Combine creates this from an andn instruction in a scc sequence.
;; We must recognize it to get conditional moves generated.

; experimental conditional move with two constants +/- 1 BRC

;; As movtK_3, but keyed on the register being zero (eq).
;; Fixed: the insn condition previously tested operands[1] and
;; operands[2] for CONST_INT, but operand 1 is the tested register
;; (predicate mcore_arith_reg_operand) and can never be a CONST_INT,
;; so the pattern could never match.  The constant arms are operands
;; 2 and 3, exactly as in the sibling patterns movtK_3/movtK_9/movtK_10.
(define_insn "movtK_6"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(if_then_else:SI
	 (eq (match_operand:SI 1 "mcore_arith_reg_operand" "r")
	     (const_int 0))
	 (match_operand:SI 2 "mcore_arith_O_operand" "O")
	 (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
  "GET_CODE (operands[2]) == CONST_INT &&
   GET_CODE (operands[3]) == CONST_INT &&
   ((INTVAL (operands[2]) - INTVAL (operands[3]) == 1) ||
    (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
  "*
{
  rtx out_operands[4];
  out_operands[0] = operands[0];
  out_operands[1] = operands[2];
  out_operands[2] = operands[3];
  out_operands[3] = operands[1];

  return mcore_output_cmov (out_operands, 0, \"cmpnei %3,0\");
}"
  [(set_attr "length" "6")])

;; Compare a register against zero, then conditionally move (eq form).
(define_insn "movf3"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI (eq (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
			     (const_int 0))
			 (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
			 (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
  ""
  "@
    cmpnei %1,0\;movf %0,%2
    cmpnei %1,0\;movt %0,%3
    cmpnei %1,0\;clrf %0
    cmpnei %1,0\;clrt %0"
  [(set_attr "length" "4")])
2172 | |
;; ------------------------------------------------------------------------
;; ne/eq
;; ------------------------------------------------------------------------

; experimental conditional move with two constants +/- 1 BRC
;; (ne (eq cc 0) 0) collapses to "condition bit clear"; inverted (0)
;; sequence from mcore_output_cmov.
(define_insn "movtK_7"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(if_then_else:SI
	 (ne (eq:SI (reg:CC 17) (const_int 0)) (const_int 0))
	 (match_operand:SI 1 "mcore_arith_O_operand" "O")
	 (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
  "GET_CODE (operands[1]) == CONST_INT &&
   GET_CODE (operands[2]) == CONST_INT &&
   ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) ||
    (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
  "* return mcore_output_cmov (operands, 0, NULL);"
  [(set_attr "length" "4")])

;; General cmov for the (ne (eq cc 0) 0) form.
(define_insn "movf4"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI
	 (ne (eq:SI (reg:CC 17) (const_int 0)) (const_int 0))
	 (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
	 (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
  ""
  "@
    movf %0,%1
    movt %0,%2
    clrf %0
    clrt %0")
2203 | |
;; ------------------------------------------------------------------------
;; ne/ne
;; ------------------------------------------------------------------------

; experimental conditional move with two constants +/- 1 BRC
;; (ne (ne cc 0) 0) collapses to "condition bit set"; non-inverted (1)
;; sequence from mcore_output_cmov.
(define_insn "movtK_8"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(if_then_else:SI
	 (ne (ne:SI (reg:CC 17) (const_int 0)) (const_int 0))
	 (match_operand:SI 1 "mcore_arith_O_operand" "O")
	 (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
  "GET_CODE (operands[1]) == CONST_INT &&
   GET_CODE (operands[2]) == CONST_INT &&
   ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) ||
    (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
  "* return mcore_output_cmov (operands, 1, NULL);"
  [(set_attr "length" "4")])

;; General cmov for the (ne (ne cc 0) 0) form.
(define_insn "movt4"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI
	 (ne (ne:SI (reg:CC 17) (const_int 0)) (const_int 0))
	 (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
	 (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
  ""
  "@
    movt %0,%1
    movf %0,%2
    clrt %0
    clrf %0")
2234 | |
;; Also need patterns to recognize lt/ge, since otherwise the compiler will
;; try to output not/asri/tstne/movf.

;; ------------------------------------------------------------------------
;; lt
;; ------------------------------------------------------------------------

; experimental conditional move with two constants +/- 1 BRC
;; Sign test (x < 0) implemented with btsti %reg,31 feeding a +/-1
;; constant cmov pair; operands are reshuffled for mcore_output_cmov
;; as in movtK_3.
(define_insn "movtK_9"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(if_then_else:SI
	 (lt (match_operand:SI 1 "mcore_arith_reg_operand" "r")
	     (const_int 0))
	 (match_operand:SI 2 "mcore_arith_O_operand" "O")
	 (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
  "GET_CODE (operands[2]) == CONST_INT &&
   GET_CODE (operands[3]) == CONST_INT &&
   ((INTVAL (operands[2]) - INTVAL (operands[3]) == 1) ||
    (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
  "*
{
  rtx out_operands[4];
  out_operands[0] = operands[0];
  out_operands[1] = operands[2];
  out_operands[2] = operands[3];
  out_operands[3] = operands[1];

  return mcore_output_cmov (out_operands, 1, \"btsti %3,31\");
}"
  [(set_attr "length" "6")])

;; Sign test feeding a general conditional move (lt form).
(define_insn "movt5"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI (lt (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
			     (const_int 0))
			 (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
			 (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
  ""
  "@
    btsti %1,31\;movt %0,%2
    btsti %1,31\;movf %0,%3
    btsti %1,31\;clrt %0
    btsti %1,31\;clrf %0"
  [(set_attr "length" "4")])
2279 | |
2280 | |
;; ------------------------------------------------------------------------
;; ge
;; ------------------------------------------------------------------------

; experimental conditional move with two constants +/- 1 BRC
;; Non-negative test (x >= 0): btsti %reg,31 with the inverted (0)
;; cmov sequence.
(define_insn "movtK_10"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
	(if_then_else:SI
	 (ge (match_operand:SI 1 "mcore_arith_reg_operand" "r")
	     (const_int 0))
	 (match_operand:SI 2 "mcore_arith_O_operand" "O")
	 (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
  "GET_CODE (operands[2]) == CONST_INT &&
   GET_CODE (operands[3]) == CONST_INT &&
   ((INTVAL (operands[2]) - INTVAL (operands[3]) == 1) ||
    (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
  "*
{
  rtx out_operands[4];
  out_operands[0] = operands[0];
  out_operands[1] = operands[2];
  out_operands[2] = operands[3];
  out_operands[3] = operands[1];

  return mcore_output_cmov (out_operands, 0, \"btsti %3,31\");
}"
  [(set_attr "length" "6")])

;; Sign test feeding a general conditional move (ge form).
(define_insn "movf5"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
	(if_then_else:SI (ge (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
			     (const_int 0))
			 (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
			 (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
  ""
  "@
    btsti %1,31\;movf %0,%2
    btsti %1,31\;movt %0,%3
    btsti %1,31\;clrf %0
    btsti %1,31\;clrt %0"
  [(set_attr "length" "4")])
2322 | |
2323 ;; ------------------------------------------------------------------------ | |
2324 ;; Bitfield extract (xtrbN) | |
2325 ;; ------------------------------------------------------------------------ | |
2326 | |
2327 ; sometimes we're better off using QI/HI mode and letting the machine indep. | |
2328 ; part expand insv and extv. | |
2329 ; | |
2330 ; e.g., sequences like:a [an insertion] | |
2331 ; | |
2332 ; ldw r8,(r6) | |
2333 ; movi r7,0x00ffffff | |
2334 ; and r8,r7 r7 dead | |
2335 ; stw r8,(r6) r8 dead | |
2336 ; | |
2337 ; become: | |
2338 ; | |
2339 ; movi r8,0 | |
2340 ; stb r8,(r6) r8 dead | |
2341 ; | |
2342 ; it looks like always using SI mode is a win except in this type of code | |
2343 ; (when adjacent bit fields collapse on a byte or halfword boundary). when | |
2344 ; expanding with SI mode, non-adjacent bit field masks fold, but with QI/HI | |
2345 ; mode, they do not. one thought is to add some peepholes to cover cases | |
2346 ; like the above, but this is not a general solution. | |
2347 ; | |
2348 ; -mword-bitfields expands/inserts using SI mode. otherwise, do it with | |
2349 ; the smallest mode possible (using the machine indep. expansions). BRC | |
2350 | |
2351 ;(define_expand "extv" | |
2352 ; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") | |
2353 ; (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") | |
2354 ; (match_operand:SI 2 "const_int_operand" "") | |
2355 ; (match_operand:SI 3 "const_int_operand" ""))) | |
2356 ; (clobber (reg:CC 17))] | |
2357 ; "" | |
2358 ; " | |
2359 ;{ | |
2360 ; if (INTVAL (operands[1]) != 8 || INTVAL (operands[2]) % 8 != 0) | |
2361 ; { | |
2362 ; if (TARGET_W_FIELD) | |
2363 ; { | |
2364 ; rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3]))); | |
2365 ; rtx rshft = GEN_INT (32 - INTVAL (operands[2])); | |
2366 ; | |
2367 ; emit_insn (gen_rtx_SET (SImode, operands[0], operands[1])); | |
2368 ; emit_insn (gen_rtx_SET (SImode, operands[0], | |
2369 ; gen_rtx_ASHIFT (SImode, operands[0], lshft))); | |
2370 ; emit_insn (gen_rtx_SET (SImode, operands[0], | |
2371 ; gen_rtx_ASHIFTRT (SImode, operands[0], rshft))); | |
2372 ; DONE; | |
2373 ; } | |
2374 ; else | |
2375 ; FAIL; | |
2376 ; } | |
2377 ;}") | |
2378 | |
;; Sign-extracting bit-field read.  Three cases:
;;   - byte-aligned 8-bit field: fall through and let the xtrb[0123]
;;     + sext patterns match the generated RTL;
;;   - -mword-bitfields (TARGET_W_FIELD): open-code as shift-left then
;;     arithmetic shift-right;
;;   - otherwise FAIL so the generic expander produces its own sequence.
(define_expand "extv"
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
	(sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
			 (match_operand:SI 2 "const_int_operand" "")
			 (match_operand:SI 3 "const_int_operand" "")))
   (clobber (reg:CC 17))]
  ""
  "
{
  if (INTVAL (operands[2]) == 8 && INTVAL (operands[3]) % 8 == 0)
    {
      /* 8-bit field, aligned properly, use the xtrb[0123]+sext sequence.  */
      /* not DONE, not FAIL, but let the RTL get generated....  */
    }
  else if (TARGET_W_FIELD)
    {
      /* Arbitrary placement; note that the tree->rtl generator will make
	 something close to this if we return FAIL  */
      rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3])));
      rtx rshft = GEN_INT (32 - INTVAL (operands[2]));
      rtx tmp1 = gen_reg_rtx (SImode);
      rtx tmp2 = gen_reg_rtx (SImode);

      emit_insn (gen_rtx_SET (SImode, tmp1, operands[1]));
      emit_insn (gen_rtx_SET (SImode, tmp2,
			      gen_rtx_ASHIFT (SImode, tmp1, lshft)));
      emit_insn (gen_rtx_SET (SImode, operands[0],
			      gen_rtx_ASHIFTRT (SImode, tmp2, rshft)));
      DONE;
    }
  else
    {
      /* Let the caller choose an alternate sequence.  */
      FAIL;
    }
}")
2415 | |
; NOTE(review): zero_extract expander, parallel to extv above but with one
; extra trick: a field whose all-ones mask fits constraint K (<= 5 bits,
; per the comment below) is extracted with an optional lshiftrt followed by
; an AND with the mask -- same cost as the shift-left/shift-right pair.
; Otherwise: byte-aligned 8-bit fields fall through to xtrb[0-3],
; TARGET_W_FIELD uses ashift/lshiftrt, and anything else FAILs.
2416 (define_expand "extzv" | |
2417 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") | |
2418 (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "") | |
2419 (match_operand:SI 2 "const_int_operand" "") | |
2420 (match_operand:SI 3 "const_int_operand" ""))) | |
2421 (clobber (reg:CC 17))] | |
2422 "" | |
2423 " | |
2424 { | |
2425 if (INTVAL (operands[2]) == 8 && INTVAL (operands[3]) % 8 == 0) | |
2426 { | |
2427 /* 8-bit field, aligned properly, use the xtrb[0123] sequence. */ | |
2428 /* Let the template generate some RTL.... */ | |
2429 } | |
2430 else if (CONST_OK_FOR_K ((1 << INTVAL (operands[2])) - 1)) | |
2431 { | |
2432 /* A narrow bit-field (<=5 bits) means we can do a shift to put | |
2433 it in place and then use an andi to extract it. | |
2434 This is as good as a shiftleft/shiftright. */ | |
2435 | |
2436 rtx shifted; | |
2437 rtx mask = GEN_INT ((1 << INTVAL (operands[2])) - 1); | |
2438 | |
2439 if (INTVAL (operands[3]) == 0) | |
2440 { | |
2441 shifted = operands[1]; | |
2442 } | |
2443 else | |
2444 { | |
2445 rtx rshft = GEN_INT (INTVAL (operands[3])); | |
2446 shifted = gen_reg_rtx (SImode); | |
2447 emit_insn (gen_rtx_SET (SImode, shifted, | |
2448 gen_rtx_LSHIFTRT (SImode, operands[1], rshft))); | |
2449 } | |
2450 emit_insn (gen_rtx_SET (SImode, operands[0], | |
2451 gen_rtx_AND (SImode, shifted, mask))); | |
2452 DONE; | |
2453 } | |
2454 else if (TARGET_W_FIELD) | |
2455 { | |
2456 /* Arbitrary pattern; play shift/shift games to get it. | |
2457 * this is pretty much what the caller will do if we say FAIL */ | |
2458 rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3]))); | |
2459 rtx rshft = GEN_INT (32 - INTVAL (operands[2])); | |
2460 rtx tmp1 = gen_reg_rtx (SImode); | |
2461 rtx tmp2 = gen_reg_rtx (SImode); | |
2462 | |
2463 emit_insn (gen_rtx_SET (SImode, tmp1, operands[1])); | |
2464 emit_insn (gen_rtx_SET (SImode, tmp2, | |
2465 gen_rtx_ASHIFT (SImode, tmp1, lshft))); | |
2466 emit_insn (gen_rtx_SET (SImode, operands[0], | |
2467 gen_rtx_LSHIFTRT (SImode, tmp2, rshft))); | |
2468 DONE; | |
2469 } | |
2470 else | |
2471 { | |
2472 /* Make the compiler figure out some alternative mechanism. */ | |
2473 FAIL; | |
2474 } | |
2475 | |
2476 /* Emit the RTL pattern; something will match it later. */ | |
2477 }") | |
2478 | |
; NOTE(review): bit-field insert expander; all the real work is delegated
; to mcore_expand_insv() in mcore.c, which returns nonzero when it emitted
; a sequence (-> DONE) and zero when the generic code should take over
; (-> FAIL).  Operand 1 is the width, operand 2 the position, operand 3
; the value being inserted.
2479 (define_expand "insv" | |
2480 [(set (zero_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "") | |
2481 (match_operand:SI 1 "const_int_operand" "") | |
2482 (match_operand:SI 2 "const_int_operand" "")) | |
2483 (match_operand:SI 3 "general_operand" "")) | |
2484 (clobber (reg:CC 17))] | |
2485 "" | |
2486 " | |
2487 { | |
2488 if (mcore_expand_insv (operands)) | |
2489 { | |
2490 DONE; | |
2491 } | |
2492 else | |
2493 { | |
2494 FAIL; | |
2495 } | |
2496 }") | |
2497 | |
2498 ;; | |
2499 ;; the xtrb[0123] instructions handily get at 8-bit fields on nice boundaries. | |
2500 ;; but then, they do force you through r1. | |
2501 ;; | |
2502 ;; the combiner will build such patterns for us, so we'll make them available | |
2503 ;; for its use. | |
2504 ;; | |
2505 ;; Note that we have both SIGNED and UNSIGNED versions of these... | |
2506 ;; | |
2507 | |
2508 ;; | |
2509 ;; These no longer worry about the clobbering of CC bit; not sure this is | |
2510 ;; good... | |
2511 ;; | |
2512 ;; the SIGNED versions of these | |
2513 ;; | |
; NOTE(review): signed 8-bit extract at bit position 24.  Alternative 0
; works in place with a single arithmetic shift right; alternative 1 uses
; xtrb0 (which requires the "b" class -- presumably r1, per the comment at
; the head of this section) followed by sextb.
2514 (define_insn "" | |
2515 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b") | |
2516 (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 24)))] | |
2517 "" | |
2518 "@ | |
2519 asri %0,24 | |
2520 xtrb0 %0,%1\;sextb %0" | |
2521 [(set_attr "type" "shift")]) | |
2522 | |
; NOTE(review): signed 8-bit extract at position 16: xtrb1 then sextb.
2523 (define_insn "" | |
2524 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b") | |
2525 (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 16)))] | |
2526 "" | |
2527 "xtrb1 %0,%1\;sextb %0" | |
2528 [(set_attr "type" "shift")]) | |
2529 | |
; NOTE(review): signed 8-bit extract at position 8: xtrb2 then sextb.
2530 (define_insn "" | |
2531 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b") | |
2532 (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 8)))] | |
2533 "" | |
2534 "xtrb2 %0,%1\;sextb %0" | |
2535 [(set_attr "type" "shift")]) | |
2536 | |
; NOTE(review): signed 8-bit extract at position 0 degenerates to a plain
; in-place sextb (no xtrb needed).
2537 (define_insn "" | |
2538 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") | |
2539 (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0") (const_int 8) (const_int 0)))] | |
2540 "" | |
2541 "sextb %0" | |
2542 [(set_attr "type" "shift")]) | |
2543 | |
2544 ;; the UNSIGNED uses of xtrb[0123] | |
2545 ;; | |
; NOTE(review): unsigned 8-bit extract at position 24: in-place logical
; shift right, or xtrb0 (no zextb needed -- xtrb already zero-fills).
2546 (define_insn "" | |
2547 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b") | |
2548 (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 24)))] | |
2549 "" | |
2550 "@ | |
2551 lsri %0,24 | |
2552 xtrb0 %0,%1" | |
2553 [(set_attr "type" "shift")]) | |
2554 | |
; NOTE(review): unsigned 8-bit extract at position 16 via xtrb1.
2555 (define_insn "" | |
2556 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b") | |
2557 (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 16)))] | |
2558 "" | |
2559 "xtrb1 %0,%1" | |
2560 [(set_attr "type" "shift")]) | |
2561 | |
; NOTE(review): unsigned 8-bit extract at position 8 via xtrb2.
2562 (define_insn "" | |
2563 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b") | |
2564 (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 8)))] | |
2565 "" | |
2566 "xtrb2 %0,%1" | |
2567 [(set_attr "type" "shift")]) | |
2568 | |
2569 ;; This can be peepholed if it follows a ldb ... | |
; NOTE(review): unsigned 8-bit extract at position 0: in-place zextb, or
; xtrb3+zextb.  The trailing zextb in alternative 1 looks redundant if
; xtrb3 zero-fills like the others -- TODO confirm against the MCore ISA
; manual before touching it.
2570 (define_insn "" | |
2571 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b") | |
2572 (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 0)))] | |
2573 "" | |
2574 "@ | |
2575 zextb %0 | |
2576 xtrb3 %0,%1\;zextb %0" | |
2577 [(set_attr "type" "shift")]) | |
2578 | |
2579 | |
2580 ;; ------------------------------------------------------------------------ | |
2581 ;; Block move - adapted from m88k.md | |
2582 ;; ------------------------------------------------------------------------ | |
2583 | |
; NOTE(review): block-move expander (operand 2 = byte count, operand 3 =
; alignment, per the standard movmemsi contract).  Delegates entirely to
; mcore_expand_block_move(); FAIL makes the middle end fall back to a
; library call (memcpy).
2584 (define_expand "movmemsi" | |
2585 [(parallel [(set (mem:BLK (match_operand:BLK 0 "" "")) | |
2586 (mem:BLK (match_operand:BLK 1 "" ""))) | |
2587 (use (match_operand:SI 2 "general_operand" "")) | |
2588 (use (match_operand:SI 3 "immediate_operand" ""))])] | |
2589 "" | |
2590 " | |
2591 { | |
2592 if (mcore_expand_block_move (operands)) | |
2593 DONE; | |
2594 else | |
2595 FAIL; | |
2596 }") | |
2597 | |
2598 ;; ;;; ??? These patterns are meant to be generated from expand_block_move, | |
2599 ;; ;;; but they currently are not. | |
2600 ;; | |
2601 ;; (define_insn "" | |
2602 ;; [(set (match_operand:QI 0 "mcore_arith_reg_operand" "=r") | |
2603 ;; (match_operand:BLK 1 "mcore_general_movsrc_operand" "m"))] | |
2604 ;; "" | |
2605 ;; "ld.b %0,%1" | |
2606 ;; [(set_attr "type" "load")]) | |
2607 ;; | |
2608 ;; (define_insn "" | |
2609 ;; [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r") | |
2610 ;; (match_operand:BLK 1 "mcore_general_movsrc_operand" "m"))] | |
2611 ;; "" | |
2612 ;; "ld.h %0,%1" | |
2613 ;; [(set_attr "type" "load")]) | |
2614 ;; | |
2615 ;; (define_insn "" | |
2616 ;; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") | |
2617 ;; (match_operand:BLK 1 "mcore_general_movsrc_operand" "m"))] | |
2618 ;; "" | |
2619 ;; "ld.w %0,%1" | |
2620 ;; [(set_attr "type" "load")]) | |
2621 ;; | |
2622 ;; (define_insn "" | |
2623 ;; [(set (match_operand:BLK 0 "mcore_general_movdst_operand" "=m") | |
2624 ;; (match_operand:QI 1 "mcore_arith_reg_operand" "r"))] | |
2625 ;; "" | |
2626 ;; "st.b %1,%0" | |
2627 ;; [(set_attr "type" "store")]) | |
2628 ;; | |
2629 ;; (define_insn "" | |
2630 ;; [(set (match_operand:BLK 0 "mcore_general_movdst_operand" "=m") | |
2631 ;; (match_operand:HI 1 "mcore_arith_reg_operand" "r"))] | |
2632 ;; "" | |
2633 ;; "st.h %1,%0" | |
2634 ;; [(set_attr "type" "store")]) | |
2635 ;; | |
2636 ;; (define_insn "" | |
2637 ;; [(set (match_operand:BLK 0 "mcore_general_movdst_operand" "=m") | |
2638 ;; (match_operand:SI 1 "mcore_arith_reg_operand" "r"))] | |
2639 ;; "" | |
2640 ;; "st.w %1,%0" | |
2641 ;; [(set_attr "type" "store")]) | |
2642 | |
2643 ;; ------------------------------------------------------------------------ | |
2644 ;; Misc Optimizing quirks | |
2645 ;; ------------------------------------------------------------------------ | |
2646 | |
2647 ;; pair to catch constructs like: (int *)((p+=4)-4) which happen | |
2648 ;; in stdarg/varargs traversal. This changes a 3 insn sequence to a 2 | |
2649 ;; insn sequence. -- RBE 11/30/95 | |
; NOTE(review): placeholder insn ("#" template, split before output) for
; the copy-then-bump idiom described above, e.g. (int *)((p+=4)-4) from
; varargs traversal; the companion define_split below breaks it into the
; two real insns.
2650 (define_insn "" | |
2651 [(parallel[ | |
2652 (set (match_operand:SI 0 "mcore_arith_reg_operand" "=r") | |
2653 (match_operand:SI 1 "mcore_arith_reg_operand" "+r")) | |
2654 (set (match_dup 1) (plus:SI (match_dup 1) (match_operand 2 "mcore_arith_any_imm_operand" "")))])] | |
2655 "GET_CODE(operands[2]) == CONST_INT" | |
2656 "#" | |
2657 [(set_attr "length" "4")]) | |
2658 | |
; NOTE(review): split for the placeholder insn above: first copy the old
; value, then add the constant.  Guarded on operands[0] != operands[1] so
; the copy-first order stays correct.
2659 (define_split | |
2660 [(parallel[ | |
2661 (set (match_operand:SI 0 "mcore_arith_reg_operand" "") | |
2662 (match_operand:SI 1 "mcore_arith_reg_operand" "")) | |
2663 (set (match_dup 1) (plus:SI (match_dup 1) (match_operand 2 "mcore_arith_any_imm_operand" "")))])] | |
2664 "GET_CODE(operands[2]) == CONST_INT && | |
2665 operands[0] != operands[1]" | |
2666 [(set (match_dup 0) (match_dup 1)) | |
2667 (set (match_dup 1) (plus:SI (match_dup 1) (match_dup 2)))]) | |
2668 | |
2669 | |
2670 ;;; Peepholes | |
2671 | |
2672 ; note: in the following patterns, use mcore_is_dead() to ensure that the | |
2673 ; reg we may be trashing really is dead. reload doesn't always mark | |
2674 ; deaths, so mcore_is_dead() (see mcore.c) scans forward to find its death. BRC | |
2675 | |
2676 ;;; A peephole to convert the 3 instruction sequence generated by reload | |
2677 ;;; to load a FP-offset address into a 2 instruction sequence. | |
2678 ;;; ??? This probably never matches anymore. | |
; NOTE(review): collapses reload's load-constant / negate / add sequence
; into mov+subi.  The deliberate leading "error" in the template will make
; the assembler reject any output -- consistent with the ??? note above
; that this peephole is believed to never match anymore; it is a tripwire,
; not an emitted sequence.
2679 (define_peephole | |
2680 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r") | |
2681 (match_operand:SI 1 "const_int_operand" "J")) | |
2682 (set (match_dup 0) (neg:SI (match_dup 0))) | |
2683 (set (match_dup 0) | |
2684 (plus:SI (match_dup 0) | |
2685 (match_operand:SI 2 "mcore_arith_reg_operand" "r")))] | |
2686 "CONST_OK_FOR_J (INTVAL (operands[1]))" | |
2687 "error\;mov %0,%2\;subi %0,%1") | |
2688 | |
2689 ;; Moves of inlinable constants are done late, so when a 'not' is generated | |
2690 ;; it is never combined with the following 'and' to generate an 'andn' b/c | |
2691 ;; the combiner never sees it. use a peephole to pick up this case (happens | |
2692 ;; mostly with bitfields) BRC | |
2693 | |
; NOTE(review): catch a late "load inverted constant; and" pair that the
; combiner never saw, and emit andn instead (via mcore_output_andn).
; Requires the scratch constant register to be dead afterwards
; (mcore_is_dead scans forward because reload may not mark deaths).
2694 (define_peephole | |
2695 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r") | |
2696 (match_operand:SI 1 "const_int_operand" "i")) | |
2697 (set (match_operand:SI 2 "mcore_arith_reg_operand" "r") | |
2698 (and:SI (match_dup 2) (match_dup 0)))] | |
2699 "mcore_const_trick_uses_not (INTVAL (operands[1])) && | |
2700 operands[0] != operands[2] && | |
2701 mcore_is_dead (insn, operands[0])" | |
2702 "* return mcore_output_andn (insn, operands);") | |
2703 | |
2704 ; when setting or clearing just two bits, it's cheapest to use two bseti's | |
2705 ; or bclri's. only happens when relaxing immediates. BRC | |
2706 | |
; NOTE(review): a constant with exactly two set bits OR'd into a register
; is cheaper as two bseti insns than as a literal load + or (only under
; -mhardlit).  Dead-scratch check as above.
2707 (define_peephole | |
2708 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") | |
2709 (match_operand:SI 1 "const_int_operand" "")) | |
2710 (set (match_operand:SI 2 "mcore_arith_reg_operand" "") | |
2711 (ior:SI (match_dup 2) (match_dup 0)))] | |
2712 "TARGET_HARDLIT | |
2713 && mcore_num_ones (INTVAL (operands[1])) == 2 | |
2714 && mcore_is_dead (insn, operands[0])" | |
2715 "* return mcore_output_bseti (operands[2], INTVAL (operands[1]));") | |
2716 | |
; NOTE(review): dual of the bseti peephole -- a mask with exactly two
; clear bits AND'd in becomes two bclri insns.
2717 (define_peephole | |
2718 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") | |
2719 (match_operand:SI 1 "const_int_operand" "")) | |
2720 (set (match_operand:SI 2 "mcore_arith_reg_operand" "") | |
2721 (and:SI (match_dup 2) (match_dup 0)))] | |
2722 "TARGET_HARDLIT && mcore_num_zeros (INTVAL (operands[1])) == 2 && | |
2723 mcore_is_dead (insn, operands[0])" | |
2724 "* return mcore_output_bclri (operands[2], INTVAL (operands[1]));") | |
2725 | |
2726 ; change an and with a mask that has a single cleared bit into a bclri. this | |
2727 ; handles QI and HI mode values using the knowledge that the most significant | |
2728 ; bits don't matter. | |
2729 | |
; NOTE(review): QImode-subreg variant -- fold the constant's upper 24 bits
; to ones (they are don't-cares for a byte value) and, if exactly one bit
; is then clear, emit an optional mov plus a single bclri.  Kept separate
; from the HImode twin below per the /* Do not fold */ warning after it.
2730 (define_peephole | |
2731 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") | |
2732 (match_operand:SI 1 "const_int_operand" "")) | |
2733 (set (match_operand:SI 2 "mcore_arith_reg_operand" "") | |
2734 (and:SI (match_operand:SI 3 "mcore_arith_reg_operand" "") | |
2735 (match_dup 0)))] | |
2736 "GET_CODE (operands[3]) == SUBREG && | |
2737 GET_MODE (SUBREG_REG (operands[3])) == QImode && | |
2738 mcore_num_zeros (INTVAL (operands[1]) | 0xffffff00) == 1 && | |
2739 mcore_is_dead (insn, operands[0])" | |
2740 "* | |
2741 if (! mcore_is_same_reg (operands[2], operands[3])) | |
2742 output_asm_insn (\"mov\\t%2,%3\", operands); | |
2743 return mcore_output_bclri (operands[2], INTVAL (operands[1]) | 0xffffff00);") | |
2744 | |
2745 /* Do not fold these together -- mode is lost at final output phase. */ | |
2746 | |
; NOTE(review): HImode twin of the previous peephole (upper 16 bits are
; don't-cares).  The extra operands[2] == operands[3] condition makes the
; "mov %2,%3" branch of the template unreachable here -- presumably
; intentional belt-and-braces, but worth confirming.
2747 (define_peephole | |
2748 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") | |
2749 (match_operand:SI 1 "const_int_operand" "")) | |
2750 (set (match_operand:SI 2 "mcore_arith_reg_operand" "") | |
2751 (and:SI (match_operand:SI 3 "mcore_arith_reg_operand" "") | |
2752 (match_dup 0)))] | |
2753 "GET_CODE (operands[3]) == SUBREG && | |
2754 GET_MODE (SUBREG_REG (operands[3])) == HImode && | |
2755 mcore_num_zeros (INTVAL (operands[1]) | 0xffff0000) == 1 && | |
2756 operands[2] == operands[3] && | |
2757 mcore_is_dead (insn, operands[0])" | |
2758 "* | |
2759 if (! mcore_is_same_reg (operands[2], operands[3])) | |
2760 output_asm_insn (\"mov\\t%2,%3\", operands); | |
2761 return mcore_output_bclri (operands[2], INTVAL (operands[1]) | 0xffff0000);") | |
2762 | |
2763 ; This peephole helps when using -mwide-bitfields to widen fields so they | |
2764 ; collapse. This, however, has the effect that a narrower mode is not used | |
2765 ; when desirable. | |
2766 ; | |
2767 ; e.g., sequences like: | |
2768 ; | |
2769 ; ldw r8,(r6) | |
2770 ; movi r7,0x00ffffff | |
2771 ; and r8,r7 r7 dead | |
2772 ; stw r8,(r6) r8 dead | |
2773 ; | |
2774 ; get peepholed to become: | |
2775 ; | |
2776 ; movi r8,0 | |
2777 ; stb r8,(r6) r8 dead | |
2778 ; | |
2779 ; Do only easy addresses that have no offset. This peephole is also applied | |
2780 ; to halfwords. We need to check that the load is non-volatile before we get | |
2781 ; rid of it. | |
2782 | |
; NOTE(review): the ldw/movi/and/stw -> movi 0 + narrow store rewrite
; described above.  mcore_byte_offset / mcore_halfword_offset decide
; whether the AND mask clears exactly one byte or halfword and at what
; offset; the template then rebuilds operands[4] as a QI/HI mem (reg-only
; base, optionally plus the computed offset) and stores a zero through the
; now-dead operands[0].  Both mems must be non-volatile and both scratch
; registers dead for the load to be safely discarded.
2783 (define_peephole | |
2784 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") | |
2785 (match_operand:SI 1 "memory_operand" "")) | |
2786 (set (match_operand:SI 2 "mcore_arith_reg_operand" "") | |
2787 (match_operand:SI 3 "const_int_operand" "")) | |
2788 (set (match_dup 0) (and:SI (match_dup 0) (match_dup 2))) | |
2789 (set (match_operand:SI 4 "memory_operand" "") (match_dup 0))] | |
2790 "mcore_is_dead (insn, operands[0]) && | |
2791 ! MEM_VOLATILE_P (operands[1]) && | |
2792 mcore_is_dead (insn, operands[2]) && | |
2793 (mcore_byte_offset (INTVAL (operands[3])) > -1 || | |
2794 mcore_halfword_offset (INTVAL (operands[3])) > -1) && | |
2795 ! MEM_VOLATILE_P (operands[4]) && | |
2796 GET_CODE (XEXP (operands[4], 0)) == REG" | |
2797 "* | |
2798 { | |
2799 int ofs; | |
2800 enum machine_mode mode; | |
2801 rtx base_reg = XEXP (operands[4], 0); | |
2802 | |
2803 if ((ofs = mcore_byte_offset (INTVAL (operands[3]))) > -1) | |
2804 mode = QImode; | |
2805 else if ((ofs = mcore_halfword_offset (INTVAL (operands[3]))) > -1) | |
2806 mode = HImode; | |
2807 else | |
2808 gcc_unreachable (); | |
2809 | |
2810 if (ofs > 0) | |
2811 operands[4] = gen_rtx_MEM (mode, | |
2812 gen_rtx_PLUS (SImode, base_reg, GEN_INT(ofs))); | |
2813 else | |
2814 operands[4] = gen_rtx_MEM (mode, base_reg); | |
2815 | |
2816 if (mode == QImode) | |
2817 return \"movi %0,0\\n\\tst.b %0,%4\"; | |
2818 | |
2819 return \"movi %0,0\\n\\tst.h %0,%4\"; | |
2820 }") | |
2821 | |
2822 ; from sop11. get btsti's for (LT A 0) where A is a QI or HI value | |
2823 | |
; NOTE(review): (LT A 0) on a sign-extended QI value only depends on bit 7
; of the original byte, so sextb + compare collapses to btsti %0,7 when
; the extended result is otherwise dead.
2824 (define_peephole | |
2825 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r") | |
2826 (sign_extend:SI (match_operand:QI 1 "mcore_arith_reg_operand" "0"))) | |
2827 (set (reg:CC 17) | |
2828 (lt:CC (match_dup 0) | |
2829 (const_int 0)))] | |
2830 "mcore_is_dead (insn, operands[0])" | |
2831 "btsti %0,7") | |
2832 | |
; NOTE(review): HImode twin of the above -- the sign lives in bit 15.
2833 (define_peephole | |
2834 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r") | |
2835 (sign_extend:SI (match_operand:HI 1 "mcore_arith_reg_operand" "0"))) | |
2836 (set (reg:CC 17) | |
2837 (lt:CC (match_dup 0) | |
2838 (const_int 0)))] | |
2839 "mcore_is_dead (insn, operands[0])" | |
2840 "btsti %0,15") | |
2841 | |
2842 ; Pick up a tst. This combination happens because the immediate is not | |
2843 ; allowed to fold into one of the operands of the tst. Does not happen | |
2844 ; when relaxing immediates. BRC | |
2845 | |
; NOTE(review): mov/and-K/compare-ne-0 becomes movi of the mask into the
; (dead) scratch plus a single tst against the original register,
; dropping the intermediate copy.
2846 (define_peephole | |
2847 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") | |
2848 (match_operand:SI 1 "mcore_arith_reg_operand" "")) | |
2849 (set (match_dup 0) | |
2850 (and:SI (match_dup 0) | |
2851 (match_operand:SI 2 "mcore_literal_K_operand" ""))) | |
2852 (set (reg:CC 17) (ne:CC (match_dup 0) (const_int 0)))] | |
2853 "mcore_is_dead (insn, operands[0])" | |
2854 "movi %0,%2\;tst %1,%0") | |
2855 | |
; NOTE(review): a single-bit test feeding a conditional select whose
; result is then compared against zero.  The template emits btsti on the
; tested bit followed by clrf / clrt / movt / movf depending on which arm
; aliases the destination and which arm is the constant zero; any operand
; combination not handled falls into gcc_unreachable(), i.e. the peephole
; assumes only these shapes ever reach it -- worth confirming if touched.
2856 (define_peephole | |
2857 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") | |
2858 (if_then_else:SI (ne (zero_extract:SI | |
2859 (match_operand:SI 1 "mcore_arith_reg_operand" "") | |
2860 (const_int 1) | |
2861 (match_operand:SI 2 "mcore_literal_K_operand" "")) | |
2862 (const_int 0)) | |
2863 (match_operand:SI 3 "mcore_arith_imm_operand" "") | |
2864 (match_operand:SI 4 "mcore_arith_imm_operand" ""))) | |
2865 (set (reg:CC 17) (ne:CC (match_dup 0) (const_int 0)))] | |
2866 "" | |
2867 "* | |
2868 { | |
2869 unsigned int op0 = REGNO (operands[0]); | |
2870 | |
2871 if (GET_CODE (operands[3]) == REG) | |
2872 { | |
2873 if (REGNO (operands[3]) == op0 && GET_CODE (operands[4]) == CONST_INT | |
2874 && INTVAL (operands[4]) == 0) | |
2875 return \"btsti %1,%2\\n\\tclrf %0\"; | |
2876 else if (GET_CODE (operands[4]) == REG) | |
2877 { | |
2878 if (REGNO (operands[4]) == op0) | |
2879 return \"btsti %1,%2\\n\\tmovf %0,%3\"; | |
2880 else if (REGNO (operands[3]) == op0) | |
2881 return \"btsti %1,%2\\n\\tmovt %0,%4\"; | |
2882 } | |
2883 | |
2884 gcc_unreachable (); | |
2885 } | |
2886 else if (GET_CODE (operands[3]) == CONST_INT | |
2887 && INTVAL (operands[3]) == 0 | |
2888 && GET_CODE (operands[4]) == REG) | |
2889 return \"btsti %1,%2\\n\\tclrt %0\"; | |
2890 | |
2891 gcc_unreachable (); | |
2892 }") | |
2893 | |
2894 ; experimental - do the constant folding ourselves. note that this isn't | |
2895 ; re-applied like we'd really want. i.e., four ands collapse into two | |
2896 ; instead of one. this is because peepholes are applied as a sliding | |
2897 ; window. the peephole does not generate new rtl's, but instead slides | |
2898 ; across the rtl's generating machine instructions. it would be nice | |
2899 ; if the peephole optimizer is changed to re-apply patterns and to gen | |
2900 ; new rtl's. this is more flexible. the pattern below helps when we're | |
2901 ; not using relaxed immediates. BRC | |
2902 | |
2903 ;(define_peephole | |
2904 ; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "") | |
2905 ; (match_operand:SI 1 "const_int_operand" "")) | |
2906 ; (set (match_operand:SI 2 "mcore_arith_reg_operand" "") | |
2907 ; (and:SI (match_dup 2) (match_dup 0))) | |
2908 ; (set (match_dup 0) | |
2909 ; (match_operand:SI 3 "const_int_operand" "")) | |
2910 ; (set (match_dup 2) | |
2911 ; (and:SI (match_dup 2) (match_dup 0)))] | |
2912 ; "!TARGET_RELAX_IMM && mcore_is_dead (insn, operands[0]) && | |
2913 ; mcore_const_ok_for_inline (INTVAL (operands[1]) & INTVAL (operands[3]))" | |
2914 ; "* | |
2915 ;{ | |
2916 ; rtx out_operands[2]; | |
2917 ; out_operands[0] = operands[0]; | |
2918 ; out_operands[1] = GEN_INT (INTVAL (operands[1]) & INTVAL (operands[3])); | |
2919 ; | |
2920 ; output_inline_const (SImode, out_operands); | |
2921 ; | |
2922 ; output_asm_insn (\"and %2,%0\", operands); | |
2923 ; | |
2924 ; return \"\"; | |
2925 ;}") | |
2926 | |
2927 ; BRC: for inlining get rid of extra test - experimental | |
2928 ;(define_peephole | |
2929 ; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r") | |
2930 ; (ne:SI (reg:CC 17) (const_int 0))) | |
2931 ; (set (reg:CC 17) (ne:CC (match_dup 0) (const_int 0))) | |
2932 ; (set (pc) | |
2933 ; (if_then_else (eq (reg:CC 17) (const_int 0)) | |
2934 ; (label_ref (match_operand 1 "" "")) | |
2935 ; (pc)))] | |
2936 ; "" | |
2937 ; "* | |
2938 ;{ | |
2939 ; if (get_attr_length (insn) == 10) | |
2940 ; { | |
2941 ; output_asm_insn (\"bt 2f\\n\\tjmpi [1f]\", operands); | |
2942 ; output_asm_insn (\".align 2\\n1:\", operands); | |
2943 ; output_asm_insn (\".long %1\\n2:\", operands); | |
2944 ; return \"\"; | |
2945 ; } | |
2946 ; return \"bf %l1\"; | |
2947 ;}") | |
2948 | |
2949 | |
2950 ;;; Special patterns for dealing with the constant pool. | |
2951 | |
2952 ;;; 4 byte integer in line. | |
2953 | |
; NOTE(review): emits a 4-byte literal directly into the instruction
; stream via assemble_integer; unspec_volatile 0 keeps the optimizers
; from touching or moving it.
2954 (define_insn "consttable_4" | |
2955 [(unspec_volatile [(match_operand:SI 0 "general_operand" "=g")] 0)] | |
2956 "" | |
2957 "* | |
2958 { | |
2959 assemble_integer (operands[0], 4, BITS_PER_WORD, 1); | |
2960 return \"\"; | |
2961 }" | |
2962 [(set_attr "length" "4")]) | |
2963 | |
2964 ;;; align to a four byte boundary. | |
2965 | |
; NOTE(review): four-byte alignment directive (".align 2" is a power-of-
; two argument here, i.e. 2^2 = 4 bytes) for the constant pool.
2966 (define_insn "align_4" | |
2967 [(unspec_volatile [(const_int 0)] 1)] | |
2968 "" | |
2969 ".align 2") | |
2970 | |
2971 ;;; Handle extra constant pool entries created during final pass. | |
2972 | |
; NOTE(review): flushes any constant-pool entries accumulated during the
; final pass (see mcore_output_jump_label_table in mcore.c).
2973 (define_insn "consttable_end" | |
2974 [(unspec_volatile [(const_int 0)] 2)] | |
2975 "" | |
2976 "* return mcore_output_jump_label_table ();") | |
2977 | |
2978 ;; | |
2979 ;; Stack allocation -- in particular, for alloca(). | |
2980 ;; this is *not* what we use for entry into functions. | |
2981 ;; | |
2982 ;; This is how we allocate stack space. If we are allocating a | |
2983 ;; constant amount of space and we know it is less than 4096 | |
2984 ;; bytes, we need do nothing. | |
2985 ;; | |
2986 ;; If it is more than 4096 bytes, we need to probe the stack | |
2987 ;; periodically. | |
2988 ;; | |
2989 ;; operands[1], the distance is a POSITIVE number indicating that we | |
2990 ;; are allocating stack space | |
2991 ;; | |
; NOTE(review): alloca-style stack allocation with optional probing.
; Three paths: (1) -mstack-increment=0 -> one unprobed sp adjustment;
; (2) small constant sizes (< 8 * STACK_UNITS_MAXSTEP) -> unrolled
; probe-as-you-go subtracts; (3) everything else -> a runtime loop that
; steps sp down by STACK_UNITS_MAXSTEP, storing through a volatile mem at
; each step to touch the guard page, then subtracts the residual.
; CAUTION(review): this hunk of the annotate view is interleaved with
; Mercurial changeset metadata (the "77e2b8dfacca ... changeset" runs
; below); the underlying source lines 3042/3051/3052/3066/3067 are intact
; but displayed piecemeal.  Preserved byte-for-byte -- do not re-flow.
2992 (define_expand "allocate_stack" | |
2993 [(set (reg:SI 0) | |
2994 (plus:SI (reg:SI 0) | |
2995 (match_operand:SI 1 "general_operand" ""))) | |
2996 (set (match_operand:SI 0 "register_operand" "=r") | |
2997 (match_dup 2))] | |
2998 "" | |
2999 " | |
3000 { | |
3001 /* If he wants no probing, just do it for him. */ | |
3002 if (mcore_stack_increment == 0) | |
3003 { | |
3004 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,operands[1])); | |
3005 ;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx); | |
3006 DONE; | |
3007 } | |
3008 | |
3009 /* For small constant growth, we unroll the code. */ | |
3010 if (GET_CODE (operands[1]) == CONST_INT | |
3011 && INTVAL (operands[1]) < 8 * STACK_UNITS_MAXSTEP) | |
3012 { | |
3013 HOST_WIDE_INT left = INTVAL(operands[1]); | |
3014 | |
3015 /* If it's a long way, get close enough for a last shot. */ | |
3016 if (left >= STACK_UNITS_MAXSTEP) | |
3017 { | |
3018 rtx tmp = gen_reg_rtx (Pmode); | |
3019 emit_insn (gen_movsi (tmp, GEN_INT (STACK_UNITS_MAXSTEP))); | |
3020 do | |
3021 { | |
3022 rtx memref = gen_rtx_MEM (SImode, stack_pointer_rtx); | |
3023 | |
3024 MEM_VOLATILE_P (memref) = 1; | |
3025 emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp)); | |
3026 emit_insn (gen_movsi (memref, stack_pointer_rtx)); | |
3027 left -= STACK_UNITS_MAXSTEP; | |
3028 } | |
3029 while (left > STACK_UNITS_MAXSTEP); | |
3030 } | |
3031 /* Perform the final adjustment. */ | |
3032 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-left))); | |
3033 ;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx); | |
3034 DONE; | |
3035 } | |
3036 else | |
3037 { | |
3038 rtx out_label = 0; | |
3039 rtx loop_label = gen_label_rtx (); | |
3040 rtx step = gen_reg_rtx (Pmode); | |
3041 rtx tmp = gen_reg_rtx (Pmode); | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
3042 rtx test, memref; |
0 | 3043 |
3044 #if 1 | |
3045 emit_insn (gen_movsi (tmp, operands[1])); | |
3046 emit_insn (gen_movsi (step, GEN_INT (STACK_UNITS_MAXSTEP))); | |
3047 | |
3048 if (GET_CODE (operands[1]) != CONST_INT) | |
3049 { | |
3050 out_label = gen_label_rtx (); | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
3051 test = gen_rtx_GEU (VOIDmode, step, tmp); /* quick out */ |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
3052 emit_jump_insn (gen_cbranchsi4 (test, step, tmp, out_label)); |
0 | 3053 } |
3054 | |
3055 /* Run a loop that steps it incrementally. */ | |
3056 emit_label (loop_label); | |
3057 | |
3058 /* Extend a step, probe, and adjust remaining count. */ | |
3059 emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx, step)); | |
3060 memref = gen_rtx_MEM (SImode, stack_pointer_rtx); | |
3061 MEM_VOLATILE_P (memref) = 1; | |
3062 emit_insn(gen_movsi(memref, stack_pointer_rtx)); | |
3063 emit_insn(gen_subsi3(tmp, tmp, step)); | |
3064 | |
3065 /* Loop condition -- going back up. */ | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
3066 test = gen_rtx_LTU (VOIDmode, step, tmp); |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
0
diff
changeset
|
3067 emit_jump_insn (gen_cbranchsi4 (test, step, tmp, loop_label)); |
0 | 3068 |
3069 if (out_label) | |
3070 emit_label (out_label); | |
3071 | |
3072 /* Bump the residual. */ | |
3073 emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp)); | |
3074 ;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx); | |
3075 DONE; | |
3076 #else | |
3077 /* simple one-shot -- ensure register and do a subtract. | |
3078 * This does NOT comply with the ABI. */ | |
3079 emit_insn (gen_movsi (tmp, operands[1])); | |
3080 emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp)); | |
3081 ;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx); | |
3082 DONE; | |
3083 #endif | |
3084 } | |
3085 }") |