Mercurial > hg > CbC > CbC_gcc
comparison gcc/config/sh/sync.md @ 111:04ced10e8804
gcc 7
author | kono |
---|---|
date | Fri, 27 Oct 2017 22:46:09 +0900 |
parents | |
children | 84e7813d76e9 |
comparison
equal
deleted
inserted
replaced
68:561a7518be6b | 111:04ced10e8804 |
---|---|
1 ;; GCC machine description for SH synchronization instructions. | |
2 ;; Copyright (C) 2011-2017 Free Software Foundation, Inc. | |
3 ;; | |
4 ;; This file is part of GCC. | |
5 ;; | |
6 ;; GCC is free software; you can redistribute it and/or modify | |
7 ;; it under the terms of the GNU General Public License as published by | |
8 ;; the Free Software Foundation; either version 3, or (at your option) | |
9 ;; any later version. | |
10 ;; | |
11 ;; GCC is distributed in the hope that it will be useful, | |
12 ;; but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 ;; GNU General Public License for more details. | |
15 ;; | |
16 ;; You should have received a copy of the GNU General Public License | |
17 ;; along with GCC; see the file COPYING3. If not see | |
18 ;; <http://www.gnu.org/licenses/>. | |
19 ;; | |
20 ;; | |
21 ;; Atomic integer operations for the Renesas / SuperH SH CPUs. | |
22 ;; | |
23 ;; On SH CPUs atomic integer operations can be done either in 'software' or | |
24 ;; in 'hardware' in various styles. True hardware support was introduced | |
25 ;; with the SH4A. Some SH2A dual-core models (e.g. SH7205) also come with | |
26 ;; 'semaphore' hardware registers, but these are currently unsupported. | |
27 ;; All SH CPUs support the 'tas.b' instruction, which can be optionally used | |
28 ;; to implement the 'atomic_test_and_set' builtin. | |
29 ;; The following atomic options and models are supported. | |
30 ;; | |
31 ;; tas.b atomic_test_and_set (-mtas) | |
32 ;; | |
33 ;; Depending on the particular hardware configuration, usage of the 'tas.b' | |
34 ;; instruction might be undesired or even unsafe. Thus, it has to be | |
35 ;; enabled by the user explicitly. If it is not enabled, the | |
36 ;; 'atomic_test_and_set' builtin is implemented either with hardware or with | |
37 ;; software atomics, depending on which is enabled. It is also possible to | |
38 ;; enable the 'tas.b' instruction only, without enabling support for the | |
39 ;; other atomic operations. | |
40 ;; | |
41 ;; | |
42 ;; Hardware Atomics (-matomic-model=hard-llcs; SH4A only) | |
43 ;; | |
44 ;; Hardware atomics implement all atomic operations using the 'movli.l' and | |
45 ;; 'movco.l' instructions that are available on SH4A. On multi-core hardware | |
46 ;; configurations hardware atomics is the only safe mode. | |
47 ;; However, it can also be safely used on single-core configurations. | |
48 ;; Since these instructions operate on SImode memory only, QImode and HImode | |
49 ;; have to be emulated with SImode and subreg masking, which results in | |
50 ;; larger code. | |
51 ;; | |
52 ;; | |
53 ;; gUSA Software Atomics (-matomic-model=soft-gusa; SH3*, SH4* only) | |
54 ;; | |
55 ;; On single-core systems there can only be one execution context running | |
56 ;; at a given point in time. This allows the usage of rewindable atomic | |
57 ;; sequences, which effectively emulate locked-load / conditional-store | |
58 ;; operations. This requires complementary support in the interrupt / | |
59 ;; exception handling code (e.g. kernel) and does not work safely on multi- | |
60 ;; core configurations. | |
61 ;; | |
62 ;; When an execution context is interrupted while it is in an atomic | |
63 ;; sequence, the interrupted context's PC is rewound to the beginning of | |
64 ;; the atomic sequence by the interrupt / exception handling code, before | |
65 ;; transferring control to another execution context. This is done by | |
66 ;; something like... | |
67 ;; | |
68 ;; if (interrupted_context_in_atomic_sequence | |
69 ;; && interrupted_pc < atomic_exitpoint) | |
70 ;; interrupted_pc = atomic_entrypoint; | |
71 ;; | |
72 ;; This method is also known as gUSA ("g" User Space Atomicity) and the | |
73 ;; Linux kernel for SH3/SH4 implements support for such software atomic | |
74 ;; sequences. It can also be implemented in freestanding environments. | |
75 ;; | |
76 ;; For this the following atomic sequence ABI is used. | |
77 ;; | |
78 ;; r15 >= 0: Execution context is not in an atomic sequence. | |
79 ;; | |
80 ;; r15 < 0: Execution context is in an atomic sequence and r15 | |
81 ;; holds the negative byte length of the atomic sequence. | |
82 ;; In this case the following applies: | |
83 ;; | |
84 ;; r0: PC of the first instruction after the atomic | |
85 ;; write-back instruction (exit point). | |
86 ;; The entry point PC of the atomic sequence can be | |
87 ;; determined by doing r0 + r15. | |
88 ;; | |
89 ;; r1: Saved r15 stack pointer before entering the | |
90 ;; atomic sequence. | |
91 ;; | |
92 ;; An example atomic add sequence would look like: | |
93 ;; | |
94 ;; mova .Lend,r0 ! .Lend must be 4-byte aligned. | |
95 ;; mov r15,r1 | |
96 ;; .align 2 ! Insert aligning nop if needed. | |
97 ;; mov #(.Lstart - .Lend),r15 ! Enter atomic sequence | |
98 ;;.Lstart: | |
99 ;; mov.l @r4,r2 ! read value | |
100 ;; add r2,r5 ! modify value | |
101 ;; mov.l r5,@r4 ! write-back | |
102 ;;.Lend: | |
103 ;; mov r1,r15 ! Exit atomic sequence | |
104 ;; ! r2 holds the previous value. | |
105 ;; ! r5 holds the new value. | |
106 ;; | |
107 ;; Notice that due to the restrictions of the mova instruction, the .Lend | |
108 ;; label must always be 4-byte aligned. Aligning the .Lend label would | |
109 ;; potentially insert a nop after the write-back instruction which could | |
110 ;; make the sequence to be rewound, although it has already passed the | |
111 ;; write-back instruction. This would make it execute twice. | |
112 ;; For correct operation the atomic sequences must not be rewound after | |
113 ;; they have passed the write-back instruction. | |
114 ;; | |
115 ;; This model works only on SH3* and SH4* because the stack pointer (r15) | |
116 ;; is set to an invalid pointer temporarily. SH1* and SH2* CPUs will try | |
117 ;; to push SR and PC registers on the stack when an interrupt / exception | |
118 ;; occurs, and thus require the stack pointer (r15) always to be valid. | |
119 ;; | |
120 ;; | |
121 ;; TCB Software Atomics (-matomic-model=soft-tcb) | |
122 ;; | |
123 ;; This model is a variation of the gUSA model. The concept of rewindable | |
124 ;; atomic sequences is the same, but it does not use the stack pointer (r15) | |
125 ;; for signaling the 'is in atomic sequence' condition. Instead, a variable | |
126 ;; in the thread control block (TCB) is set to hold the exit point of the | |
127 ;; atomic sequence. This assumes that the GBR is used as a thread pointer | |
128 ;; register. The offset of the variable in the TCB to be used must be | |
129 ;; specified with an additional option 'gbr-offset', such as: | |
130 ;; -matomic-model=soft-tcb,gbr-offset=4 | |
131 ;; | |
132 ;; For this model the following atomic sequence ABI is used. | |
133 ;; | |
134 ;; @(#x,gbr) == 0: Execution context is not in an atomic sequence. | |
135 ;; | |
136 ;; @(#x,gbr) != 0: Execution context is in an atomic sequence. In this | |
137 ;; case the following applies: | |
138 ;; | |
139 ;; @(#x,gbr): PC of the first instruction after the atomic | |
140 ;; write-back instruction (exit point). | |
141 ;; | |
142 ;; r1: Negative byte length of the atomic sequence. | |
143 ;; The entry point PC of the sequence can be | |
144 ;; determined by doing @(#x,gbr) + r1 | |
145 ;; | |
146 ;; Note: #x is the user specified gbr-offset. | |
147 ;; | |
148 ;; | |
149 ;; Interrupt-Flipping Software Atomics (-matomic-model=soft-imask) | |
150 ;; | |
151 ;; This model achieves atomicity by temporarily disabling interrupts for | |
152 ;; the duration of the atomic sequence. This works only when the program | |
153 ;; runs in privileged mode but does not require any support from the | |
154 ;; interrupt / exception handling code. There is no particular ABI. | |
155 ;; To disable interrupts the SR.IMASK bits are set to '1111'. | |
156 ;; This method is not as efficient as the other software atomic models, | |
157 ;; since loading and storing SR (in order to flip interrupts on / off) | |
158 ;; requires using multi-cycle instructions. Moreover, it can potentially | |
159 ;; increase the interrupt latency which might be important for hard-realtime | |
160 ;; applications. | |
161 ;; | |
162 ;; | |
163 ;; Compatibility Notes | |
164 ;; | |
165 ;; On single-core SH4A CPUs software atomic aware interrupt / exception code | |
166 ;; is actually compatible with user code that utilizes hardware atomics. | |
167 ;; Since SImode hardware atomic sequences are more compact on SH4A they are | |
168 ;; always used, regardless of the selected atomic model. This atomic model | |
169 ;; mixing can be disabled by setting the 'strict' flag, like: | |
170 ;; -matomic-model=soft-gusa,strict | |
171 ;; | |
172 ;; The software atomic models are generally compatible with each other, | |
173 ;; but the interrupt / exception handling code has to support both gUSA and | |
174 ;; TCB models. | |
175 ;; | |
176 ;; The current atomic support is limited to QImode, HImode and SImode | |
177 ;; atomic operations. DImode operations could also be implemented but | |
178 ;; would require some ABI modifications to support multiple-instruction | |
179 ;; write-back. This is because SH1/SH2/SH3/SH4 does not have a DImode | |
180 ;; store instruction. DImode stores must be split into two SImode stores. | |
181 | |
;; Unspec marking the non-volatile read-modify-write value in the atomic
;; fetch-op patterns below.
(define_c_enum "unspec" [
  UNSPEC_ATOMIC
])
185 | |
;; Volatile unspecs used by the three RTL pieces of every compare-and-swap
;; pattern: the old-value read, the memory write-back and the T_REG result.
(define_c_enum "unspecv" [
  UNSPECV_CMPXCHG_1
  UNSPECV_CMPXCHG_2
  UNSPECV_CMPXCHG_3
])
191 | |
;; Per-mode sign-extending move used in the asm templates below to widen the
;; QImode/HImode 'expected value' operand before the cmp/eq against the
;; loaded value (plain 'mov' for SImode).
(define_mode_attr i124extend_insn [(QI "exts.b") (HI "exts.w") (SI "mov")])
193 | |
;; RTL codes covered by the atomic fetch-op / op-fetch patterns, and the
;; corresponding SH mnemonic emitted in their asm templates.
(define_code_iterator FETCHOP [plus minus ior xor and])
(define_code_attr fetchop_name
  [(plus "add") (minus "sub") (ior "or") (xor "xor") (and "and")])
197 | |
198 ;;------------------------------------------------------------------------------ | |
199 ;; compare and swap | |
200 | |
;; Only the hard_llcs SImode patterns can use an I08 for the comparison
;; or for the new swapped in value.
(define_predicate "atomic_arith_operand_0"
  (and (match_code "subreg,reg,const_int")
       (ior (match_operand 0 "arith_reg_operand")
	    (and (match_test "satisfies_constraint_I08 (op)")
		 (match_test "mode == SImode")
		 (ior (match_test "TARGET_ATOMIC_HARD_LLCS")
		      (match_test "TARGET_ATOMIC_ANY && TARGET_SH4A
				   && !TARGET_ATOMIC_STRICT"))))))
211 | |
;; Displacement addressing can be used for all SImode atomic patterns, except
;; llcs.
(define_predicate "atomic_mem_operand_0"
  (and (match_code "mem")
       (ior (match_operand 0 "simple_mem_operand")
	    (and (match_test "mode == SImode")
		 (and (match_test "!TARGET_ATOMIC_HARD_LLCS")
		      (match_test "!TARGET_SH4A || TARGET_ATOMIC_STRICT"))
		 (match_operand 0 "short_displacement_mem_operand")))))
221 | |
;; Expander for __atomic_compare_exchange.  Dispatches to the hard (llcs),
;; soft-gusa, soft-tcb or soft-imask implementation according to the selected
;; atomic model, then zero-extends the QImode/HImode old value and copies the
;; T_REG comparison result into the bool output.
(define_expand "atomic_compare_and_swap<mode>"
  [(match_operand:SI 0 "arith_reg_dest")		;; bool success output
   (match_operand:QIHISI 1 "arith_reg_dest")		;; oldval output
   (match_operand:QIHISI 2 "atomic_mem_operand_0")	;; memory
   (match_operand:QIHISI 3 "atomic_arith_operand_0")	;; expected input
   (match_operand:QIHISI 4 "atomic_arith_operand_0")	;; newval input
   (match_operand:SI 5 "const_int_operand")		;; is_weak
   (match_operand:SI 6 "const_int_operand")		;; success model
   (match_operand:SI 7 "const_int_operand")]		;; failure model
  "TARGET_ATOMIC_ANY"
{
  rtx mem = operands[2];
  rtx old_val = gen_lowpart (SImode, operands[1]);
  rtx exp_val = operands[3];
  rtx new_val = operands[4];
  rtx atomic_insn;

  /* SImode hard sequences are also used on SH4A with a soft model selected,
     unless the user requested strict model separation.  */
  if (TARGET_ATOMIC_HARD_LLCS
      || (TARGET_SH4A && <MODE>mode == SImode && !TARGET_ATOMIC_STRICT))
    atomic_insn = gen_atomic_compare_and_swap<mode>_hard (old_val, mem,
							  exp_val, new_val);
  else if (TARGET_ATOMIC_SOFT_GUSA)
    atomic_insn = gen_atomic_compare_and_swap<mode>_soft_gusa (old_val, mem,
		      exp_val, new_val);
  else if (TARGET_ATOMIC_SOFT_TCB)
    atomic_insn = gen_atomic_compare_and_swap<mode>_soft_tcb (old_val, mem,
		      exp_val, new_val, TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX);
  else if (TARGET_ATOMIC_SOFT_IMASK)
    atomic_insn = gen_atomic_compare_and_swap<mode>_soft_imask (old_val, mem,
		      exp_val, new_val);
  else
    FAIL;

  emit_insn (atomic_insn);

  if (<MODE>mode == QImode)
    emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[1]),
				     operands[1]));
  else if (<MODE>mode == HImode)
    emit_insn (gen_zero_extendhisi2 (gen_lowpart (SImode, operands[1]),
				     operands[1]));
  emit_insn (gen_movsi (operands[0], gen_rtx_REG (SImode, T_REG)));
  DONE;
})
266 | |
;; SImode hardware compare-and-swap using the SH4A movli.l / movco.l
;; locked-load / conditional-store pair.  The split tries to re-propagate an
;; I08 immediate into the 'expected value' operand (see PR 64974).
(define_insn_and_split "atomic_compare_and_swapsi_hard"
  [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
	(unspec_volatile:SI
	  [(match_operand:SI 1 "atomic_mem_operand_0" "=Sra")
	   (match_operand:SI 2 "arith_operand" "rI08")
	   (match_operand:SI 3 "arith_operand" "rI08")]
	  UNSPECV_CMPXCHG_1))
   (set (match_dup 1)
	(unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_2))
   (set (reg:SI T_REG)
	(unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS
   || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
{
  return "\r0: movli.l %1,r0" "\n"
	 " cmp/eq %2,r0" "\n"
	 " bf{.|/}s 0f" "\n"
	 " mov r0,%0" "\n"
	 " mov %3,r0" "\n"
	 " movco.l r0,%1" "\n"
	 " bf 0b" "\n"
	 "0:";
}
  "&& can_create_pseudo_p () && !satisfies_constraint_I08 (operands[2])"
  [(const_int 0)]
{
  /* FIXME: Sometimes the 'expected value' operand is not propagated as
     immediate value.  See PR 64974.  */
  set_of_reg op2 = sh_find_set_of_reg (operands[2], curr_insn,
				       prev_nonnote_insn_bb);
  if (op2.set_src != NULL && satisfies_constraint_I08 (op2.set_src))
    {
      rtx* r = &XVECEXP (XEXP (XVECEXP (PATTERN (curr_insn), 0, 0), 1), 0, 1);
      validate_change (curr_insn, r, op2.set_src, false);
      DONE;
    }
  else
    FAIL;
}
  [(set_attr "length" "14")])
308 | |
;; The QIHImode llcs patterns modify the address register of the memory
;; operand.  In order to express that, we have to open code the memory
;; operand.  Initially the insn is expanded like every other atomic insn
;; using the memory operand.  In split1 the insn is converted and the
;; memory operand's address register is exposed.
(define_insn_and_split "atomic_compare_and_swap<mode>_hard"
  [(set (match_operand:SI 0 "arith_reg_dest")
	(unspec_volatile:SI
	  [(match_operand:QIHI 1 "atomic_mem_operand_0")
	   (match_operand:QIHI 2 "arith_reg_operand")
	   (match_operand:QIHI 3 "arith_reg_operand")]
	  UNSPECV_CMPXCHG_1))
   (set (match_dup 1)
	(unspec_volatile:QIHI [(const_int 0)] UNSPECV_CMPXCHG_2))
   (set (reg:SI T_REG)
	(unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
  "#"
  "&& 1"
  [(const_int 0)]
{
  rtx i = gen_atomic_compare_and_swap<mode>_hard_1 (
	    operands[0], XEXP (operands[1], 0), operands[2], operands[3]);

  /* Replace the new mems in the new insn with the old mem to preserve
     aliasing info.  */
  XVECEXP (XEXP (XVECEXP (i, 0, 0), 1), 0, 0) = operands[1];
  XEXP (XVECEXP (i, 0, 1), 0) = operands[1];
  emit_insn (i);
})
340 | |
;; QImode/HImode hardware compare-and-swap.  movli.l / movco.l only operate
;; on SImode, so the containing aligned SImode word is reserved via the llcs
;; pair while the byte/word itself is accessed through the stack (r15).
(define_insn "atomic_compare_and_swap<mode>_hard_1"
  [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
	(unspec_volatile:SI
	  [(mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r"))
	   (match_operand:QIHI 2 "arith_reg_operand" "r")
	   (match_operand:QIHI 3 "arith_reg_operand" "r")]
	  UNSPECV_CMPXCHG_1))
   (set (mem:QIHI (match_dup 1))
	(unspec_volatile:QIHI [(const_int 0)] UNSPECV_CMPXCHG_2))
   (set (reg:SI T_REG)
	(unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:SI 4 "=&r"))
   (clobber (match_scratch:SI 5 "=&r"))
   (clobber (match_scratch:SI 6 "=1"))]
  "TARGET_ATOMIC_HARD_LLCS"
{
  return "\r mov #-4,%5" "\n"
	 " <i124extend_insn> %2,%4" "\n"
	 " and %1,%5" "\n"
	 " xor %5,%1" "\n"
	 " add r15,%1" "\n"
	 " add #-4,%1" "\n"
	 "0: movli.l @%5,r0" "\n"
	 " mov.l r0,@-r15" "\n"
	 " mov.<bw> @%1,%0" "\n"
	 " mov.<bw> %3,@%1" "\n"
	 " cmp/eq %4,%0" "\n"
	 " bf{.|/}s 0f" "\n"
	 " mov.l @r15+,r0" "\n"
	 " movco.l r0,@%5" "\n"
	 " bf 0b" "\n"
	 "0:";
}
  [(set_attr "length" "30")])
376 | |
;; gUSA software compare-and-swap: a rewindable atomic sequence entered by
;; making r15 negative (see the gUSA ABI description at the top of the file).
(define_insn "atomic_compare_and_swap<mode>_soft_gusa"
  [(set (match_operand:SI 0 "arith_reg_dest" "=&u")
	(unspec_volatile:SI
	  [(match_operand:QIHISI 1 "atomic_mem_operand_0" "=AraAdd")
	   (match_operand:QIHISI 2 "arith_reg_operand" "u")
	   (match_operand:QIHISI 3 "arith_reg_operand" "u")]
	  UNSPECV_CMPXCHG_1))
   (set (match_dup 1)
	(unspec_volatile:QIHISI [(const_int 0)] UNSPECV_CMPXCHG_2))
   (set (reg:SI T_REG)
	(unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
   (clobber (match_scratch:SI 4 "=&u"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_GUSA"
{
  return "\r mova 1f,r0" "\n"
	 " <i124extend_insn> %2,%4" "\n"
	 " .align 2" "\n"
	 " mov r15,r1" "\n"
	 " mov #(0f-1f),r15" "\n"
	 "0: mov.<bwl> %1,%0" "\n"
	 " cmp/eq %0,%4" "\n"
	 " bf 1f" "\n"
	 " mov.<bwl> %3,%1" "\n"
	 "1: mov r1,r15";
}
  [(set_attr "length" "20")])
405 | |
;; TCB software compare-and-swap: rewindable atomic sequence signalled via a
;; GBR-relative TCB slot (operand 4 is the user-specified gbr-offset).
(define_insn "atomic_compare_and_swap<mode>_soft_tcb"
  [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
	(unspec_volatile:SI
	  [(match_operand:QIHISI 1 "atomic_mem_operand_0" "=SraSdd")
	   (match_operand:QIHISI 2 "arith_reg_operand" "r")
	   (match_operand:QIHISI 3 "arith_reg_operand" "r")]
	  UNSPECV_CMPXCHG_1))
   (set (match_dup 1)
	(unspec_volatile:QIHISI [(const_int 0)] UNSPECV_CMPXCHG_2))
   (set (reg:SI T_REG)
	(unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
   (use (match_operand:SI 4 "gbr_displacement"))
   (clobber (match_scratch:SI 5 "=&r"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_TCB"
{
  return "\r mova 1f,r0" "\n"
	 " .align 2" "\n"
	 " <i124extend_insn> %2,%5" "\n"
	 " mov #(0f-1f),r1" "\n"
	 " mov.l r0,@(%O4,gbr)" "\n"
	 "0: mov.<bwl> %1,%0" "\n"
	 " mov #0,r0" "\n"
	 " cmp/eq %0,%5" "\n"
	 " bf 1f" "\n"
	 " mov.<bwl> %3,%1" "\n"
	 "1: mov.l r0,@(%O4,gbr)";
}
  [(set_attr "length" "22")])
436 | |
;; Interrupt-mask software compare-and-swap: atomicity is achieved by setting
;; SR.IMASK to 1111 for the duration of the sequence (privileged mode only).
(define_insn "atomic_compare_and_swap<mode>_soft_imask"
  [(set (match_operand:SI 0 "arith_reg_dest" "=&z")
	(unspec_volatile:SI
	  [(match_operand:QIHISI 1 "atomic_mem_operand_0" "=SraSdd")
	   (match_operand:QIHISI 2 "arith_reg_operand" "r")
	   (match_operand:QIHISI 3 "arith_reg_operand" "r")]
	  UNSPECV_CMPXCHG_1))
   (set (match_dup 1)
	(unspec_volatile:QIHISI [(const_int 0)] UNSPECV_CMPXCHG_2))
   (set (reg:SI T_REG)
	(unspec_volatile:SI [(const_int 0)] UNSPECV_CMPXCHG_3))
   (clobber (match_scratch:SI 4 "=&r"))
   (clobber (match_scratch:SI 5 "=&r"))]
  "TARGET_ATOMIC_SOFT_IMASK"
{
  /* The comparison result is supposed to be in T_REG.
     Notice that restoring SR will overwrite the T_REG.  We handle this by
     rotating the T_REG into the saved SR before restoring SR.  On SH2A we
     can do one insn shorter by using the bst insn.  */
  if (!TARGET_SH2A)
    return "\r stc sr,%0" "\n"
	   " <i124extend_insn> %2,%4" "\n"
	   " mov %0,%5" "\n"
	   " or #0xF0,%0" "\n"
	   " shlr %5" "\n"
	   " ldc %0,sr" "\n"
	   " mov.<bwl> %1,%0" "\n"
	   " cmp/eq %4,%0" "\n"
	   " bf 1f" "\n"
	   " mov.<bwl> %3,%1" "\n"
	   "1: rotcl %5" "\n"
	   " ldc %5,sr";
  else
    return "\r stc sr,%0" "\n"
	   " <i124extend_insn> %2,%4" "\n"
	   " mov %0,%5" "\n"
	   " or #0xF0,%0" "\n"
	   " ldc %0,sr" "\n"
	   " mov.<bwl> %1,%0" "\n"
	   " cmp/eq %4,%0" "\n"
	   " bst #0,%5" "\n"
	   " bf 1f" "\n"
	   " mov.<bwl> %3,%1" "\n"
	   "1: ldc %5,sr";
}
  [(set (attr "length") (if_then_else (match_test "!TARGET_SH2A")
				      (const_string "24")
				      (const_string "22")))])
485 | |
486 ;;------------------------------------------------------------------------------ | |
487 ;; read - write - return old value | |
488 | |
;; Expander for __atomic_exchange.  Dispatches to the implementation matching
;; the selected atomic model and zero-extends the QImode/HImode old value.
(define_expand "atomic_exchange<mode>"
  [(match_operand:QIHISI 0 "arith_reg_dest")		;; oldval output
   (match_operand:QIHISI 1 "atomic_mem_operand_0")	;; memory
   (match_operand:QIHISI 2 "atomic_arith_operand_0")	;; newval input
   (match_operand:SI 3 "const_int_operand")]		;; memory model
  "TARGET_ATOMIC_ANY"
{
  rtx mem = operands[1];
  rtx val = operands[2];
  rtx atomic_insn;

  if (TARGET_ATOMIC_HARD_LLCS
      || (TARGET_SH4A && <MODE>mode == SImode && !TARGET_ATOMIC_STRICT))
    atomic_insn = gen_atomic_exchange<mode>_hard (operands[0], mem, val);
  else if (TARGET_ATOMIC_SOFT_GUSA)
    atomic_insn = gen_atomic_exchange<mode>_soft_gusa (operands[0], mem, val);
  else if (TARGET_ATOMIC_SOFT_TCB)
    atomic_insn = gen_atomic_exchange<mode>_soft_tcb (operands[0], mem, val,
		      TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX);
  else if (TARGET_ATOMIC_SOFT_IMASK)
    atomic_insn = gen_atomic_exchange<mode>_soft_imask (operands[0], mem, val);
  else
    FAIL;

  emit_insn (atomic_insn);

  if (<MODE>mode == QImode)
    emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
				     operands[0]));
  else if (<MODE>mode == HImode)
    emit_insn (gen_zero_extendhisi2 (gen_lowpart (SImode, operands[0]),
				     operands[0]));
  DONE;
})
523 | |
;; SImode hardware exchange via the movli.l / movco.l llcs pair.
(define_insn "atomic_exchangesi_hard"
  [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
	(match_operand:SI 1 "atomic_mem_operand_0" "=Sra"))
   (set (match_dup 1)
	(unspec:SI
	  [(match_operand:SI 2 "arith_operand" "rI08")] UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS
   || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
{
  return "\r0: movli.l %1,r0" "\n"
	 " mov r0,%0" "\n"
	 " mov %2,r0" "\n"
	 " movco.l r0,%1" "\n"
	 " bf 0b";
}
  [(set_attr "length" "10")])
542 | |
;; The QIHImode llcs patterns modify the address register of the memory
;; operand.  In order to express that, we have to open code the memory
;; operand.  Initially the insn is expanded like every other atomic insn
;; using the memory operand.  In split1 the insn is converted and the
;; memory operand's address register is exposed.
(define_insn_and_split "atomic_exchange<mode>_hard"
  [(set (match_operand:QIHI 0 "arith_reg_dest")
	(match_operand:QIHI 1 "atomic_mem_operand_0"))
   (set (match_dup 1)
	(unspec:QIHI
	  [(match_operand:QIHI 2 "arith_reg_operand")] UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
  "#"
  "&& 1"
  [(const_int 0)]
{
  rtx i = gen_atomic_exchange<mode>_hard_1 (operands[0], XEXP (operands[1], 0),
					    operands[2]);

  /* Replace the new mems in the new insn with the old mem to preserve
     aliasing info.  */
  XEXP (XVECEXP (i, 0, 0), 1) = operands[1];
  XEXP (XVECEXP (i, 0, 1), 0) = operands[1];
  emit_insn (i);
})
570 | |
;; QImode/HImode hardware exchange.  The containing aligned SImode word is
;; reserved via movli.l / movco.l while the byte/word is accessed directly.
(define_insn "atomic_exchange<mode>_hard_1"
  [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
	(mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r")))
   (set (mem:QIHI (match_dup 1))
	(unspec:QIHI
	  [(match_operand:QIHI 2 "arith_reg_operand" "r")] UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:SI 3 "=&r"))
   (clobber (match_scratch:SI 4 "=1"))]
  "TARGET_ATOMIC_HARD_LLCS"
{
  return "\r mov #-4,%3" "\n"
	 " and %1,%3" "\n"
	 " xor %3,%1" "\n"
	 " add r15,%1" "\n"
	 " add #-4,%1" "\n"
	 "0: movli.l @%3,r0" "\n"
	 " mov.l r0,@-r15" "\n"
	 " mov.<bw> @%1,%0" "\n"
	 " mov.<bw> %2,@%1" "\n"
	 " mov.l @r15+,r0" "\n"
	 " movco.l r0,@%3" "\n"
	 " bf 0b";
}
  [(set_attr "length" "24")])
597 | |
;; gUSA software exchange (rewindable atomic sequence, r15-based ABI).
(define_insn "atomic_exchange<mode>_soft_gusa"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u")
	(match_operand:QIHISI 1 "atomic_mem_operand_0" "=AraAdd"))
   (set (match_dup 1)
	(unspec:QIHISI
	  [(match_operand:QIHISI 2 "arith_reg_operand" "u")] UNSPEC_ATOMIC))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_GUSA"
{
  return "\r mova 1f,r0" "\n"
	 " .align 2" "\n"
	 " mov r15,r1" "\n"
	 " mov #(0f-1f),r15" "\n"
	 "0: mov.<bwl> %1,%0" "\n"
	 " mov.<bwl> %2,%1" "\n"
	 "1: mov r1,r15";
}
  [(set_attr "length" "14")])
617 | |
;; TCB software exchange (rewindable atomic sequence, GBR-relative TCB slot;
;; operand 3 is the user-specified gbr-offset).
(define_insn "atomic_exchange<mode>_soft_tcb"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
	(match_operand:QIHISI 1 "atomic_mem_operand_0" "=SraSdd"))
   (set (match_dup 1)
	(unspec:QIHISI
	  [(match_operand:QIHISI 2 "arith_reg_operand" "r")] UNSPEC_ATOMIC))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))
   (use (match_operand:SI 3 "gbr_displacement"))]
  "TARGET_ATOMIC_SOFT_TCB"
{
  return "\r mova 1f,r0" "\n"
	 " mov #(0f-1f),r1" "\n"
	 " .align 2" "\n"
	 " mov.l r0,@(%O3,gbr)" "\n"
	 "0: mov.<bwl> %1,%0" "\n"
	 " mov #0,r0" "\n"
	 " mov.<bwl> %2,%1" "\n"
	 "1: mov.l r0,@(%O3,gbr)";
}
  [(set_attr "length" "16")])
639 | |
;; Interrupt-mask software exchange: interrupts are disabled by setting
;; SR.IMASK to 1111 around the load/store pair (privileged mode only).
(define_insn "atomic_exchange<mode>_soft_imask"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&z")
	(match_operand:QIHISI 1 "atomic_mem_operand_0" "=SraSdd"))
   (set (match_dup 1)
	(unspec:QIHISI
	  [(match_operand:QIHISI 2 "arith_reg_operand" "r")] UNSPEC_ATOMIC))
   (clobber (match_scratch:SI 3 "=&r"))]
  "TARGET_ATOMIC_SOFT_IMASK"
{
  return "\r stc sr,%0" "\n"
	 " mov %0,%3" "\n"
	 " or #0xF0,%0" "\n"
	 " ldc %0,sr" "\n"
	 " mov.<bwl> %1,%0" "\n"
	 " mov.<bwl> %2,%1" "\n"
	 " ldc %3,sr";
}
  [(set_attr "length" "14")])
658 | |
659 ;;------------------------------------------------------------------------------ | |
660 ;; read - add|sub|or|and|xor|nand - write - return old value | |
661 | |
;; atomic_arith_operand_1 can be used by any atomic type for a plus op,
;; since there's no r0 restriction.
(define_predicate "atomic_arith_operand_1"
  (and (match_code "subreg,reg,const_int")
       (ior (match_operand 0 "arith_reg_operand")
	    (match_test "satisfies_constraint_I08 (op)"))))
668 | |
;; atomic_logical_operand_1 can be used by the hard_llcs, tcb and soft_imask
;; patterns only due to its r0 restriction.
(define_predicate "atomic_logical_operand_1"
  (and (match_code "subreg,reg,const_int")
       (ior (match_operand 0 "arith_reg_operand")
	    (and (match_test "satisfies_constraint_K08 (op)")
		 (ior (match_test "TARGET_ATOMIC_HARD_LLCS")
		      (match_test "TARGET_ATOMIC_SOFT_IMASK")
		      (match_test "TARGET_ATOMIC_SOFT_TCB")
		      (match_test "TARGET_ATOMIC_ANY && TARGET_SH4A
				   && mode == SImode
				   && !TARGET_ATOMIC_STRICT"))))))
681 | |
;; Per-code operand predicate for the fetch-op patterns: plus accepts an I08
;; immediate everywhere, the logical ops only where r0 is available, and
;; minus is register-only.
(define_code_attr fetchop_predicate_1
  [(plus "atomic_arith_operand_1") (minus "arith_reg_operand")
   (ior "atomic_logical_operand_1") (xor "atomic_logical_operand_1")
   (and "atomic_logical_operand_1")])
686 | |
;; Per-code operand constraints for each atomic model.  gusa sequences must
;; keep everything in the 'u' register class and take no constant for the
;; logical ops; the other models allow I08/K08 immediates where the predicate
;; above does.
(define_code_attr fetchop_constraint_1_llcs
  [(plus "rI08") (minus "r") (ior "rK08") (xor "rK08") (and "rK08")])

(define_code_attr fetchop_constraint_1_gusa
  [(plus "uI08") (minus "u") (ior "u") (xor "u") (and "u")])

(define_code_attr fetchop_constraint_1_tcb
  [(plus "rI08") (minus "r") (ior "rK08") (xor "rK08") (and "rK08")])

(define_code_attr fetchop_constraint_1_imask
  [(plus "rI08") (minus "r") (ior "rK08") (xor "rK08") (and "rK08")])
698 | |
;; Displacement addressing mode (incl. GBR relative) can be used by tcb and
;; imask atomic patterns in any mode, since all the patterns use R0 as the
;; register operand for memory loads/stores.  gusa and llcs patterns can only
;; use displacement addressing for SImode.
(define_predicate "atomic_mem_operand_1"
  (and (match_code "mem")
       (ior (match_operand 0 "simple_mem_operand")
	    (and (match_test "mode == SImode")
		 (match_test "TARGET_ATOMIC_SOFT_GUSA
			      && (!TARGET_SH4A || TARGET_ATOMIC_STRICT)")
		 (match_operand 0 "short_displacement_mem_operand"))
	    (and (ior (match_test "(TARGET_ATOMIC_SOFT_TCB
				    || TARGET_ATOMIC_SOFT_IMASK)
				   && (!TARGET_SH4A || TARGET_ATOMIC_STRICT)")
		      (match_test "(TARGET_ATOMIC_SOFT_TCB
				    || TARGET_ATOMIC_SOFT_IMASK)
				   && TARGET_SH4A && !TARGET_ATOMIC_STRICT
				   && mode != SImode"))
		 (ior (match_operand 0 "short_displacement_mem_operand")
		      (match_operand 0 "gbr_address_mem"))))))
719 | |
;; Expander for __atomic_fetch_{add,sub,or,xor,and}.  Dispatches to the
;; implementation matching the selected atomic model and zero-extends the
;; QImode/HImode old value.
(define_expand "atomic_fetch_<fetchop_name><mode>"
  [(set (match_operand:QIHISI 0 "arith_reg_dest")
	(match_operand:QIHISI 1 "atomic_mem_operand_1"))
   (set (match_dup 1)
	(unspec:QIHISI
	  [(FETCHOP:QIHISI (match_dup 1)
	     (match_operand:QIHISI 2 "<fetchop_predicate_1>"))]
	  UNSPEC_ATOMIC))
   (match_operand:SI 3 "const_int_operand")]
  "TARGET_ATOMIC_ANY"
{
  rtx mem = operands[1];
  rtx atomic_insn;

  if (TARGET_ATOMIC_HARD_LLCS
      || (TARGET_SH4A && <MODE>mode == SImode && !TARGET_ATOMIC_STRICT))
    atomic_insn = gen_atomic_fetch_<fetchop_name><mode>_hard (operands[0], mem,
							      operands[2]);
  else if (TARGET_ATOMIC_SOFT_GUSA)
    atomic_insn = gen_atomic_fetch_<fetchop_name><mode>_soft_gusa (operands[0],
		      mem, operands[2]);
  else if (TARGET_ATOMIC_SOFT_TCB)
    atomic_insn = gen_atomic_fetch_<fetchop_name><mode>_soft_tcb (operands[0],
		      mem, operands[2], TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX);
  else if (TARGET_ATOMIC_SOFT_IMASK)
    atomic_insn = gen_atomic_fetch_<fetchop_name><mode>_soft_imask (operands[0],
		      mem, operands[2]);
  else
    FAIL;

  emit_insn (atomic_insn);

  if (<MODE>mode == QImode)
    emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
				     operands[0]));
  else if (<MODE>mode == HImode)
    emit_insn (gen_zero_extendhisi2 (gen_lowpart (SImode, operands[0]),
				     operands[0]));
  DONE;
})
760 | |
;; SImode fetch-and-<op> using the movli.l/movco.l (LLCS) retry loop.
;; Returns the old value in operand 0; sets T on success (movco.l) and
;; retries until the store succeeds.  If the fetched (old) value turns out
;; to be unused, the split degrades this to the op-fetch variant with a
;; throw-away result register, which saves the extra "mov r0,%0".
(define_insn_and_split "atomic_fetch_<fetchop_name>si_hard"
  [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
	(match_operand:SI 1 "atomic_mem_operand_1" "=Sra"))
   (set (match_dup 1)
	(unspec:SI
	  [(FETCHOP:SI (match_dup 1)
	     (match_operand:SI 2 "<fetchop_predicate_1>"
				 "<fetchop_constraint_1_llcs>"))]
	  UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS
   || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
{
  return "\r0:	movli.l	%1,r0"		"\n"
	 "	mov	r0,%0"		"\n"
	 "	<fetchop_name>	%2,r0"	"\n"
	 "	movco.l	r0,%1"		"\n"
	 "	bf	0b";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_<fetchop_name>_fetchsi_hard (gen_reg_rtx (SImode),
						     operands[1], operands[2]));
}
  [(set_attr "length" "10")])
789 | |
;; Combine pattern for xor (val, -1) / nand (val, -1).
;; SImode fetch-and-not via the LLCS retry loop; same structure as the
;; fetch-<op> pattern above, with "not" as the operation.  Splits to the
;; not-fetch form when the old value is dead after this insn.
(define_insn_and_split "atomic_fetch_notsi_hard"
  [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
	(match_operand:SI 1 "atomic_mem_operand_1" "=Sra"))
   (set (match_dup 1)
	(unspec:SI [(not:SI (match_dup 1))] UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS
   || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
{
  return "\r0:	movli.l	%1,r0"	"\n"
	 "	mov	r0,%0"	"\n"
	 "	not	r0,r0"	"\n"
	 "	movco.l	r0,%1"	"\n"
	 "	bf	0b";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_not_fetchsi_hard (gen_reg_rtx (SImode), operands[1]));
}
  [(set_attr "length" "10")])
814 | |
;; The QIHImode llcs patterns modify the address register of the memory
;; operand.  In order to express that, we have to open code the memory
;; operand.  Initially the insn is expanded like every other atomic insn
;; using the memory operand.  In split1 the insn is converted and the
;; memory operand's address register is exposed.
(define_insn_and_split "atomic_fetch_<fetchop_name><mode>_hard"
  [(set (match_operand:QIHI 0 "arith_reg_dest")
	(match_operand:QIHI 1 "atomic_mem_operand_1"))
   (set (match_dup 1)
	(unspec:QIHI
	  [(FETCHOP:QIHI (match_dup 1)
	     (match_operand:QIHI 2 "<fetchop_predicate_1>"))]
	  UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
  "#"
  "&& 1"
  [(const_int 0)]
{
  /* If the fetched old value is unused, emit the cheaper op-only variant;
     otherwise emit the _1 insn with the address register exposed.  */
  if (optimize
      && sh_reg_dead_or_unused_after_insn (curr_insn, REGNO (operands[0])))
    emit_insn (gen_atomic_<fetchop_name><mode>_hard (operands[1], operands[2]));
  else
    {
      rtx i = gen_atomic_fetch_<fetchop_name><mode>_hard_1 (
		  operands[0], XEXP (operands[1], 0), operands[2]);

      /* Replace the new mems in the new insn with the old mem to preserve
	 aliasing info.  */
      XEXP (XVECEXP (i, 0, 0), 1) = operands[1];
      XEXP (XVECEXP (i, 0, 1), 0) = operands[1];
      XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 1), 1), 0, 0), 0) = operands[1];
      emit_insn (i);
    }
})
851 | |
;; QImode/HImode fetch-and-<op> on LLCS hardware.  movli.l/movco.l only
;; work on aligned SImode words, so the sequence computes the aligned word
;; address in %3 and the byte offset in %1, loads the containing word with
;; movli.l, pushes it to the stack, performs the sub-word access on that
;; stack copy through %1, then pops the word and stores it back with
;; movco.l, retrying on failure.  Scratch 4 is tied to operand 1 ("=1")
;; because the address register is destroyed by the offset arithmetic.
(define_insn "atomic_fetch_<fetchop_name><mode>_hard_1"
  [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
	(mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r")))
   (set (mem:QIHI (match_dup 1))
	(unspec:QIHI
	  [(FETCHOP:QIHI (mem:QIHI (match_dup 1))
	     (match_operand:QIHI 2 "<fetchop_predicate_1>"
				   "<fetchop_constraint_1_llcs>"))]
	  UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:SI 3 "=&r"))
   (clobber (match_scratch:SI 4 "=1"))]
  "TARGET_ATOMIC_HARD_LLCS"
{
  return "\r	mov	#-4,%3"		"\n"	/* %3 = address & ~3 (aligned word) */
	 "	and	%1,%3"		"\n"
	 "	xor	%3,%1"		"\n"	/* %1 = byte offset within the word */
	 "	add	r15,%1"		"\n"	/* %1 -> offset inside the stack slot */
	 "	add	#-4,%1"		"\n"
	 "0:	movli.l	@%3,r0"		"\n"
	 "	mov.l	r0,@-r15"	"\n"	/* work on a stack copy of the word */
	 "	mov.<bw>	@%1,r0"	"\n"
	 "	mov	r0,%0"		"\n"
	 "	<fetchop_name>	%2,r0"	"\n"
	 "	mov.<bw>	r0,@%1"	"\n"
	 "	mov.l	@r15+,r0"	"\n"
	 "	movco.l	r0,@%3"		"\n"
	 "	bf	0b";
}
  [(set_attr "length" "28")])
883 | |
;; The QIHImode llcs patterns modify the address register of the memory
;; operand.  In order to express that, we have to open code the memory
;; operand.  Initially the insn is expanded like every other atomic insn
;; using the memory operand.  In split1 the insn is converted and the
;; memory operand's address register is exposed.
;; This is the op-only variant (old value not returned).
(define_insn_and_split "atomic_<fetchop_name><mode>_hard"
  [(set (match_operand:QIHI 0 "atomic_mem_operand_1")
	(unspec:QIHI
	  [(FETCHOP:QIHI (match_dup 0)
	     (match_operand:QIHI 1 "<fetchop_predicate_1>"))]
	  UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
  "#"
  "&& 1"
  [(const_int 0)]
{
  rtx i = gen_atomic_<fetchop_name><mode>_hard_1 (XEXP (operands[0], 0),
						  operands[1]);
  /* Replace the new mems in the new insn with the old mem to preserve
     aliasing info.  */
  XEXP (XVECEXP (i, 0, 0), 0) = operands[0];
  XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 0), 1), 0, 0), 0) = operands[0];
  emit_insn (i);
})
910 | |
;; QImode/HImode atomic <op> (no fetch) on LLCS hardware.  Same
;; aligned-word / stack-copy technique as the fetch variant above, but two
;; insns shorter since the old value is not copied out.  Scratch 3 is tied
;; to operand 0 ("=0") because the address register is destroyed.
(define_insn "atomic_<fetchop_name><mode>_hard_1"
  [(set (mem:QIHI (match_operand:SI 0 "arith_reg_operand" "r"))
	(unspec:QIHI
	  [(FETCHOP:QIHI (mem:QIHI (match_dup 0))
	     (match_operand:QIHI 1 "<fetchop_predicate_1>"
				   "<fetchop_constraint_1_llcs>"))]
	  UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:SI 2 "=&r"))
   (clobber (match_scratch:SI 3 "=0"))]
  "TARGET_ATOMIC_HARD_LLCS"
{
  return "\r	mov	#-4,%2"		"\n"	/* %2 = aligned word address */
	 "	and	%0,%2"		"\n"
	 "	xor	%2,%0"		"\n"	/* %0 = byte offset within word */
	 "	add	r15,%0"		"\n"
	 "	add	#-4,%0"		"\n"
	 "0:	movli.l	@%2,r0"		"\n"
	 "	mov.l	r0,@-r15"	"\n"
	 "	mov.<bw>	@%0,r0"	"\n"
	 "	<fetchop_name>	%1,r0"	"\n"
	 "	mov.<bw>	r0,@%0"	"\n"
	 "	mov.l	@r15+,r0"	"\n"
	 "	movco.l	r0,@%2"		"\n"
	 "	bf	0b";
}
  [(set_attr "length" "26")])
939 | |
;; Combine pattern for xor (val, -1) / nand (val, -1).
;; QImode/HImode fetch-and-not on LLCS hardware; open-coded mem like the
;; other QIHI llcs patterns, using the aligned-word / stack-copy trick.
;; When the fetched value is dead, the split drops to the not-only
;; variant, re-using the original mem to keep the aliasing info.
(define_insn_and_split "atomic_fetch_not<mode>_hard"
  [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
	(mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r")))
   (set (mem:QIHI (match_dup 1))
	(unspec:QIHI [(not:QIHI (mem:QIHI (match_dup 1)))] UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:SI 2 "=&r"))
   (clobber (match_scratch:SI 3 "=1"))]
  "TARGET_ATOMIC_HARD_LLCS"
{
  return "\r	mov	#-4,%2"		"\n"
	 "	and	%1,%2"		"\n"
	 "	xor	%2,%1"		"\n"
	 "	add	r15,%1"		"\n"
	 "	add	#-4,%1"		"\n"
	 "0:	movli.l	@%2,r0"		"\n"
	 "	mov.l	r0,@-r15"	"\n"
	 "	mov.<bw>	@%1,%0"	"\n"
	 "	not	%0,r0"		"\n"
	 "	mov.<bw>	r0,@%1"	"\n"
	 "	mov.l	@r15+,r0"	"\n"
	 "	movco.l	r0,@%2"		"\n"
	 "	bf	0b";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  rtx i = gen_atomic_not<mode>_hard (operands[1]);

  /* Replace the new mems in the new insn with the old mem to preserve
     aliasing info.  */
  rtx m = XEXP (XVECEXP (PATTERN (curr_insn), 0, 0), 1);
  XEXP (XVECEXP (i, 0, 0), 0) = m;
  XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 0), 1), 0, 0), 0) = m;
  emit_insn (i);
}
  [(set_attr "length" "26")])
980 | |
;; QImode/HImode atomic not (no fetch) on LLCS hardware; aligned-word /
;; stack-copy sequence, address register (operand 0) destroyed via the
;; tied scratch 2.
(define_insn "atomic_not<mode>_hard"
  [(set (mem:QIHI (match_operand:SI 0 "arith_reg_operand" "r"))
	(unspec:QIHI [(not:QIHI (mem:QIHI (match_dup 0)))] UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:SI 1 "=&r"))
   (clobber (match_scratch:SI 2 "=0"))]
  "TARGET_ATOMIC_HARD_LLCS"
{
  return "\r	mov	#-4,%1"		"\n"
	 "	and	%0,%1"		"\n"
	 "	xor	%1,%0"		"\n"
	 "	add	r15,%0"		"\n"
	 "	add	#-4,%0"		"\n"
	 "0:	movli.l	@%1,r0"		"\n"
	 "	mov.l	r0,@-r15"	"\n"
	 "	mov.<bw>	@%0,r0"	"\n"
	 "	not	r0,r0"		"\n"
	 "	mov.<bw>	r0,@%0"	"\n"
	 "	mov.l	@r15+,r0"	"\n"
	 "	movco.l	r0,@%1"		"\n"
	 "	bf	0b";
}
  [(set_attr "length" "26")])
1005 | |
;; Fetch-and-<op> using the gUSA software atomic sequence: r15 holds the
;; negative length of the critical section (#(0f-1f)) while r1 preserves
;; the real stack pointer; the kernel uses this to restart an interrupted
;; sequence.  All operands must be in the gUSA-safe register class ("u").
;; Splits to the op-fetch variant when the old value is unused.
(define_insn_and_split "atomic_fetch_<fetchop_name><mode>_soft_gusa"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u")
	(match_operand:QIHISI 1 "atomic_mem_operand_1" "=AraAdd"))
   (set (match_dup 1)
	(unspec:QIHISI
	  [(FETCHOP:QIHISI
	     (match_dup 1)
	     (match_operand:QIHISI 2 "<fetchop_predicate_1>"
				     "<fetchop_constraint_1_gusa>"))]
	  UNSPEC_ATOMIC))
   (clobber (match_scratch:QIHISI 3 "=&u"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_GUSA"
{
  return "\r	mova	1f,r0"		"\n"
	 "	.align 2"		"\n"
	 "	mov	r15,r1"		"\n"	/* save SP; marks gUSA region */
	 "	mov	#(0f-1f),r15"	"\n"	/* SP = -(critical section size) */
	 "0:	mov.<bwl>	%1,%0"	"\n"
	 "	mov	%0,%3"		"\n"
	 "	<fetchop_name>	%2,%3"	"\n"
	 "	mov.<bwl>	%3,%1"	"\n"
	 "1:	mov	r1,r15";		/* restore SP; ends the region */
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_<fetchop_name>_fetch<mode>_soft_gusa (
		 gen_reg_rtx (<MODE>mode), operands[1], operands[2]));
}
  [(set_attr "length" "18")])
1039 | |
;; Combine pattern for xor (val, -1) / nand (val, -1).
;; gUSA fetch-and-not; same r15/r1 restartable-sequence framing as the
;; fetch-<op> gusa pattern.  Splits to not-fetch when the result is dead.
(define_insn_and_split "atomic_fetch_not<mode>_soft_gusa"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u")
	(match_operand:QIHISI 1 "atomic_mem_operand_1" "=AraAdd"))
   (set (match_dup 1)
	(unspec:QIHISI [(not:QIHISI (match_dup 1))] UNSPEC_ATOMIC))
   (clobber (match_scratch:QIHISI 2 "=&u"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_GUSA"
{
  return "\r	mova	1f,r0"		"\n"
	 "	mov	r15,r1"		"\n"
	 "	.align 2"		"\n"
	 "	mov	#(0f-1f),r15"	"\n"
	 "0:	mov.<bwl>	%1,%0"	"\n"
	 "	not	%0,%2"		"\n"
	 "	mov.<bwl>	%2,%1"	"\n"
	 "1:	mov	r1,r15";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_not_fetch<mode>_soft_gusa (gen_reg_rtx (<MODE>mode),
						   operands[1]));
}
  [(set_attr "length" "16")])
1068 | |
;; Fetch-and-<op> using the thread-control-block (TCB) software model:
;; the address of the end label (1f) is stored at a GBR displacement
;; (operand 3) to mark the critical section for the interrupt/restart
;; handler, and cleared (r0 = 0) on exit.  Splits to the no-fetch tcb
;; variant when the old value is unused.
(define_insn_and_split "atomic_fetch_<fetchop_name><mode>_soft_tcb"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
	(match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))
   (set (match_dup 1)
	(unspec:QIHISI
	  [(FETCHOP:QIHISI
	     (match_dup 1)
	     (match_operand:QIHISI 2 "<fetchop_predicate_1>"
				     "<fetchop_constraint_1_tcb>"))]
	  UNSPEC_ATOMIC))
   (use (match_operand:SI 3 "gbr_displacement"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_TCB"
{
  return "\r	mova	1f,r0"			"\n"
	 "	.align 2"			"\n"
	 "	mov	#(0f-1f),r1"		"\n"
	 "	mov.l	r0,@(%O3,gbr)"		"\n"	/* publish end-of-region addr */
	 "0:	mov.<bwl>	%1,r0"		"\n"
	 "	mov	r0,%0"			"\n"
	 "	<fetchop_name>	%2,r0"		"\n"
	 "	mov.<bwl>	r0,%1"		"\n"
	 "1:	mov	#0,r0"			"\n"
	 "	mov.l	r0,@(%O3,gbr)";			/* clear the marker */
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_<fetchop_name><mode>_soft_tcb (
		 operands[1], operands[2], operands[3]));
}
  [(set_attr "length" "20")])
1103 | |
;; TCB-model atomic <op> without returning the old value; one reg-reg
;; move shorter than the fetch variant.  Operand 2 is the GBR displacement
;; of the TCB slot that marks the critical section.
(define_insn "atomic_<fetchop_name><mode>_soft_tcb"
  [(set (match_operand:QIHISI 0 "atomic_mem_operand_1" "=SraSdd")
	(unspec:QIHISI
	  [(FETCHOP:QIHISI
	     (match_dup 0)
	     (match_operand:QIHISI 1 "<fetchop_predicate_1>"
				     "<fetchop_constraint_1_tcb>"))]
	  UNSPEC_ATOMIC))
   (use (match_operand:SI 2 "gbr_displacement"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_TCB"
{
  return "\r	mova	1f,r0"			"\n"
	 "	mov	#(0f-1f),r1"		"\n"
	 "	.align 2"			"\n"
	 "	mov.l	r0,@(%O2,gbr)"		"\n"
	 "0:	mov.<bwl>	%0,r0"		"\n"
	 "	<fetchop_name>	%1,r0"		"\n"
	 "	mov.<bwl>	r0,%0"		"\n"
	 "1:	mov	#0,r0"			"\n"
	 "	mov.l	r0,@(%O2,gbr)";
}
  [(set_attr "length" "18")])
1128 | |
;; Combine pattern for xor (val, -1) / nand (val, -1).
;; TCB-model fetch-and-not; splits to the not-only variant when the old
;; value is dead after the insn.
(define_insn_and_split "atomic_fetch_not<mode>_soft_tcb"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
	(match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))
   (set (match_dup 1)
	(unspec:QIHISI [(not:QIHISI (match_dup 1))] UNSPEC_ATOMIC))
   (use (match_operand:SI 2 "gbr_displacement"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_TCB"
{
  return "\r	mova	1f,r0"			"\n"
	 "	.align 2"			"\n"
	 "	mov	#(0f-1f),r1"		"\n"
	 "	mov.l	r0,@(%O2,gbr)"		"\n"
	 "0:	mov.<bwl>	%1,r0"		"\n"
	 "	mov	r0,%0"			"\n"
	 "	not	r0,r0"			"\n"
	 "	mov.<bwl>	r0,%1"		"\n"
	 "1:	mov	#0,r0"			"\n"
	 "	mov.l	r0,@(%O2,gbr)";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_not<mode>_soft_tcb (operands[1], operands[2]));
}
  [(set_attr "length" "20")])
1158 | |
;; TCB-model atomic not without fetch; shortest of the tcb not variants.
(define_insn "atomic_not<mode>_soft_tcb"
  [(set (match_operand:QIHISI 0 "atomic_mem_operand_1" "=SraSdd")
	(unspec:QIHISI [(not:QIHISI (match_dup 0))] UNSPEC_ATOMIC))
   (use (match_operand:SI 1 "gbr_displacement"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_TCB"
{
  return "\r	mova	1f,r0"			"\n"
	 "	mov	#(0f-1f),r1"		"\n"
	 "	.align 2"			"\n"
	 "	mov.l	r0,@(%O1,gbr)"		"\n"
	 "0:	mov.<bwl>	%0,r0"		"\n"
	 "	not	r0,r0"			"\n"
	 "	mov.<bwl>	r0,%0"		"\n"
	 "1:	mov	#0,r0"			"\n"
	 "	mov.l	r0,@(%O1,gbr)";
}
  [(set_attr "length" "18")])
1178 | |
;; Fetch-and-<op> using the interrupt-mask software model: interrupts are
;; blocked by or-ing 0xF0 into SR's IMASK field around the read-modify-
;; write, with the original SR saved in scratch %3 and restored at the
;; end.  Splits to the op-fetch variant when the old value is unused.
(define_insn_and_split "atomic_fetch_<fetchop_name><mode>_soft_imask"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
	(match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))
   (set (match_dup 1)
	(unspec:QIHISI
	  [(FETCHOP:QIHISI
	     (match_dup 1)
	     (match_operand:QIHISI 2 "<fetchop_predicate_1>"
				     "<fetchop_constraint_1_imask>"))]
	  UNSPEC_ATOMIC))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:QIHISI 3 "=&r"))]
  "TARGET_ATOMIC_SOFT_IMASK"
{
  return "\r	stc	sr,r0"		"\n"	/* save SR */
	 "	mov	r0,%3"		"\n"
	 "	or	#0xF0,r0"	"\n"	/* raise IMASK: block interrupts */
	 "	ldc	r0,sr"		"\n"
	 "	mov.<bwl>	%1,r0"	"\n"
	 "	mov	r0,%0"		"\n"
	 "	<fetchop_name>	%2,r0"	"\n"
	 "	mov.<bwl>	r0,%1"	"\n"
	 "	ldc	%3,sr";			/* restore SR */
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_<fetchop_name>_fetch<mode>_soft_imask (
		 gen_reg_rtx (<MODE>mode), operands[1], operands[2]));
}
  [(set_attr "length" "18")])
1211 | |
;; Combine pattern for xor (val, -1) / nand (val, -1).
;; Interrupt-mask model fetch-and-not; SR saved in %2 and restored after
;; the read-modify-write.  Splits to not-fetch when the result is dead.
(define_insn_and_split "atomic_fetch_not<mode>_soft_imask"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
	(match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))
   (set (match_dup 1)
	(unspec:QIHISI [(not:QIHISI (match_dup 1))] UNSPEC_ATOMIC))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:QIHISI 2 "=&r"))]
  "TARGET_ATOMIC_SOFT_IMASK"
{
  return "\r	stc	sr,r0"		"\n"
	 "	mov	r0,%2"		"\n"
	 "	or	#0xF0,r0"	"\n"
	 "	ldc	r0,sr"		"\n"
	 "	mov.<bwl>	%1,r0"	"\n"
	 "	mov	r0,%0"		"\n"
	 "	not	r0,r0"		"\n"
	 "	mov.<bwl>	r0,%1"	"\n"
	 "	ldc	%2,sr";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_not_fetch<mode>_soft_imask (gen_reg_rtx (<MODE>mode),
						    operands[1]));
}
  [(set_attr "length" "18")])
1240 | |
;; Expander for __atomic_fetch_nand (returns the OLD value).  nand is
;; handled separately from the other fetchops because its RTL shape is
;; (not (and ...)).  Dispatch and sub-word zero-extension mirror the
;; atomic_fetch_<fetchop_name> expander; operand 3 (memory model) is
;; unused.
(define_expand "atomic_fetch_nand<mode>"
  [(set (match_operand:QIHISI 0 "arith_reg_dest")
	(match_operand:QIHISI 1 "atomic_mem_operand_1"))
   (set (match_dup 1)
	(unspec:QIHISI
	  [(not:QIHISI (and:QIHISI (match_dup 1)
		       (match_operand:QIHISI 2 "atomic_logical_operand_1")))]
	  UNSPEC_ATOMIC))
   (match_operand:SI 3 "const_int_operand")]
  "TARGET_ATOMIC_ANY"
{
  rtx mem = operands[1];
  rtx atomic_insn;

  if (TARGET_ATOMIC_HARD_LLCS
      || (TARGET_SH4A && <MODE>mode == SImode && !TARGET_ATOMIC_STRICT))
    atomic_insn = gen_atomic_fetch_nand<mode>_hard (operands[0], mem,
						    operands[2]);
  else if (TARGET_ATOMIC_SOFT_GUSA)
    atomic_insn = gen_atomic_fetch_nand<mode>_soft_gusa (operands[0], mem,
							 operands[2]);
  else if (TARGET_ATOMIC_SOFT_TCB)
    atomic_insn = gen_atomic_fetch_nand<mode>_soft_tcb (operands[0], mem,
		      operands[2], TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX);
  else if (TARGET_ATOMIC_SOFT_IMASK)
    atomic_insn = gen_atomic_fetch_nand<mode>_soft_imask (operands[0], mem,
							  operands[2]);
  else
    FAIL;

  emit_insn (atomic_insn);

  /* Sub-word results must be zero-extended to SImode for the caller.  */
  if (<MODE>mode == QImode)
    emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
				     operands[0]));
  else if (<MODE>mode == HImode)
    emit_insn (gen_zero_extendhisi2 (gen_lowpart (SImode, operands[0]),
				     operands[0]));
  DONE;
})
1281 | |
;; SImode fetch-and-nand via the LLCS retry loop: and then not inside the
;; movli.l/movco.l pair.  Splits to the nand-fetch variant when the old
;; value is dead after this insn.
(define_insn_and_split "atomic_fetch_nandsi_hard"
  [(set (match_operand:SI 0 "arith_reg_dest" "=&r")
	(match_operand:SI 1 "atomic_mem_operand_1" "=Sra"))
   (set (match_dup 1)
	(unspec:SI
	  [(not:SI (and:SI (match_dup 1)
			   (match_operand:SI 2 "logical_operand" "rK08")))]
	  UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS
   || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)"
{
  return "\r0:	movli.l	%1,r0"	"\n"
	 "	mov	r0,%0"	"\n"
	 "	and	%2,r0"	"\n"
	 "	not	r0,r0"	"\n"
	 "	movco.l	r0,%1"	"\n"
	 "	bf	0b";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_nand_fetchsi_hard (gen_reg_rtx (SImode), operands[1],
					   operands[2]));
}
  [(set_attr "length" "12")])
1310 | |
;; The QIHImode llcs patterns modify the address register of the memory
;; operand.  In order to express that, we have to open code the memory
;; operand.  Initially the insn is expanded like every other atomic insn
;; using the memory operand.  In split1 the insn is converted and the
;; memory operand's address register is exposed.
(define_insn_and_split "atomic_fetch_nand<mode>_hard"
  [(set (match_operand:QIHI 0 "arith_reg_dest")
	(match_operand:QIHI 1 "atomic_mem_operand_1"))
   (set (match_dup 1)
	(unspec:QIHI
	  [(not:QIHI (and:QIHI (match_dup 1)
		     (match_operand:QIHI 2 "logical_operand" "rK08")))]
	  UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
  "#"
  "&& 1"
  [(const_int 0)]
{
  /* Drop to the nand-only variant when the fetched value is unused;
     otherwise open-code the address register via the _1 insn.  */
  if (optimize
      && sh_reg_dead_or_unused_after_insn (curr_insn, REGNO (operands[0])))
    emit_insn (gen_atomic_nand<mode>_hard (operands[1], operands[2]));
  else
    {
      rtx i = gen_atomic_fetch_nand<mode>_hard_1 (
		  operands[0], XEXP (operands[1], 0), operands[2]);

      /* Replace the new mems in the new insn with the old mem to preserve
	 aliasing info.  */
      XEXP (XVECEXP (i, 0, 0), 1) = operands[1];
      XEXP (XVECEXP (i, 0, 1), 0) = operands[1];
      XEXP (XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 1), 1), 0, 0), 0),
	    0) = operands[1];
      emit_insn (i);
    }
})
1348 | |
;; QImode/HImode fetch-and-nand on LLCS hardware; the aligned-word /
;; stack-copy sequence used by the other QIHI llcs patterns, with an
;; extra "not" making this the longest variant (length 30).
(define_insn "atomic_fetch_nand<mode>_hard_1"
  [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r")
	(mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r")))
   (set (mem:QIHI (match_dup 1))
	(unspec:QIHI
	  [(not:QIHI (and:QIHI (mem:QIHI (match_dup 1))
		     (match_operand:QIHI 2 "logical_operand" "rK08")))]
	  UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:SI 3 "=&r"))
   (clobber (match_scratch:SI 4 "=1"))]
  "TARGET_ATOMIC_HARD_LLCS"
{
  return "\r	mov	#-4,%3"		"\n"
	 "	and	%1,%3"		"\n"
	 "	xor	%3,%1"		"\n"
	 "	add	r15,%1"		"\n"
	 "	add	#-4,%1"		"\n"
	 "0:	movli.l	@%3,r0"		"\n"
	 "	mov.l	r0,@-r15"	"\n"
	 "	mov.<bw>	@%1,r0"	"\n"
	 "	mov	r0,%0"		"\n"
	 "	and	%2,r0"		"\n"
	 "	not	r0,r0"		"\n"
	 "	mov.<bw>	r0,@%1"	"\n"
	 "	mov.l	@r15+,r0"	"\n"
	 "	movco.l	r0,@%3"		"\n"
	 "	bf	0b";
}
  [(set_attr "length" "30")])
1380 | |
;; The QIHImode llcs patterns modify the address register of the memory
;; operand.  In order to express that, we have to open code the memory
;; operand.  Initially the insn is expanded like every other atomic insn
;; using the memory operand.  In split1 the insn is converted and the
;; memory operand's address register is exposed.
;; nand-only variant (old value not returned).
(define_insn_and_split "atomic_nand<mode>_hard"
  [(set (match_operand:QIHI 0 "atomic_mem_operand_1")
	(unspec:QIHI
	  [(not:QIHI (and:QIHI (match_dup 0)
		     (match_operand:QIHI 1 "logical_operand")))]
	  UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))]
  "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()"
  "#"
  "&& 1"
  [(const_int 0)]
{
  rtx i = gen_atomic_nand<mode>_hard_1 (XEXP (operands[0], 0), operands[1]);

  /* Replace the new mems in the new insn with the old mem to preserve
     aliasing info.  */
  XEXP (XVECEXP (i, 0, 0), 0) = operands[0];
  XEXP (XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 0), 1), 0, 0), 0), 0) = operands[0];
  emit_insn (i);
})
1407 | |
;; QImode/HImode atomic nand (no fetch) on LLCS hardware; aligned-word /
;; stack-copy sequence, address register (operand 0) destroyed via the
;; tied scratch 3.
(define_insn "atomic_nand<mode>_hard_1"
  [(set (mem:QIHI (match_operand:SI 0 "arith_reg_operand" "r"))
	(unspec:QIHI
	  [(not:QIHI (and:QIHI (mem:QIHI (match_dup 0))
		     (match_operand:QIHI 1 "logical_operand" "rK08")))]
	  UNSPEC_ATOMIC))
   (set (reg:SI T_REG) (const_int 1))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:SI 2 "=&r"))
   (clobber (match_scratch:SI 3 "=0"))]
  "TARGET_ATOMIC_HARD_LLCS"
{
  return "\r	mov	#-4,%2"		"\n"
	 "	and	%0,%2"		"\n"
	 "	xor	%2,%0"		"\n"
	 "	add	r15,%0"		"\n"
	 "	add	#-4,%0"		"\n"
	 "0:	movli.l	@%2,r0"		"\n"
	 "	mov.l	r0,@-r15"	"\n"
	 "	mov.<bw>	@%0,r0"	"\n"
	 "	and	%1,r0"		"\n"
	 "	not	r0,r0"		"\n"
	 "	mov.<bw>	r0,@%0"	"\n"
	 "	mov.l	@r15+,r0"	"\n"
	 "	movco.l	r0,@%2"		"\n"
	 "	bf	0b";
}
  [(set_attr "length" "28")])
1436 | |
;; gUSA fetch-and-nand; r15/r1 restartable-sequence framing as in the
;; other gusa patterns.  Splits to the nand-fetch variant when the old
;; value is dead after the insn.
(define_insn_and_split "atomic_fetch_nand<mode>_soft_gusa"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u")
	(match_operand:QIHISI 1 "atomic_mem_operand_1" "=AraAdd"))
   (set (match_dup 1)
	(unspec:QIHISI
	  [(not:QIHISI
	     (and:QIHISI (match_dup 1)
			 (match_operand:QIHISI 2 "arith_reg_operand" "u")))]
	  UNSPEC_ATOMIC))
   (clobber (match_scratch:QIHISI 3 "=&u"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_GUSA"
{
  return "\r	mova	1f,r0"		"\n"
	 "	mov	r15,r1"		"\n"
	 "	.align 2"		"\n"
	 "	mov	#(0f-1f),r15"	"\n"
	 "0:	mov.<bwl>	%1,%0"	"\n"
	 "	mov	%2,%3"		"\n"
	 "	and	%0,%3"		"\n"
	 "	not	%3,%3"		"\n"
	 "	mov.<bwl>	%3,%1"	"\n"
	 "1:	mov	r1,r15";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_nand_fetch<mode>_soft_gusa (gen_reg_rtx (<MODE>mode),
						    operands[1], operands[2]));
}
  [(set_attr "length" "20")])
1470 | |
;; TCB-model fetch-and-nand; critical section marked via the GBR slot at
;; displacement operand 3, cleared on exit.  Splits to the nand-only tcb
;; variant when the fetched value is unused.
(define_insn_and_split "atomic_fetch_nand<mode>_soft_tcb"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
	(match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))
   (set (match_dup 1)
	(unspec:QIHISI
	  [(not:QIHISI
	     (and:QIHISI (match_dup 1)
			 (match_operand:QIHISI 2 "logical_operand" "rK08")))]
	  UNSPEC_ATOMIC))
   (use (match_operand:SI 3 "gbr_displacement"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_TCB"
{
  return "\r	mova	1f,r0"			"\n"
	 "	mov	#(0f-1f),r1"		"\n"
	 "	.align 2"			"\n"
	 "	mov.l	r0,@(%O3,gbr)"		"\n"
	 "0:	mov.<bwl>	%1,r0"		"\n"
	 "	mov	r0,%0"			"\n"
	 "	and	%2,r0"			"\n"
	 "	not	r0,r0"			"\n"
	 "	mov.<bwl>	r0,%1"		"\n"
	 "1:	mov	#0,r0"			"\n"
	 "	mov.l	r0,@(%O3,gbr)";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_nand<mode>_soft_tcb (operands[1], operands[2],
					     operands[3]));
}
  [(set_attr "length" "22")])
1505 | |
;; TCB-model atomic nand without fetch; one move shorter than the fetch
;; variant.  Operand 2 is the GBR displacement of the marker slot.
(define_insn "atomic_nand<mode>_soft_tcb"
  [(set (match_operand:QIHISI 0 "atomic_mem_operand_1" "=SraSdd")
	(unspec:QIHISI
	  [(not:QIHISI
	     (and:QIHISI (match_dup 0)
			 (match_operand:QIHISI 1 "logical_operand" "rK08")))]
	  UNSPEC_ATOMIC))
   (use (match_operand:SI 2 "gbr_displacement"))
   (clobber (reg:SI R0_REG))
   (clobber (reg:SI R1_REG))]
  "TARGET_ATOMIC_SOFT_TCB"
{
  return "\r	mova	1f,r0"			"\n"
	 "	.align 2"			"\n"
	 "	mov	#(0f-1f),r1"		"\n"
	 "	mov.l	r0,@(%O2,gbr)"		"\n"
	 "0:	mov.<bwl>	%0,r0"		"\n"
	 "	and	%1,r0"			"\n"
	 "	not	r0,r0"			"\n"
	 "	mov.<bwl>	r0,%0"		"\n"
	 "1:	mov	#0,r0"			"\n"
	 "	mov.l	r0,@(%O2,gbr)";
}
  [(set_attr "length" "20")])
1530 | |
;; Interrupt-mask model fetch-and-nand; SR saved in scratch %3, IMASK
;; raised with "or #0xF0" around the read-modify-write, SR restored at
;; the end.  Splits to nand-fetch when the old value is dead.
(define_insn_and_split "atomic_fetch_nand<mode>_soft_imask"
  [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r")
	(match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))
   (set (match_dup 1)
	(unspec:QIHISI
	  [(not:QIHISI
	     (and:QIHISI (match_dup 1)
			 (match_operand:QIHISI 2 "logical_operand" "rK08")))]
	  UNSPEC_ATOMIC))
   (clobber (reg:SI R0_REG))
   (clobber (match_scratch:SI 3 "=&r"))]
  "TARGET_ATOMIC_SOFT_IMASK"
{
  return "\r	stc	sr,r0"		"\n"
	 "	mov	r0,%3"		"\n"
	 "	or	#0xF0,r0"	"\n"
	 "	ldc	r0,sr"		"\n"
	 "	mov.<bwl>	%1,r0"	"\n"
	 "	mov	r0,%0"		"\n"
	 "	and	%2,r0"		"\n"
	 "	not	r0,r0"		"\n"
	 "	mov.<bwl>	r0,%1"	"\n"
	 "	ldc	%3,sr";
}
  "&& can_create_pseudo_p () && optimize
   && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))"
  [(const_int 0)]
{
  emit_insn (gen_atomic_nand_fetch<mode>_soft_imask (gen_reg_rtx (<MODE>mode),
						     operands[1], operands[2]));
}
  [(set_attr "length" "20")])
1563 | |
1564 ;;------------------------------------------------------------------------------ | |
1565 ;; read - add|sub|or|and|xor|nand - write - return new value | |
1566 | |
;; Expander for the __atomic_<op>_fetch builtins (returns the NEW value).
;; Same model dispatch as the fetch-<op> expander above; operand 3
;; (memory model) is unused.  Sub-word results are zero-extended.
(define_expand "atomic_<fetchop_name>_fetch<mode>"
  [(set (match_operand:QIHISI 0 "arith_reg_dest")
	(FETCHOP:QIHISI
	  (match_operand:QIHISI 1 "atomic_mem_operand_1")
	  (match_operand:QIHISI 2 "<fetchop_predicate_1>")))
   (set (match_dup 1)
	(unspec:QIHISI
	  [(FETCHOP:QIHISI (match_dup 1) (match_dup 2))]
	  UNSPEC_ATOMIC))
   (match_operand:SI 3 "const_int_operand" "")]
  "TARGET_ATOMIC_ANY"
{
  rtx mem = operands[1];
  rtx atomic_insn;

  if (TARGET_ATOMIC_HARD_LLCS
      || (TARGET_SH4A && <MODE>mode == SImode && !TARGET_ATOMIC_STRICT))
    atomic_insn = gen_atomic_<fetchop_name>_fetch<mode>_hard (operands[0], mem,
							      operands[2]);
  else if (TARGET_ATOMIC_SOFT_GUSA)
    atomic_insn = gen_atomic_<fetchop_name>_fetch<mode>_soft_gusa (operands[0],
								   mem, operands[2]);
  else if (TARGET_ATOMIC_SOFT_TCB)
    atomic_insn = gen_atomic_<fetchop_name>_fetch<mode>_soft_tcb (operands[0],
		      mem, operands[2], TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX);
  else if (TARGET_ATOMIC_SOFT_IMASK)
    atomic_insn = gen_atomic_<fetchop_name>_fetch<mode>_soft_imask (operands[0],
								    mem, operands[2]);
  else
    FAIL;

  emit_insn (atomic_insn);

  /* Sub-word results must be zero-extended to SImode for the caller.  */
  if (<MODE>mode == QImode)
    emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]),
				     operands[0]));
  else if (<MODE>mode == HImode)
    emit_insn (gen_zero_extendhisi2 (gen_lowpart (SImode, operands[0]),
				     operands[0]));
  DONE;
})
1608 | |
;; SImode op-then-fetch using the movli.l/movco.l load-locked/store-
;; conditional pair; loops (bf 0b) until the conditional store succeeds.
;; Result is constrained to r0 ("=&z") as required by movli.l/movco.l,
;; and a successful movco.l leaves T = 1 (modeled by the T_REG set).
;; 4 insns * 2 bytes = length 8.
1609 (define_insn "atomic_<fetchop_name>_fetchsi_hard" | |
1610 [(set (match_operand:SI 0 "arith_reg_dest" "=&z") | |
1611 (FETCHOP:SI | |
1612 (match_operand:SI 1 "atomic_mem_operand_1" "=Sra") | |
1613 (match_operand:SI 2 "<fetchop_predicate_1>" | |
1614 "<fetchop_constraint_1_llcs>"))) | |
1615 (set (match_dup 1) | |
1616 (unspec:SI | |
1617 [(FETCHOP:SI (match_dup 1) (match_dup 2))] | |
1618 UNSPEC_ATOMIC)) | |
1619 (set (reg:SI T_REG) (const_int 1))] | |
1620 "TARGET_ATOMIC_HARD_LLCS | |
1621 || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)" | |
1622 { | |
1623 return "\r0: movli.l %1,%0" "\n" | |
1624 " <fetchop_name> %2,%0" "\n" | |
1625 " movco.l %0,%1" "\n" | |
1626 " bf 0b"; | |
1627 } | |
1628 [(set_attr "length" "8")]) | |
1629 | |
1630 ;; Combine pattern for xor (val, -1) / nand (val, -1). | |
;; Same LL/CS loop as atomic_<fetchop_name>_fetchsi_hard, but the
;; operation is a plain bitwise not of the memory value (no operand 2).
1631 (define_insn "atomic_not_fetchsi_hard" | |
1632 [(set (match_operand:SI 0 "arith_reg_dest" "=&z") | |
1633 (not:SI (match_operand:SI 1 "atomic_mem_operand_1" "=Sra"))) | |
1634 (set (match_dup 1) | |
1635 (unspec:SI [(not:SI (match_dup 1))] UNSPEC_ATOMIC)) | |
1636 (set (reg:SI T_REG) (const_int 1))] | |
1637 "TARGET_ATOMIC_HARD_LLCS | |
1638 || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)" | |
1639 { | |
1640 return "\r0: movli.l %1,%0" "\n" | |
1641 " not %0,%0" "\n" | |
1642 " movco.l %0,%1" "\n" | |
1643 " bf 0b"; | |
1644 } | |
1645 [(set_attr "length" "8")]) | |
1646 | |
1647 ;; The QIHImode llcs patterns modify the address register of the memory | |
1648 ;; operand. In order to express that, we have to open code the memory | |
1649 ;; operand. Initially the insn is expanded like every other atomic insn | |
1650 ;; using the memory operand. In split1 the insn is converted and the | |
1651 ;; memory operand's address register is exposed. | |
;; Split strategy: if the fetched result is provably dead afterwards,
;; degrade to the cheaper no-result pattern; otherwise emit the _hard_1
;; open-coded form and patch its MEMs back to the original operand so
;; the alias info survives the rewrite.
1652 (define_insn_and_split "atomic_<fetchop_name>_fetch<mode>_hard" | |
1653 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r") | |
1654 (FETCHOP:QIHI (match_operand:QIHI 1 "atomic_mem_operand_1") | |
1655 (match_operand:QIHI 2 "<fetchop_predicate_1>"))) | |
1656 (set (match_dup 1) (unspec:QIHI [(FETCHOP:QIHI (match_dup 1) (match_dup 2))] | |
1657 UNSPEC_ATOMIC)) | |
1658 (set (reg:SI T_REG) (const_int 1)) | |
1659 (clobber (reg:SI R0_REG))] | |
1660 "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()" | |
1661 "#" | |
1662 "&& 1" | |
1663 [(const_int 0)] | |
1664 { | |
1665 if (optimize | |
1666 && sh_reg_dead_or_unused_after_insn (curr_insn, REGNO (operands[0]))) | |
1667 emit_insn (gen_atomic_<fetchop_name><mode>_hard (operands[1], operands[2])); | |
1668 else | |
1669 { | |
1670 rtx i = gen_atomic_<fetchop_name>_fetch<mode>_hard_1 ( | |
1671 operands[0], XEXP (operands[1], 0), operands[2]); | |
1672 | |
1673 /* Replace the new mems in the new insn with the old mem to preserve | |
1674 aliasing info. */ | |
1675 XEXP (XEXP (XVECEXP (i, 0, 0), 1), 0) = operands[1]; | |
1676 XEXP (XVECEXP (i, 0, 1), 0) = operands[1]; | |
1677 XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 1), 1), 0, 0), 0) = operands[1]; | |
1678 emit_insn (i); | |
1679 } | |
1680 }) | |
1681 | |
;; Open-coded QI/HImode op-then-fetch on LL/CS hardware.  movli.l/movco.l
;; only work on word-aligned SImode locations, so the containing aligned
;; word (%3 = addr & -4) is locked, a copy of it is pushed to the stack,
;; the byte/halfword is updated inside that stack copy (%1 is redirected
;; to the copy: (addr ^ (addr & -4)) + r15 - 4), and the modified word is
;; reloaded and conditionally stored back; retries via bf 0b.
;; Note the "=1" scratch: it tells the RA that the address register in
;; operand 1 is clobbered by the sequence.  14 insns -> length 28.
1682 (define_insn "atomic_<fetchop_name>_fetch<mode>_hard_1" | |
1683 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r") | |
1684 (FETCHOP:QIHI | |
1685 (mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r")) | |
1686 (match_operand:QIHI 2 "<fetchop_predicate_1>" | |
1687 "<fetchop_constraint_1_llcs>"))) | |
1688 (set (mem:QIHI (match_dup 1)) | |
1689 (unspec:QIHI | |
1690 [(FETCHOP:QIHI (mem:QIHI (match_dup 1)) (match_dup 2))] | |
1691 UNSPEC_ATOMIC)) | |
1692 (set (reg:SI T_REG) (const_int 1)) | |
1693 (clobber (reg:SI R0_REG)) | |
1694 (clobber (match_scratch:SI 3 "=&r")) | |
1695 (clobber (match_scratch:SI 4 "=1"))] | |
1696 "TARGET_ATOMIC_HARD_LLCS" | |
1697 { | |
1698 return "\r mov #-4,%3" "\n" | |
1699 " and %1,%3" "\n" | |
1700 " xor %3,%1" "\n" | |
1701 " add r15,%1" "\n" | |
1702 " add #-4,%1" "\n" | |
1703 "0: movli.l @%3,r0" "\n" | |
1704 " mov.l r0,@-r15" "\n" | |
1705 " mov.<bw> @%1,r0" "\n" | |
1706 " <fetchop_name> %2,r0" "\n" | |
1707 " mov.<bw> r0,@%1" "\n" | |
1708 " mov r0,%0" "\n" | |
1709 " mov.l @r15+,r0" "\n" | |
1710 " movco.l r0,@%3" "\n" | |
1711 " bf 0b"; | |
1712 } | |
1713 [(set_attr "length" "28")]) | |
1714 | |
1715 ;; Combine pattern for xor (val, -1) / nand (val, -1). | |
;; QI/HImode not-then-fetch; same stack-copy LL/CS technique as
;; atomic_<fetchop_name>_fetch<mode>_hard_1 (see comment there).  The
;; split degrades to the no-result atomic_not<mode>_hard when the
;; fetched value is dead, re-using the original MEM for alias info.
1716 (define_insn_and_split "atomic_not_fetch<mode>_hard" | |
1717 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r") | |
1718 (not:QIHI (mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r")))) | |
1719 (set (mem:QIHI (match_dup 1)) | |
1720 (unspec:QIHI [(not:QIHI (mem:QIHI (match_dup 1)))] UNSPEC_ATOMIC)) | |
1721 (set (reg:SI T_REG) (const_int 1)) | |
1722 (clobber (reg:SI R0_REG)) | |
1723 (clobber (match_scratch:SI 2 "=&r")) | |
1724 (clobber (match_scratch:SI 3 "=1"))] | |
1725 "TARGET_ATOMIC_HARD_LLCS" | |
1726 { | |
1727 return "\r mov #-4,%2" "\n" | |
1728 " and %1,%2" "\n" | |
1729 " xor %2,%1" "\n" | |
1730 " add r15,%1" "\n" | |
1731 " add #-4,%1" "\n" | |
1732 "0: movli.l @%2,r0" "\n" | |
1733 " mov.l r0,@-r15" "\n" | |
1734 " mov.<bw> @%1,r0" "\n" | |
1735 " not r0,r0" "\n" | |
1736 " mov.<bw> r0,@%1" "\n" | |
1737 " mov r0,%0" "\n" | |
1738 " mov.l @r15+,r0" "\n" | |
1739 " movco.l r0,@%2" "\n" | |
1740 " bf 0b"; | |
1741 } | |
1742 "&& can_create_pseudo_p () && optimize | |
1743 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))" | |
1744 [(const_int 0)] | |
1745 { | |
1746 rtx i = gen_atomic_not<mode>_hard (operands[1]); | |
1747 | |
1748 /* Replace the new mems in the new insn with the old mem to preserve | |
1749 aliasing info. */ | |
1750 rtx m = XEXP (XEXP (XVECEXP (PATTERN (curr_insn), 0, 0), 1), 0); | |
1751 XEXP (XVECEXP (i, 0, 0), 0) = m; | |
1752 XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 0), 1), 0, 0), 0) = m; | |
1753 emit_insn (i); | |
1754 } | |
1755 [(set_attr "length" "28")]) | |
1756 | |
;; gUSA (kernel-assisted restartable sequence) op-then-fetch.
;; r0 receives the end-of-sequence address (mova 1f), r1 saves the stack
;; pointer, and r15 is loaded with the negative sequence length
;; #(0f-1f); restoring r15 at label 1 ends the critical region.  R0/R1
;; are therefore clobbered and all operands use the gUSA "u" class.
1757 (define_insn "atomic_<fetchop_name>_fetch<mode>_soft_gusa" | |
1758 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u") | |
1759 (FETCHOP:QIHISI | |
1760 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=AraAdd") | |
1761 (match_operand:QIHISI 2 "<fetchop_predicate_1>" | |
1762 "<fetchop_constraint_1_gusa>"))) | |
1763 (set (match_dup 1) | |
1764 (unspec:QIHISI | |
1765 [(FETCHOP:QIHISI (match_dup 1) (match_dup 2))] | |
1766 UNSPEC_ATOMIC)) | |
1767 (clobber (reg:SI R0_REG)) | |
1768 (clobber (reg:SI R1_REG))] | |
1769 "TARGET_ATOMIC_SOFT_GUSA" | |
1770 { | |
1771 return "\r mova 1f,r0" "\n" | |
1772 " mov r15,r1" "\n" | |
1773 " .align 2" "\n" | |
1774 " mov #(0f-1f),r15" "\n" | |
1775 "0: mov.<bwl> %1,%0" "\n" | |
1776 " <fetchop_name> %2,%0" "\n" | |
1777 " mov.<bwl> %0,%1" "\n" | |
1778 "1: mov r1,r15"; | |
1779 } | |
1780 [(set_attr "length" "16")]) | |
1781 | |
1782 ;; Combine pattern for xor (val, -1) / nand (val, -1). | |
;; gUSA not-then-fetch; identical frame to the <fetchop_name> variant
;; above, with the operation replaced by a single not.
1783 (define_insn "atomic_not_fetch<mode>_soft_gusa" | |
1784 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u") | |
1785 (not:QIHISI (match_operand:QIHISI 1 "atomic_mem_operand_1" "=AraAdd"))) | |
1786 (set (match_dup 1) | |
1787 (unspec:QIHISI [(not:QIHISI (match_dup 1))] UNSPEC_ATOMIC)) | |
1788 (clobber (reg:SI R0_REG)) | |
1789 (clobber (reg:SI R1_REG))] | |
1790 "TARGET_ATOMIC_SOFT_GUSA" | |
1791 { | |
1792 return "\r mova 1f,r0" "\n" | |
1793 " mov r15,r1" "\n" | |
1794 " .align 2" "\n" | |
1795 " mov #(0f-1f),r15" "\n" | |
1796 "0: mov.<bwl> %1,%0" "\n" | |
1797 " not %0,%0" "\n" | |
1798 " mov.<bwl> %0,%1" "\n" | |
1799 "1: mov r1,r15"; | |
1800 } | |
1801 [(set_attr "length" "16")]) | |
1802 | |
;; TCB-style software atomic op-then-fetch: the restart/end address is
;; published in the GBR-relative thread-control-block slot %O3 for the
;; duration of the sequence and cleared (set to 0) afterwards.  The
;; split drops to the no-result atomic_<fetchop_name><mode>_soft_tcb
;; when the fetched value is unused.
1803 (define_insn_and_split "atomic_<fetchop_name>_fetch<mode>_soft_tcb" | |
1804 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r") | |
1805 (FETCHOP:QIHISI | |
1806 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd") | |
1807 (match_operand:QIHISI 2 "<fetchop_predicate_1>" | |
1808 "<fetchop_constraint_1_tcb>"))) | |
1809 (set (match_dup 1) | |
1810 (unspec:QIHISI | |
1811 [(FETCHOP:QIHISI (match_dup 1) (match_dup 2))] | |
1812 UNSPEC_ATOMIC)) | |
1813 (clobber (reg:SI R0_REG)) | |
1814 (clobber (reg:SI R1_REG)) | |
1815 (use (match_operand:SI 3 "gbr_displacement"))] | |
1816 "TARGET_ATOMIC_SOFT_TCB" | |
1817 { | |
1818 return "\r mova 1f,r0" "\n" | |
1819 " mov #(0f-1f),r1" "\n" | |
1820 " .align 2" "\n" | |
1821 " mov.l r0,@(%O3,gbr)" "\n" | |
1822 "0: mov.<bwl> %1,r0" "\n" | |
1823 " <fetchop_name> %2,r0" "\n" | |
1824 " mov.<bwl> r0,%1" "\n" | |
1825 "1: mov r0,%0" "\n" | |
1826 " mov #0,r0" "\n" | |
1827 " mov.l r0,@(%O3,gbr)"; | |
1828 } | |
1829 "&& can_create_pseudo_p () && optimize | |
1830 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))" | |
1831 [(const_int 0)] | |
1832 { | |
1833 emit_insn (gen_atomic_<fetchop_name><mode>_soft_tcb ( | |
1834 operands[1], operands[2], operands[3])); | |
1835 } | |
1836 [(set_attr "length" "20")]) | |
1837 | |
1838 ;; Combine pattern for xor (val, -1) / nand (val, -1). | |
;; TCB not-then-fetch; same GBR slot (%O2 here) protocol as the
;; <fetchop_name> variant, with a plain not as the operation.
1839 (define_insn_and_split "atomic_not_fetch<mode>_soft_tcb" | |
1840 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r") | |
1841 (not:QIHISI (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))) | |
1842 (set (match_dup 1) | |
1843 (unspec:QIHISI [(not:QIHISI (match_dup 1))] UNSPEC_ATOMIC)) | |
1844 (clobber (reg:SI R0_REG)) | |
1845 (clobber (reg:SI R1_REG)) | |
1846 (use (match_operand:SI 2 "gbr_displacement"))] | |
1847 "TARGET_ATOMIC_SOFT_TCB" | |
1848 { | |
1849 return "\r mova 1f,r0" "\n" | |
1850 " mov #(0f-1f),r1" "\n" | |
1851 " .align 2" "\n" | |
1852 " mov.l r0,@(%O2,gbr)" "\n" | |
1853 "0: mov.<bwl> %1,r0" "\n" | |
1854 " not r0,r0" "\n" | |
1855 " mov.<bwl> r0,%1" "\n" | |
1856 "1: mov r0,%0" "\n" | |
1857 " mov #0,r0" "\n" | |
1858 " mov.l r0,@(%O2,gbr)"; | |
1859 } | |
1860 "&& can_create_pseudo_p () && optimize | |
1861 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))" | |
1862 [(const_int 0)] | |
1863 { | |
1864 emit_insn (gen_atomic_not<mode>_soft_tcb (operands[1], operands[2])); | |
1865 } | |
1866 [(set_attr "length" "20")]) | |
1867 | |
;; Interrupt-mask op-then-fetch: saves SR into the scratch %3, raises
;; the SR interrupt mask bits with 'or #0xF0' (blocking interrupts for
;; the read-modify-write), then restores the original SR.  Result must
;; live in r0 ("=&z") because 'or #imm' only operates on r0.
1868 (define_insn "atomic_<fetchop_name>_fetch<mode>_soft_imask" | |
1869 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&z") | |
1870 (FETCHOP:QIHISI | |
1871 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd") | |
1872 (match_operand:QIHISI 2 "<fetchop_predicate_1>" | |
1873 "<fetchop_constraint_1_imask>"))) | |
1874 (set (match_dup 1) | |
1875 (unspec:QIHISI | |
1876 [(FETCHOP:QIHISI (match_dup 1) (match_dup 2))] | |
1877 UNSPEC_ATOMIC)) | |
1878 (clobber (match_scratch:SI 3 "=&r"))] | |
1879 "TARGET_ATOMIC_SOFT_IMASK" | |
1880 { | |
1881 return "\r stc sr,%0" "\n" | |
1882 " mov %0,%3" "\n" | |
1883 " or #0xF0,%0" "\n" | |
1884 " ldc %0,sr" "\n" | |
1885 " mov.<bwl> %1,%0" "\n" | |
1886 " <fetchop_name> %2,%0" "\n" | |
1887 " mov.<bwl> %0,%1" "\n" | |
1888 " ldc %3,sr"; | |
1889 } | |
1890 [(set_attr "length" "16")]) | |
1891 | |
1892 ;; Combine pattern for xor (val, -1) / nand (val, -1). | |
;; Interrupt-mask not-then-fetch; same SR save/raise/restore framing as
;; the <fetchop_name> variant above.
1893 (define_insn "atomic_not_fetch<mode>_soft_imask" | |
1894 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&z") | |
1895 (not:QIHISI (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd"))) | |
1896 (set (match_dup 1) | |
1897 (unspec:QIHISI [(not:QIHISI (match_dup 1))] UNSPEC_ATOMIC)) | |
1898 (clobber (match_scratch:SI 2 "=&r"))] | |
1899 "TARGET_ATOMIC_SOFT_IMASK" | |
1900 { | |
1901 return "\r stc sr,%0" "\n" | |
1902 " mov %0,%2" "\n" | |
1903 " or #0xF0,%0" "\n" | |
1904 " ldc %0,sr" "\n" | |
1905 " mov.<bwl> %1,%0" "\n" | |
1906 " not %0,%0" "\n" | |
1907 " mov.<bwl> %0,%1" "\n" | |
1908 " ldc %2,sr"; | |
1909 } | |
1910 [(set_attr "length" "16")]) | |
1911 | |
;; Expander for nand-then-fetch (result = ~(mem & val)).  Same model
;; dispatch and sub-word zero-extension as atomic_<fetchop_name>_fetch
;; above; operand 3 (memory model) is not inspected.
1912 (define_expand "atomic_nand_fetch<mode>" | |
1913 [(set (match_operand:QIHISI 0 "arith_reg_dest") | |
1914 (not:QIHISI (and:QIHISI | |
1915 (match_operand:QIHISI 1 "atomic_mem_operand_1") | |
1916 (match_operand:QIHISI 2 "atomic_logical_operand_1")))) | |
1917 (set (match_dup 1) | |
1918 (unspec:QIHISI | |
1919 [(not:QIHISI (and:QIHISI (match_dup 1) (match_dup 2)))] | |
1920 UNSPEC_ATOMIC)) | |
1921 (match_operand:SI 3 "const_int_operand")] | |
1922 "TARGET_ATOMIC_ANY" | |
1923 { | |
1924 rtx mem = operands[1]; | |
1925 rtx atomic_insn; | |
1926 | |
1927 if (TARGET_ATOMIC_HARD_LLCS | |
1928 || (TARGET_SH4A && <MODE>mode == SImode && !TARGET_ATOMIC_STRICT)) | |
1929 atomic_insn = gen_atomic_nand_fetch<mode>_hard (operands[0], mem, | |
1930 operands[2]); | |
1931 else if (TARGET_ATOMIC_SOFT_GUSA) | |
1932 atomic_insn = gen_atomic_nand_fetch<mode>_soft_gusa (operands[0], mem, | |
1933 operands[2]); | |
1934 else if (TARGET_ATOMIC_SOFT_TCB) | |
1935 atomic_insn = gen_atomic_nand_fetch<mode>_soft_tcb (operands[0], mem, | |
1936 operands[2], TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX); | |
1937 else if (TARGET_ATOMIC_SOFT_IMASK) | |
1938 atomic_insn = gen_atomic_nand_fetch<mode>_soft_imask (operands[0], mem, | |
1939 operands[2]); | |
1940 else | |
1941 FAIL; | |
1942 | |
1943 emit_insn (atomic_insn); | |
1944 | |
;; Normalize sub-word results to full SImode, as in the fetchop expander.
1945 if (<MODE>mode == QImode) | |
1946 emit_insn (gen_zero_extendqisi2 (gen_lowpart (SImode, operands[0]), | |
1947 operands[0])); | |
1948 else if (<MODE>mode == HImode) | |
1949 emit_insn (gen_zero_extendhisi2 (gen_lowpart (SImode, operands[0]), | |
1950 operands[0])); | |
1951 DONE; | |
1952 }) | |
1953 | |
;; SImode nand-then-fetch via movli.l/movco.l; nand needs an extra
;; 'not' after the 'and', hence 5 insns -> length 10.  Result tied to
;; r0 ("=&z"); successful movco.l sets T = 1.
1954 (define_insn "atomic_nand_fetchsi_hard" | |
1955 [(set (match_operand:SI 0 "arith_reg_dest" "=&z") | |
1956 (not:SI (and:SI (match_operand:SI 1 "atomic_mem_operand_1" "=Sra") | |
1957 (match_operand:SI 2 "logical_operand" "rK08")))) | |
1958 (set (match_dup 1) | |
1959 (unspec:SI | |
1960 [(not:SI (and:SI (match_dup 1) (match_dup 2)))] | |
1961 UNSPEC_ATOMIC)) | |
1962 (set (reg:SI T_REG) (const_int 1))] | |
1963 "TARGET_ATOMIC_HARD_LLCS | |
1964 || (TARGET_SH4A && TARGET_ATOMIC_ANY && !TARGET_ATOMIC_STRICT)" | |
1965 { | |
1966 return "\r0: movli.l %1,%0" "\n" | |
1967 " and %2,%0" "\n" | |
1968 " not %0,%0" "\n" | |
1969 " movco.l %0,%1" "\n" | |
1970 " bf 0b"; | |
1971 } | |
1972 [(set_attr "length" "10")]) | |
1973 | |
1974 ;; The QIHImode llcs patterns modify the address register of the memory | |
1975 ;; operand. In order to express that, we have to open code the memory | |
1976 ;; operand. Initially the insn is expanded like every other atomic insn | |
1977 ;; using the memory operand. In split1 the insn is converted and the | |
1978 ;; memory operand's address register is exposed. | |
;; Split: use the cheaper no-result nand pattern when the fetched value
;; is dead; otherwise emit _hard_1 and re-insert the original MEM (note
;; the extra XEXP level vs. the fetchop case — the MEM sits under the
;; not/and nesting here).
1979 (define_insn_and_split "atomic_nand_fetch<mode>_hard" | |
1980 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r") | |
1981 (not:QIHI (and:QIHI (match_operand:QIHI 1 "atomic_mem_operand_1") | |
1982 (match_operand:QIHI 2 "logical_operand")))) | |
1983 (set (match_dup 1) | |
1984 (unspec:QIHI [(not:QIHI (and:QIHI (match_dup 1) (match_dup 2)))] | |
1985 UNSPEC_ATOMIC)) | |
1986 (set (reg:SI T_REG) (const_int 1)) | |
1987 (clobber (reg:SI R0_REG))] | |
1988 "TARGET_ATOMIC_HARD_LLCS && can_create_pseudo_p ()" | |
1989 "#" | |
1990 "&& 1" | |
1991 [(const_int 0)] | |
1992 { | |
1993 if (optimize | |
1994 && sh_reg_dead_or_unused_after_insn (curr_insn, REGNO (operands[0]))) | |
1995 emit_insn (gen_atomic_nand<mode>_hard (operands[1], operands[2])); | |
1996 else | |
1997 { | |
1998 rtx i = gen_atomic_nand_fetch<mode>_hard_1 ( | |
1999 operands[0], XEXP (operands[1], 0), operands[2]); | |
2000 | |
2001 /* Replace the new mems in the new insn with the old mem to preserve | |
2002 aliasing info. */ | |
2003 XEXP (XEXP (XEXP (XVECEXP (i, 0, 0), 1), 0), 0) = operands[1]; | |
2004 XEXP (XVECEXP (i, 0, 1), 0) = operands[1]; | |
2005 XEXP (XEXP (XVECEXP (XEXP (XVECEXP (i, 0, 1), 1), 0, 0), 0), | |
2006 0) = operands[1]; | |
2007 emit_insn (i); | |
2008 } | |
2009 }) | |
2010 | |
;; Open-coded QI/HImode nand-then-fetch on LL/CS hardware, using the
;; same lock-the-aligned-word / modify-a-stack-copy scheme as
;; atomic_<fetchop_name>_fetch<mode>_hard_1 (see comment there).
;; 'not r0,%0' produces the result directly in %0, saving the separate
;; result move — still 14 insns -> length 28.  "=1" marks operand 1
;; (the address register) as clobbered.
2011 (define_insn "atomic_nand_fetch<mode>_hard_1" | |
2012 [(set (match_operand:QIHI 0 "arith_reg_dest" "=&r") | |
2013 (not:QIHI | |
2014 (and:QIHI (mem:QIHI (match_operand:SI 1 "arith_reg_operand" "r")) | |
2015 (match_operand:QIHI 2 "logical_operand" "rK08")))) | |
2016 (set (mem:QIHI (match_dup 1)) | |
2017 (unspec:QIHI | |
2018 [(not:QIHI (and:QIHI (mem:QIHI (match_dup 1)) (match_dup 2)))] | |
2019 UNSPEC_ATOMIC)) | |
2020 (set (reg:SI T_REG) (const_int 1)) | |
2021 (clobber (reg:SI R0_REG)) | |
2022 (clobber (match_scratch:SI 3 "=&r")) | |
2023 (clobber (match_scratch:SI 4 "=1"))] | |
2024 "TARGET_ATOMIC_HARD_LLCS" | |
2025 { | |
2026 return "\r mov #-4,%3" "\n" | |
2027 " and %1,%3" "\n" | |
2028 " xor %3,%1" "\n" | |
2029 " add r15,%1" "\n" | |
2030 " add #-4,%1" "\n" | |
2031 "0: movli.l @%3,r0" "\n" | |
2032 " mov.l r0,@-r15" "\n" | |
2033 " mov.<bw> @%1,r0" "\n" | |
2034 " and %2,r0" "\n" | |
2035 " not r0,%0" "\n" | |
2036 " mov.<bw> %0,@%1" "\n" | |
2037 " mov.l @r15+,r0" "\n" | |
2038 " movco.l r0,@%3" "\n" | |
2039 " bf 0b"; | |
2040 } | |
2041 [(set_attr "length" "28")]) | |
2042 | |
;; gUSA nand-then-fetch (result = ~(mem & val)).  Standard gUSA frame:
;; r0 = end address, r1 = saved r15, negative r15 marks the critical
;; section.  One more insn than the plain fetchop variant (and + not),
;; hence length 18.
2043 (define_insn "atomic_nand_fetch<mode>_soft_gusa" | |
2044 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&u") | |
2045 (not:QIHISI (and:QIHISI | |
2046 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=AraAdd") | |
2047 (match_operand:QIHISI 2 "arith_reg_operand" "u")))) | |
2048 (set (match_dup 1) | |
2049 (unspec:QIHISI | |
2050 [(not:QIHISI (and:QIHISI (match_dup 1) (match_dup 2)))] | |
2051 UNSPEC_ATOMIC)) | |
2052 (clobber (reg:SI R0_REG)) | |
2053 (clobber (reg:SI R1_REG))] | |
2054 "TARGET_ATOMIC_SOFT_GUSA" | |
2055 { | |
2056 return "\r mova 1f,r0" "\n" | |
2057 " .align 2" "\n" | |
2058 " mov r15,r1" "\n" | |
2059 " mov #(0f-1f),r15" "\n" | |
2060 "0: mov.<bwl> %1,%0" "\n" | |
2061 " and %2,%0" "\n" | |
2062 " not %0,%0" "\n" | |
2063 " mov.<bwl> %0,%1" "\n" | |
2064 "1: mov r1,r15"; | |
2065 } | |
2066 [(set_attr "length" "18")]) | |
2067 | |
;; TCB nand-then-fetch: restart address published in GBR slot %O3 and
;; cleared at the end.  The result copy (mov r0,%0) happens before the
;; store so the atomic write is the last insn of the restartable
;; region.  Split drops the result when it is unused.
2068 (define_insn_and_split "atomic_nand_fetch<mode>_soft_tcb" | |
2069 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&r") | |
2070 (not:QIHISI (and:QIHISI | |
2071 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd") | |
2072 (match_operand:QIHISI 2 "logical_operand" "rK08")))) | |
2073 (set (match_dup 1) | |
2074 (unspec:QIHISI | |
2075 [(not:QIHISI (and:QIHISI (match_dup 1) (match_dup 2)))] | |
2076 UNSPEC_ATOMIC)) | |
2077 (clobber (reg:SI R0_REG)) | |
2078 (clobber (reg:SI R1_REG)) | |
2079 (use (match_operand:SI 3 "gbr_displacement"))] | |
2080 "TARGET_ATOMIC_SOFT_TCB" | |
2081 { | |
2082 return "\r mova 1f,r0" "\n" | |
2083 " mov #(0f-1f),r1" "\n" | |
2084 " .align 2" "\n" | |
2085 " mov.l r0,@(%O3,gbr)" "\n" | |
2086 "0: mov.<bwl> %1,r0" "\n" | |
2087 " and %2,r0" "\n" | |
2088 " not r0,r0" "\n" | |
2089 " mov r0,%0" "\n" | |
2090 " mov.<bwl> r0,%1" "\n" | |
2091 "1: mov #0,r0" "\n" | |
2092 " mov.l r0,@(%O3,gbr)"; | |
2093 } | |
2094 "&& can_create_pseudo_p () && optimize | |
2095 && sh_reg_dead_or_unused_after_insn (insn, REGNO (operands[0]))" | |
2096 [(const_int 0)] | |
2097 { | |
2098 emit_insn (gen_atomic_nand<mode>_soft_tcb (operands[1], operands[2], | |
2099 operands[3])); | |
2100 } | |
2101 [(set_attr "length" "22")]) | |
2102 | |
;; Interrupt-mask nand-then-fetch: SR saved to %3, mask raised via
;; 'or #0xF0' (r0-only insn, hence the "=&z" result), SR restored at
;; the end.  9 insns -> length 18.
2103 (define_insn "atomic_nand_fetch<mode>_soft_imask" | |
2104 [(set (match_operand:QIHISI 0 "arith_reg_dest" "=&z") | |
2105 (not:QIHISI (and:QIHISI | |
2106 (match_operand:QIHISI 1 "atomic_mem_operand_1" "=SraSdd") | |
2107 (match_operand:QIHISI 2 "logical_operand" "rK08")))) | |
2108 (set (match_dup 1) | |
2109 (unspec:QIHISI | |
2110 [(not:QIHISI (and:QIHISI (match_dup 1) (match_dup 2)))] | |
2111 UNSPEC_ATOMIC)) | |
2112 (clobber (match_scratch:SI 3 "=&r"))] | |
2113 "TARGET_ATOMIC_SOFT_IMASK" | |
2114 { | |
2115 return "\r stc sr,%0" "\n" | |
2116 " mov %0,%3" "\n" | |
2117 " or #0xF0,%0" "\n" | |
2118 " ldc %0,sr" "\n" | |
2119 " mov.<bwl> %1,%0" "\n" | |
2120 " and %2,%0" "\n" | |
2121 " not %0,%0" "\n" | |
2122 " mov.<bwl> %0,%1" "\n" | |
2123 " ldc %3,sr"; | |
2124 } | |
2125 [(set_attr "length" "18")]) | |
2126 | |
2127 ;;------------------------------------------------------------------------------ | |
2128 ;; read - test against zero - or with 0x80 - write - return test result | |
2129 | |
;; Expander for the atomic_test_and_set builtin.  With -mtas it emits a
;; real tas.b; otherwise it stores atomic_test_and_set_trueval through
;; the model-specific swap pattern.  All variants leave the "was zero"
;; test in the T bit, which is then inverted into operand 0.
2130 (define_expand "atomic_test_and_set" | |
2131 [(match_operand:SI 0 "register_operand" "") ;; bool result output | |
2132 (match_operand:QI 1 "memory_operand" "") ;; memory | |
2133 (match_operand:SI 2 "const_int_operand" "")] ;; model | |
2134 "TARGET_ATOMIC_ANY || TARGET_ENABLE_TAS" | |
2135 { | |
2136 rtx addr = force_reg (Pmode, XEXP (operands[1], 0)); | |
2137 | |
2138 if (TARGET_ENABLE_TAS) | |
2139 emit_insn (gen_tasb (addr)); | |
2140 else | |
2141 { | |
2142 rtx val = gen_int_mode (targetm.atomic_test_and_set_trueval, QImode); | |
2143 val = force_reg (QImode, val); | |
2144 | |
2145 if (TARGET_ATOMIC_HARD_LLCS) | |
2146 emit_insn (gen_atomic_test_and_set_hard (addr, val)); | |
2147 else if (TARGET_ATOMIC_SOFT_GUSA) | |
2148 emit_insn (gen_atomic_test_and_set_soft_gusa (addr, val)); | |
2149 else if (TARGET_ATOMIC_SOFT_TCB) | |
2150 emit_insn (gen_atomic_test_and_set_soft_tcb (addr, val, | |
2151 TARGET_ATOMIC_SOFT_TCB_GBR_OFFSET_RTX)); | |
2152 else if (TARGET_ATOMIC_SOFT_IMASK) | |
2153 emit_insn (gen_atomic_test_and_set_soft_imask (addr, val)); | |
2154 else | |
2155 FAIL; | |
2156 } | |
2157 | |
2158 /* The result of the test op is the inverse of what we are | |
2159 supposed to return. Thus invert the T bit. The inversion will be | |
2160 potentially optimized away and integrated into surrounding code. */ | |
2161 emit_insn (gen_movnegt (operands[0], get_t_reg_rtx ())); | |
2162 DONE; | |
2163 }) | |
2164 | |
;; Hardware test-and-set: tas.b sets T = (byte == 0) and stores the
;; byte with bit 7 set (the const_int 128 in the unspec).
2165 (define_insn "tasb" | |
2166 [(set (reg:SI T_REG) | |
2167 (eq:SI (mem:QI (match_operand:SI 0 "register_operand" "r")) | |
2168 (const_int 0))) | |
2169 (set (mem:QI (match_dup 0)) | |
2170 (unspec:QI [(const_int 128)] UNSPEC_ATOMIC))] | |
2171 "TARGET_ENABLE_TAS" | |
2172 "tas.b @%0" | |
2173 [(set_attr "insn_class" "co_group")]) | |
2174 | |
;; gUSA test-and-set: atomically swap in the true-value byte %1 inside
;; the restartable region, then test the old byte (saved in %2) for
;; zero outside of it, setting T.
2175 (define_insn "atomic_test_and_set_soft_gusa" | |
2176 [(set (reg:SI T_REG) | |
2177 (eq:SI (mem:QI (match_operand:SI 0 "register_operand" "u")) | |
2178 (const_int 0))) | |
2179 (set (mem:QI (match_dup 0)) | |
2180 (unspec:QI [(match_operand:QI 1 "register_operand" "u")] UNSPEC_ATOMIC)) | |
2181 (clobber (match_scratch:QI 2 "=&u")) | |
2182 (clobber (reg:SI R0_REG)) | |
2183 (clobber (reg:SI R1_REG))] | |
2184 "TARGET_ATOMIC_SOFT_GUSA && !TARGET_ENABLE_TAS" | |
2185 { | |
2186 return "\r mova 1f,r0" "\n" | |
2187 " .align 2" "\n" | |
2188 " mov r15,r1" "\n" | |
2189 " mov #(0f-1f),r15" "\n" | |
2190 "0: mov.b @%0,%2" "\n" | |
2191 " mov.b %1,@%0" "\n" | |
2192 "1: mov r1,r15" "\n" | |
2193 " tst %2,%2"; | |
2194 } | |
2195 [(set_attr "length" "16")]) | |
2196 | |
;; TCB test-and-set: restart address in GBR slot %O2 for the duration
;; of the byte swap, then cleared; old byte kept in %3 and tested for
;; zero into T afterwards.
2197 (define_insn "atomic_test_and_set_soft_tcb" | |
2198 [(set (reg:SI T_REG) | |
2199 (eq:SI (mem:QI (match_operand:SI 0 "register_operand" "r")) | |
2200 (const_int 0))) | |
2201 (set (mem:QI (match_dup 0)) | |
2202 (unspec:QI [(match_operand:QI 1 "register_operand" "r")] UNSPEC_ATOMIC)) | |
2203 (use (match_operand:SI 2 "gbr_displacement")) | |
2204 (clobber (match_scratch:QI 3 "=&r")) | |
2205 (clobber (reg:SI R0_REG)) | |
2206 (clobber (reg:SI R1_REG))] | |
2207 "TARGET_ATOMIC_SOFT_TCB && !TARGET_ENABLE_TAS" | |
2208 { | |
2209 return "\r mova 1f,r0" "\n" | |
2210 " mov #(0f-1f),r1" "\n" | |
2211 " .align 2" "\n" | |
2212 " mov.l r0,@(%O2,gbr)" "\n" | |
2213 "0: mov.b @%0,%3" "\n" | |
2214 " mov #0,r0" "\n" | |
2215 " mov.b %1,@%0" "\n" | |
2216 "1: mov.l r0,@(%O2,gbr)" "\n" | |
2217 " tst %3,%3"; | |
2218 } | |
2219 [(set_attr "length" "18")]) | |
2220 | |
;; Interrupt-mask test-and-set: SR saved to %2, mask raised with
;; 'or #0xF0', byte swapped through r0, SR restored, then the old byte
;; (still in r0) tested for zero into T.
2221 (define_insn "atomic_test_and_set_soft_imask" | |
2222 [(set (reg:SI T_REG) | |
2223 (eq:SI (mem:QI (match_operand:SI 0 "register_operand" "r")) | |
2224 (const_int 0))) | |
2225 (set (mem:QI (match_dup 0)) | |
2226 (unspec:QI [(match_operand:QI 1 "register_operand" "r")] UNSPEC_ATOMIC)) | |
2227 (clobber (match_scratch:SI 2 "=&r")) | |
2228 (clobber (reg:SI R0_REG))] | |
2229 "TARGET_ATOMIC_SOFT_IMASK && !TARGET_ENABLE_TAS" | |
2230 { | |
2231 return "\r stc sr,r0" "\n" | |
2232 " mov r0,%2" "\n" | |
2233 " or #0xF0,r0" "\n" | |
2234 " ldc r0,sr" "\n" | |
2235 " mov.b @%0,r0" "\n" | |
2236 " mov.b %1,@%0" "\n" | |
2237 " ldc %2,sr" "\n" | |
2238 " tst r0,r0"; | |
2239 } | |
2240 [(set_attr "length" "16")]) | |
2241 | |
;; LL/CS test-and-set on a byte: locks the containing aligned word
;; (%2 = addr & -4), redirects %0 to a stack copy of that word, swaps
;; the byte there (old value -> %3), then reloads the word and movco's
;; it back, looping on failure; finally tests the old byte into T.
;; "=0" on scratch 4 marks the address register (operand 0) as
;; clobbered by the address arithmetic.  13 insns -> length 26.
2242 (define_insn "atomic_test_and_set_hard" | |
2243 [(set (reg:SI T_REG) | |
2244 (eq:SI (mem:QI (match_operand:SI 0 "register_operand" "r")) | |
2245 (const_int 0))) | |
2246 (set (mem:QI (match_dup 0)) | |
2247 (unspec:QI [(match_operand:QI 1 "register_operand" "r")] UNSPEC_ATOMIC)) | |
2248 (clobber (reg:SI R0_REG)) | |
2249 (clobber (match_scratch:SI 2 "=&r")) | |
2250 (clobber (match_scratch:SI 3 "=&r")) | |
2251 (clobber (match_scratch:SI 4 "=0"))] | |
2252 "TARGET_ATOMIC_HARD_LLCS && !TARGET_ENABLE_TAS" | |
2253 { | |
2254 return "\r mov #-4,%2" "\n" | |
2255 " and %0,%2" "\n" | |
2256 " xor %2,%0" "\n" | |
2257 " add r15,%0" "\n" | |
2258 " add #-4,%0" "\n" | |
2259 "0: movli.l @%2,r0" "\n" | |
2260 " mov.l r0,@-r15" "\n" | |
2261 " mov.b @%0,%3" "\n" | |
2262 " mov.b %1,@%0" "\n" | |
2263 " mov.l @r15+,r0" "\n" | |
2264 " movco.l r0,@%2" "\n" | |
2265 " bf 0b" "\n" | |
2266 " tst %3,%3"; | |
2267 } | |
2268 [(set_attr "length" "26")]) | |
2269 |