0
|
1 /* Linux-specific atomic operations for PA Linux.
|
|
2 Copyright (C) 2008, 2009 Free Software Foundation, Inc.
|
|
3 Based on code contributed by CodeSourcery for ARM EABI Linux.
|
|
4 Modifications for PA Linux by Helge Deller <deller@gmx.de>
|
|
5
|
|
6 This file is part of GCC.
|
|
7
|
|
8 GCC is free software; you can redistribute it and/or modify it under
|
|
9 the terms of the GNU General Public License as published by the Free
|
|
10 Software Foundation; either version 3, or (at your option) any later
|
|
11 version.
|
|
12
|
|
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
|
|
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
16 for more details.
|
|
17
|
|
18 Under Section 7 of GPL version 3, you are granted additional
|
|
19 permissions described in the GCC Runtime Library Exception, version
|
|
20 3.1, as published by the Free Software Foundation.
|
|
21
|
|
22 You should have received a copy of the GNU General Public License and
|
|
23 a copy of the GCC Runtime Library Exception along with this program;
|
|
24 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
|
|
25 <http://www.gnu.org/licenses/>. */
|
|
26
|
|
/* Errno values returned (negated) by the kernel LWS helper.  libgcc
   cannot include kernel headers, so they are spelled out here.
   NOTE(review): these are the parisc-specific numbers (ENOSYS is 251
   on parisc, unlike most Linux ports) — confirm against the kernel's
   asm/errno.h for parisc.  */
#define EFAULT 14
#define EBUSY 16
#define ENOSYS 251
|
|
30
|
|
31 /* All PA-RISC implementations supported by linux have strongly
|
|
32 ordered loads and stores. Only cache flushes and purges can be
|
|
33 delayed. The data cache implementations are all globally
|
|
34    coherent.  Thus, there is no need to synchronize memory accesses.
|
|
35
|
|
36    GCC automatically issues an asm memory barrier when it encounters
|
|
37 a __sync_synchronize builtin. Thus, we do not need to define this
|
|
38 builtin.
|
|
39
|
|
40 We implement byte, short and int versions of each atomic operation
|
|
41 using the kernel helper defined below. There is no support for
|
|
42 64-bit operations yet. */
|
|
43
|
|
/* A privileged instruction to crash a userspace program with SIGILL.
   Executing iitlbp from user mode traps, which is the desired way to
   abort when the kernel helper reports an unrecoverable error.  */
#define ABORT_INSTRUCTION asm ("iitlbp %r0,(%sr0, %r0)")

/* Determine kernel LWS function call (0=32-bit, 1=64-bit userspace).
   Selected at compile time from the width of 'unsigned long'.  */
#define LWS_CAS (sizeof(unsigned long) == 4 ? 0 : 1)
|
|
49
|
|
/* Kernel helper for compare-and-exchange a 32-bit value.  Returns 0 on
   success (NEWVAL written), -EBUSY when *MEM did not equal OLDVAL, and
   aborts the process on -EFAULT/-ENOSYS.  */
static inline long
__kernel_cmpxchg (int oldval, int newval, int *mem)
{
  /* The parisc light-weight syscall (LWS) ABI takes its arguments in
     r26/r25/r24, returns the result in r28 and an error code in r21,
     so each operand is pinned to its register explicitly.  */
  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
  register long lws_ret asm("r28");
  register long lws_errno asm("r21");
  register int lws_old asm("r25") = oldval;
  register int lws_new asm("r24") = newval;
  /* Branch to the kernel LWS gateway page at offset 0xb0 in space sr2;
     the LWS function index (%5 = LWS_CAS) is loaded into r20 by the
     following instruction, executed in the branch delay slot.  */
  asm volatile (	"ble	0xb0(%%sr2, %%r0)	\n\t"
			"ldi	%5, %%r20		\n\t"
	: "=r" (lws_ret), "=r" (lws_errno), "=r" (lws_mem),
	  "=r" (lws_old), "=r" (lws_new)
	: "i" (LWS_CAS), "2" (lws_mem), "3" (lws_old), "4" (lws_new)
	: "r1", "r20", "r22", "r23", "r29", "r31", "memory"
  );
  /* A fault on MEM or a kernel without LWS support is unrecoverable:
     kill the process with SIGILL.  */
  if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
    ABORT_INSTRUCTION;

  /* If the kernel LWS call succeeded (lws_errno == 0), lws_ret contains
     the old value from memory.  If this value is equal to OLDVAL, the
     new value was written to memory.  If not, return -EBUSY.  */
  if (!lws_errno && lws_ret != oldval)
    lws_errno = -EBUSY;

  return lws_errno;
}
|
|
77
|
|
/* Keep these entry points out of the shared library's dynamic export
   table; they are internal to libgcc.  */
#define HIDDEN __attribute__ ((visibility ("hidden")))
|
|
79
|
|
/* Big endian masks.  INVERT_MASK_<n> is XORed with the byte offset's
   bit position to convert a big-endian byte address within a 32-bit
   word into the shift count of that subword; MASK_<n> is the subword's
   value mask (before shifting).  */
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16

#define MASK_1 0xffu
#define MASK_2 0xffffu
|
|
86
|
|
87 #define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP) \
|
|
88 int HIDDEN \
|
|
89 __sync_fetch_and_##OP##_4 (int *ptr, int val) \
|
|
90 { \
|
|
91 int failure, tmp; \
|
|
92 \
|
|
93 do { \
|
|
94 tmp = *ptr; \
|
|
95 failure = __kernel_cmpxchg (tmp, PFX_OP tmp INF_OP val, ptr); \
|
|
96 } while (failure != 0); \
|
|
97 \
|
|
98 return tmp; \
|
|
99 }
|
|
100
|
|
101 FETCH_AND_OP_WORD (add, , +)
|
|
102 FETCH_AND_OP_WORD (sub, , -)
|
|
103 FETCH_AND_OP_WORD (or, , |)
|
|
104 FETCH_AND_OP_WORD (and, , &)
|
|
105 FETCH_AND_OP_WORD (xor, , ^)
|
|
106 FETCH_AND_OP_WORD (nand, ~, &)
|
|
107
|
|
/* Name generators used by SUBWORD_SYNC_OP below: the fetch-and-op
   family returns the old value, the op-and-fetch family the new one.
   The RETURN argument of SUBWORD_SYNC_OP selects which is expanded.  */
#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH
|
|
110
|
|
/* Implement both __sync_<op>_and_fetch and __sync_fetch_and_<op> for
   subword-sized quantities.  The subword is updated with a 32-bit CAS
   on its containing aligned word; RETURN (oldval or newval) selects
   which word the returned subword is extracted from.  */

#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN)	\
  TYPE HIDDEN								\
  NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE val)			\
  {									\
    /* Address of the aligned 32-bit word containing the subword.  */	\
    int *wordptr = (int *) ((unsigned long) ptr & ~3);			\
    unsigned int mask, shift, oldval, newval;				\
    int failure;							\
									\
    /* Bit offset of the subword within the big-endian word.  */	\
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;	\
    mask = MASK_##WIDTH << shift;					\
									\
    do {								\
      oldval = *wordptr;						\
      /* Apply OP to the extracted subword, then splice the result	\
	 back into the untouched remainder of the word.  */		\
      newval = ((PFX_OP ((oldval & mask) >> shift)			\
		 INF_OP (unsigned int) val) << shift) & mask;		\
      newval |= oldval & ~mask;						\
      failure = __kernel_cmpxchg (oldval, newval, wordptr);		\
    } while (failure != 0);						\
									\
    /* Extract the subword from the selected (old or new) word.  */	\
    return (RETURN & mask) >> shift;					\
  }

/* __sync_fetch_and_<op> for subword types (RETURN = oldval).  */
SUBWORD_SYNC_OP (add,   , +, short, 2, oldval)
SUBWORD_SYNC_OP (sub,   , -, short, 2, oldval)
SUBWORD_SYNC_OP (or,    , |, short, 2, oldval)
SUBWORD_SYNC_OP (and,   , &, short, 2, oldval)
SUBWORD_SYNC_OP (xor,   , ^, short, 2, oldval)
SUBWORD_SYNC_OP (nand, ~, &, short, 2, oldval)

SUBWORD_SYNC_OP (add,   , +, char, 1, oldval)
SUBWORD_SYNC_OP (sub,   , -, char, 1, oldval)
SUBWORD_SYNC_OP (or,    , |, char, 1, oldval)
SUBWORD_SYNC_OP (and,   , &, char, 1, oldval)
SUBWORD_SYNC_OP (xor,   , ^, char, 1, oldval)
SUBWORD_SYNC_OP (nand, ~, &, char, 1, oldval)
|
|
149
|
|
150 #define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP) \
|
|
151 int HIDDEN \
|
|
152 __sync_##OP##_and_fetch_4 (int *ptr, int val) \
|
|
153 { \
|
|
154 int tmp, failure; \
|
|
155 \
|
|
156 do { \
|
|
157 tmp = *ptr; \
|
|
158 failure = __kernel_cmpxchg (tmp, PFX_OP tmp INF_OP val, ptr); \
|
|
159 } while (failure != 0); \
|
|
160 \
|
|
161 return PFX_OP tmp INF_OP val; \
|
|
162 }
|
|
163
|
|
164 OP_AND_FETCH_WORD (add, , +)
|
|
165 OP_AND_FETCH_WORD (sub, , -)
|
|
166 OP_AND_FETCH_WORD (or, , |)
|
|
167 OP_AND_FETCH_WORD (and, , &)
|
|
168 OP_AND_FETCH_WORD (xor, , ^)
|
|
169 OP_AND_FETCH_WORD (nand, ~, &)
|
|
170
|
|
/* __sync_<op>_and_fetch for subword types (RETURN = newval).  */
SUBWORD_SYNC_OP (add,   , +, short, 2, newval)
SUBWORD_SYNC_OP (sub,   , -, short, 2, newval)
SUBWORD_SYNC_OP (or,    , |, short, 2, newval)
SUBWORD_SYNC_OP (and,   , &, short, 2, newval)
SUBWORD_SYNC_OP (xor,   , ^, short, 2, newval)
SUBWORD_SYNC_OP (nand, ~, &, short, 2, newval)

SUBWORD_SYNC_OP (add,   , +, char, 1, newval)
SUBWORD_SYNC_OP (sub,   , -, char, 1, newval)
SUBWORD_SYNC_OP (or,    , |, char, 1, newval)
SUBWORD_SYNC_OP (and,   , &, char, 1, newval)
SUBWORD_SYNC_OP (xor,   , ^, char, 1, newval)
SUBWORD_SYNC_OP (nand, ~, &, char, 1, newval)
|
|
184
|
|
185 int HIDDEN
|
|
186 __sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
|
|
187 {
|
|
188 int actual_oldval, fail;
|
|
189
|
|
190 while (1)
|
|
191 {
|
|
192 actual_oldval = *ptr;
|
|
193
|
|
194 if (oldval != actual_oldval)
|
|
195 return actual_oldval;
|
|
196
|
|
197 fail = __kernel_cmpxchg (actual_oldval, newval, ptr);
|
|
198
|
|
199 if (!fail)
|
|
200 return oldval;
|
|
201 }
|
|
202 }
|
|
203
|
|
/* Compare-and-swap for subword types, returning the subword previously
   in memory.  Operates on the containing aligned 32-bit word through
   the kernel cmpxchg helper.  */
#define SUBWORD_VAL_CAS(TYPE, WIDTH)					\
  TYPE HIDDEN								\
  __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,		\
				       TYPE newval)			\
  {									\
    int *wordptr = (int *)((unsigned long) ptr & ~3), fail;		\
    unsigned int mask, shift, actual_oldval, actual_newval;		\
									\
    /* Bit offset of the subword within the big-endian word.  */	\
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;	\
    mask = MASK_##WIDTH << shift;					\
									\
    while (1)								\
      {									\
	actual_oldval = *wordptr;					\
									\
	/* Subword mismatch: fail, reporting the value we saw.  */	\
	if (((actual_oldval & mask) >> shift) != (unsigned int) oldval)	\
	  return (actual_oldval & mask) >> shift;			\
									\
	/* Splice NEWVAL into the unchanged rest of the word.  */	\
	actual_newval = (actual_oldval & ~mask)				\
	  | (((unsigned int) newval << shift) & mask);			\
									\
	fail = __kernel_cmpxchg (actual_oldval, actual_newval,		\
				 wordptr);				\
									\
	/* On -EBUSY some other byte of the word changed: retry.  */	\
	if (!fail)							\
	  return oldval;						\
      }									\
  }

SUBWORD_VAL_CAS (short, 2)
SUBWORD_VAL_CAS (char, 1)
|
|
235
|
|
/* NOTE(review): presumably defined locally to avoid <stdbool.h> in
   this freestanding libgcc build — confirm.  Used only as the return
   type of the __sync_bool_* entry points below.  */
typedef unsigned char bool;
|
|
237
|
|
238 bool HIDDEN
|
|
239 __sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
|
|
240 {
|
|
241 int failure = __kernel_cmpxchg (oldval, newval, ptr);
|
|
242 return (failure == 0);
|
|
243 }
|
|
244
|
|
245 #define SUBWORD_BOOL_CAS(TYPE, WIDTH) \
|
|
246 bool HIDDEN \
|
|
247 __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
|
|
248 TYPE newval) \
|
|
249 { \
|
|
250 TYPE actual_oldval \
|
|
251 = __sync_val_compare_and_swap_##WIDTH (ptr, oldval, newval); \
|
|
252 return (oldval == actual_oldval); \
|
|
253 }
|
|
254
|
|
255 SUBWORD_BOOL_CAS (short, 2)
|
|
256 SUBWORD_BOOL_CAS (char, 1)
|
|
257
|
|
258 int HIDDEN
|
|
259 __sync_lock_test_and_set_4 (int *ptr, int val)
|
|
260 {
|
|
261 int failure, oldval;
|
|
262
|
|
263 do {
|
|
264 oldval = *ptr;
|
|
265 failure = __kernel_cmpxchg (oldval, val, ptr);
|
|
266 } while (failure != 0);
|
|
267
|
|
268 return oldval;
|
|
269 }
|
|
270
|
|
/* Atomic exchange for subword types: store VAL into the subword and
   return its previous contents, using a 32-bit CAS on the containing
   aligned word.  */
#define SUBWORD_TEST_AND_SET(TYPE, WIDTH)				\
  TYPE HIDDEN								\
  __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val)		\
  {									\
    int failure;							\
    unsigned int oldval, newval, shift, mask;				\
    /* Address of the aligned 32-bit word containing the subword.  */	\
    int *wordptr = (int *) ((unsigned long) ptr & ~3);			\
									\
    /* Bit offset of the subword within the big-endian word.  */	\
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;	\
    mask = MASK_##WIDTH << shift;					\
									\
    do {								\
      oldval = *wordptr;						\
      /* Splice VAL into the unchanged rest of the word.  */		\
      newval = (oldval & ~mask)						\
	| (((unsigned int) val << shift) & mask);			\
      failure = __kernel_cmpxchg (oldval, newval, wordptr);		\
    } while (failure != 0);						\
									\
    return (oldval & mask) >> shift;					\
  }

SUBWORD_TEST_AND_SET (short, 2)
SUBWORD_TEST_AND_SET (char, 1)
|
|
294
|
|
295 #define SYNC_LOCK_RELEASE(TYPE, WIDTH) \
|
|
296 void HIDDEN \
|
|
297 __sync_lock_release_##WIDTH (TYPE *ptr) \
|
|
298 { \
|
|
299 *ptr = 0; \
|
|
300 }
|
|
301
|
|
302 SYNC_LOCK_RELEASE (int, 4)
|
|
303 SYNC_LOCK_RELEASE (short, 2)
|
|
304 SYNC_LOCK_RELEASE (char, 1)
|