/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	((v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))
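/*
 * Note that atomic_read() and atomic_set() are plain accesses: they
 * imply no memory barriers of their own.
 */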

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
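/*
 * The pattern below is common to all the ARMv6 ops: ldrex loads the
 * counter and marks the address in the local exclusive monitor, strex
 * stores the new value and writes 0 to its status register only if
 * exclusivity was kept (1 otherwise), and the teq/bne pair retries the
 * whole sequence on failure.
 */
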
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

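/*
 * Only the value-returning ops are fully ordered: the smp_mb() before
 * and after the ldrex/strex loop gives atomic_add_return() and friends
 * barrier semantics, while the void atomic_add()/atomic_sub() above
 * and below provide no ordering guarantees.
 */
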
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%2]\n"
		"mov	%0, #0\n"
		"teq	%1, %3\n"
		"strexeq %0, %4, [%2]\n"
		    : "=&r" (res), "=&r" (oldval)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}

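/*
 * A typical caller pattern (illustrative sketch only): retry until the
 * compare succeeds, using the returned old value to detect failure:
 *
 *	do {
 *		old = atomic_read(v);
 *	} while (atomic_cmpxchg(v, old, old + 1) != old);
 */
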
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%2]\n"
"	bic	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

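/*
 * With SMP ruled out above, these pre-ARMv6 variants only have to be
 * atomic with respect to interrupts on the one CPU, so briefly
 * disabling IRQs around the read-modify-write is sufficient.
 */
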
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

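/*
 * atomic_add_unless() returns non-zero only if the add was performed,
 * i.e. the counter did not already hold u; atomic_inc_not_zero() uses
 * this for the usual "take a reference only if the object is still
 * live" idiom.
 */
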
#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

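/*
 * The __aligned(8) above is required: the ldrexd/strexd instructions
 * used below only operate on doubleword-aligned addresses.
 */
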
static inline u64 atomic64_read(atomic64_t *v)
{
	u64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter)
	);

	return result;
}

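/*
 * A single ldrexd gives a single-copy atomic 64-bit load, which two
 * separate 32-bit loads would not; %H0 names the high half of the
 * register pair allocated for the 64-bit operand.
 */
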
static inline void atomic64_set(atomic64_t *v, u64 i)
{
	u64 tmp;

	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%1]\n"
"	strexd	%0, %2, %H2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline void atomic64_add(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	adds	%0, %0, %3\n"
"	adc	%H0, %H0, %H3\n"
"	strexd	%1, %0, %H0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

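/*
 * The 64-bit arithmetic is done in two 32-bit halves: adds sets the
 * carry flag from the low words and adc folds it into the high words
 * (subs/sbc play the same role for the subtractions below).
 */
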
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	adds	%0, %0, %3\n"
"	adc	%H0, %H0, %H3\n"
"	strexd	%1, %0, %H0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_sub\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	subs	%0, %0, %3\n"
"	sbc	%H0, %H0, %H3\n"
"	strexd	%1, %0, %H0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_sub_return\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	subs	%0, %0, %3\n"
"	sbc	%H0, %H0, %H3\n"
"	strexd	%1, %0, %H0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
	u64 oldval;
	unsigned long res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%2]\n"
		"mov		%0, #0\n"
		"teq		%1, %3\n"
		"teqeq		%H1, %H3\n"
		"strexdeq	%0, %4, %H4, [%2]"
		: "=&r" (res), "=&r" (oldval)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%1, %3, %H3, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	subs	%0, %0, #1\n"
"	sbc	%H0, %H0, #0\n"
"	teq	%H0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}

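/*
 * Note that atomic64_dec_if_positive() skips the store when the
 * decrement would go negative (the bmi branch above), but still
 * returns the decremented value; callers treat a negative return as
 * "not decremented".
 */
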
static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	u64 val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	teq	%0, %4\n"
"	teqeq	%H0, %H4\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%0, %0, %5\n"
"	adc	%H0, %H0, %H5\n"
"	strexd	%2, %0, %H0, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "=&r" (ret), "=&r" (tmp)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#else /* !CONFIG_GENERIC_ATOMIC64 */
#include <asm-generic/atomic64.h>
#endif
#include <asm-generic/atomic-long.h>
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */