/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

#define atomic_read(v)	((v)->counter)

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.  Writing to 'v->counter'
 * without using the following operations WILL break the atomic
 * nature of these ops.
 */
static inline void atomic_set(atomic_t *v, int i)
{
        unsigned long tmp;

        __asm__ __volatile__("@ atomic_set\n"
"1:     ldrex   %0, [%1]\n"
"       strex   %0, %2, [%1]\n"
"       teq     %0, #0\n"
"       bne     1b"
        : "=&r" (tmp)
        : "r" (&v->counter), "r" (i)
        : "cc");
}
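
/*
 * Illustrative only (not part of the original header): callers treat
 * atomic_t as an opaque counter and go through these accessors
 * exclusively, e.g. with a hypothetical 'pending' counter:
 *
 *	static atomic_t pending = ATOMIC_INIT(0);
 *
 *	atomic_set(&pending, 5);
 *	if (atomic_read(&pending) > 0)
 *		...
 */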

static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_add\n"
"1:     ldrex   %0, [%2]\n"
"       add     %0, %0, %3\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        smp_mb();

        __asm__ __volatile__("@ atomic_add_return\n"
"1:     ldrex   %0, [%2]\n"
"       add     %0, %0, %3\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        smp_mb();

        return result;
}
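
/*
 * Note: in this implementation only the value-returning variants
 * (atomic_add_return() above, atomic_sub_return() and atomic_cmpxchg()
 * below) issue smp_mb() around the update; plain atomic_add() and
 * atomic_sub() provide atomicity only, with no memory ordering.
 */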

static inline void atomic_sub(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_sub\n"
"1:     ldrex   %0, [%2]\n"
"       sub     %0, %0, %3\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        smp_mb();

        __asm__ __volatile__("@ atomic_sub_return\n"
"1:     ldrex   %0, [%2]\n"
"       sub     %0, %0, %3\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        smp_mb();

        return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
        unsigned long oldval, res;

        smp_mb();

        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
                "ldrex  %1, [%2]\n"
                "mov    %0, #0\n"
                "teq    %1, %3\n"
                "strexeq %0, %4, [%2]\n"
                    : "=&r" (res), "=&r" (oldval)
                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
                    : "cc");
        } while (res);

        smp_mb();

        return oldval;
}
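
/*
 * Illustrative only (not part of the original header): atomic_cmpxchg()
 * is the building block for read-modify-write operations beyond plain
 * add/sub. A hypothetical bounded increment, with 'count' and LIMIT
 * made up for the example:
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(&count);
 *		new = old < LIMIT ? old + 1 : old;
 *	} while (atomic_cmpxchg(&count, old, new) != old);
 */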

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__("@ atomic_clear_mask\n"
"1:     ldrex   %0, [%2]\n"
"       bic     %0, %0, %3\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=&r" (tmp2)
        : "r" (addr), "Ir" (mask)
        : "cc");
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
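
/*
 * Pre-ARMv6 cores lack the exclusive load/store instructions, so on
 * these (necessarily UP) systems atomicity is obtained by disabling
 * interrupts around a plain load/modify/store sequence.
 */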

#define atomic_set(v,i)	(((v)->counter) = (i))

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long flags;
        int val;

        raw_local_irq_save(flags);
        val = v->counter;
        v->counter = val += i;
        raw_local_irq_restore(flags);

        return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long flags;
        int val;

        raw_local_irq_save(flags);
        val = v->counter;
        v->counter = val -= i;
        raw_local_irq_restore(flags);

        return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        raw_local_irq_save(flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        raw_local_irq_restore(flags);

        return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long flags;

        raw_local_irq_save(flags);
        *addr &= ~mask;
        raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
                c = old;
        return c != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
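
/*
 * Illustrative only: atomic_inc_not_zero() is the usual "take a
 * reference only while the object is still live" primitive. The
 * 'obj->refs' field below is hypothetical:
 *
 *	if (atomic_inc_not_zero(&obj->refs)) {
 *		... safe to use obj; drop the reference when done ...
 *	}
 */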

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
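
/*
 * Illustrative only: these barrier hooks pair with the non-returning
 * atomics, which carry no implicit ordering of their own. A
 * hypothetical producer publishing 'obj->data' before dropping a
 * pending count would write:
 *
 *	obj->data = val;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 */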

#include <asm-generic/atomic-long.h>
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */