#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

#include <linux/bitops.h> /* for LOCK_PREFIX */

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants,
 *       you need to test for the feature in boot_cpu_data.
 */

extern void __xchg_wrong_size(void);

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary;
 *         strictly speaking, *ptr is an output argument as well. --ANK
 */

/*
 * Casting a pointer to this oversized dummy struct makes the "m"
 * operands below cover the whole referenced object, so the compiler
 * cannot cache any part of it across the asm.
 */
struct __xchg_dummy {
        unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

#define __xchg(x, ptr, size)                                            \
({                                                                      \
        __typeof(*(ptr)) __x = (x);                                     \
        switch (size) {                                                 \
        case 1:                                                         \
                asm volatile("xchgb %b0,%1"                             \
                             : "=q" (__x), "+m" (*__xg(ptr))            \
                             : "0" (__x)                                \
                             : "memory");                               \
                break;                                                  \
        case 2:                                                         \
                asm volatile("xchgw %w0,%1"                             \
                             : "=r" (__x), "+m" (*__xg(ptr))            \
                             : "0" (__x)                                \
                             : "memory");                               \
                break;                                                  \
        case 4:                                                         \
                asm volatile("xchgl %0,%1"                              \
                             : "=r" (__x), "+m" (*__xg(ptr))            \
                             : "0" (__x)                                \
                             : "memory");                               \
                break;                                                  \
        default:                                                        \
                __xchg_wrong_size();                                    \
        }                                                               \
        __x;                                                            \
})

#define xchg(ptr, v)                                                    \
        __xchg((v), (ptr), sizeof(*(ptr)))

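/*
 * Usage sketch (hypothetical, not part of this header): xchg() hands
 * back the previous contents of *ptr, so a minimal test-and-set
 * trylock can be built on top of it; example_trylock() is an
 * illustrative name:
 *
 *      static inline int example_trylock(unsigned long *lock)
 *      {
 *              return xchg(lock, 1UL) == 0;    (nonzero: we got it)
 *      }
 */
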
/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  We need the reader side to
 * see a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
        u32 low  = value;
        u32 high = value >> 32;
        u64 prev = *ptr;

        asm volatile("\n1:\t"
                     LOCK_PREFIX "cmpxchg8b %0\n\t"
                     "jnz 1b"
                     : "=m" (*ptr), "+A" (prev)
                     : "b" (low), "c" (high)
                     : "memory");
}
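
/*
 * Equivalent C sketch of the loop above (illustrative only, using the
 * __cmpxchg64() helper defined below): every failed compare refreshes
 * the expected value from memory, so the retry usually succeeds on the
 * very next locked operation:
 *
 *      u64 expected = *ptr;
 *      for (;;) {
 *              u64 got = __cmpxchg64(ptr, expected, value);
 *              if (got == expected)
 *                      break;
 *              expected = got;
 *      }
 */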

extern void __cmpxchg_wrong_size(void);

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)                        \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        __typeof__(*(ptr)) __old = (old);                               \
        __typeof__(*(ptr)) __new = (new);                               \
        switch (size) {                                                 \
        case 1:                                                         \
                asm volatile(lock "cmpxchgb %b2,%1"                     \
                             : "=a" (__ret), "+m" (*__xg(ptr))          \
                             : "q" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        case 2:                                                         \
                asm volatile(lock "cmpxchgw %w2,%1"                     \
                             : "=a" (__ret), "+m" (*__xg(ptr))          \
                             : "r" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        case 4:                                                         \
                asm volatile(lock "cmpxchgl %2,%1"                      \
                             : "=a" (__ret), "+m" (*__xg(ptr))          \
                             : "r" (__new), "0" (__old)                 \
                             : "memory");                               \
                break;                                                  \
        default:                                                        \
                __cmpxchg_wrong_size();                                 \
        }                                                               \
        __ret;                                                          \
})

#define __cmpxchg(ptr, old, new, size)                                  \
        __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)                             \
        __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)                            \
        __raw_cmpxchg((ptr), (old), (new), (size), "")

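/*
 * The three flavors above differ only in their lock prefix: LOCK_PREFIX
 * can be patched out when the kernel runs on a single CPU, "lock; " is
 * always emitted (for callers that must stay atomic even then, e.g. on
 * memory shared with a hypervisor), and "" never locks, for data only
 * ever touched by the local CPU.
 */
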
#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, old, new)                                          \
        __cmpxchg((ptr), (old), (new), sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)                                     \
        __sync_cmpxchg((ptr), (old), (new), sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)                                    \
        __cmpxchg_local((ptr), (old), (new), sizeof(*(ptr)))
#endif

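/*
 * Usage sketch (hypothetical): the canonical cmpxchg() retry loop,
 * here adding to a counter without a spinlock; example_add() is an
 * illustrative name:
 *
 *      static inline void example_add(unsigned int *p, unsigned int n)
 *      {
 *              unsigned int old, cur = *p;
 *              do {
 *                      old = cur;
 *                      cur = cmpxchg(p, old, old + n);
 *              } while (cur != old);
 *      }
 */
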
#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)                                            \
        ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
                                         (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)                                      \
        ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
                                               (unsigned long long)(n)))
#endif

static inline unsigned long long __cmpxchg64(volatile void *ptr,
                                             unsigned long long old,
                                             unsigned long long new)
{
        unsigned long long prev;
        asm volatile(LOCK_PREFIX "cmpxchg8b %1"
                     : "=A" (prev),
                       "+m" (*__xg(ptr))
                     : "b" ((unsigned long)new),
                       "c" ((unsigned long)(new >> 32)),
                       "0" (old)
                     : "memory");
        return prev;
}

static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
                                                   unsigned long long old,
                                                   unsigned long long new)
{
        unsigned long long prev;
        asm volatile("cmpxchg8b %1"
                     : "=A" (prev),
                       "+m" (*__xg(ptr))
                     : "b" ((unsigned long)new),
                       "c" ((unsigned long)(new >> 32)),
                       "0" (old)
                     : "memory");
        return prev;
}
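
/*
 * Illustrative sketch (not part of this header): the same retry idiom
 * keeps a 64-bit counter coherent on 32-bit SMP; example_add64() is a
 * hypothetical name:
 *
 *      static inline void example_add64(u64 *p, u64 n)
 *      {
 *              u64 old, cur = *p;
 *              do {
 *                      old = cur;
 *                      cur = cmpxchg64(p, old, old + n);
 *              } while (cur != old);
 *      }
 */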

#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386: the 80386 lacks
 * the cmpxchg instruction, so it may be necessary to emulate it.  For
 * that purpose we define a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
                                        unsigned long new, int size)
{
        switch (size) {
        case 1:
                return cmpxchg_386_u8(ptr, old, new);
        case 2:
                return cmpxchg_386_u16(ptr, old, new);
        case 4:
                return cmpxchg_386_u32(ptr, old, new);
        }
        return old;
}

#define cmpxchg(ptr, o, n)                                              \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        if (likely(boot_cpu_data.x86 > 3))                              \
                __ret = (__typeof__(*(ptr)))__cmpxchg((ptr),            \
                                (unsigned long)(o), (unsigned long)(n), \
                                sizeof(*(ptr)));                        \
        else                                                            \
                __ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),          \
                                (unsigned long)(o), (unsigned long)(n), \
                                sizeof(*(ptr)));                        \
        __ret;                                                          \
})
#define cmpxchg_local(ptr, o, n)                                        \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        if (likely(boot_cpu_data.x86 > 3))                              \
                __ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),      \
                                (unsigned long)(o), (unsigned long)(n), \
                                sizeof(*(ptr)));                        \
        else                                                            \
                __ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),          \
                                (unsigned long)(o), (unsigned long)(n), \
                                sizeof(*(ptr)));                        \
        __ret;                                                          \
})
#endif

#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on an 80386 or 80486: it may be
 * necessary to emulate cmpxchg8b.  cmpxchg64() below relies on the
 * alternatives mechanism: it assembles as a call to cmpxchg8b_emu and
 * is patched at boot into an inline "lock cmpxchg8b" on CPUs that have
 * X86_FEATURE_CX8.
 */

extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);

#define cmpxchg64(ptr, o, n)                                    \
({                                                              \
        __typeof__(*(ptr)) __ret;                               \
        __typeof__(*(ptr)) __old = (o);                         \
        __typeof__(*(ptr)) __new = (n);                         \
        alternative_io(LOCK_PREFIX_HERE                         \
                        "call cmpxchg8b_emu",                   \
                        "lock; cmpxchg8b (%%esi)",              \
                       X86_FEATURE_CX8,                         \
                       "=A" (__ret),                            \
                       "S" ((ptr)), "0" (__old),                \
                       "b" ((unsigned int)__new),               \
                       "c" ((unsigned int)(__new >> 32))        \
                       : "memory");                             \
        __ret; })

#define cmpxchg64_local(ptr, o, n)                                      \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        if (likely(boot_cpu_data.x86 > 4))                              \
                __ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr),    \
                                (unsigned long long)(o),                \
                                (unsigned long long)(n));               \
        else                                                            \
                __ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr),      \
                                (unsigned long long)(o),                \
                                (unsigned long long)(n));               \
        __ret;                                                          \
})

#endif

#endif /* _ASM_X86_CMPXCHG_32_H */