#ifndef __ARCH_H8300_ATOMIC__
#define __ARCH_H8300_ATOMIC__

#include <linux/types.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

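/*
 * A minimal usage sketch: a hypothetical reference count built on the
 * primitives below.  The example_* names are made up for illustration
 * and are not defined anywhere in the kernel.
 *
 *	static atomic_t example_refs = ATOMIC_INIT(1);
 *
 *	static void example_get(void)
 *	{
 *		atomic_inc(&example_refs);
 *	}
 *
 *	static void example_put(void)
 *	{
 *		if (atomic_dec_and_test(&example_refs))
 *			example_release();	(hypothetical cleanup hook)
 *	}
 */
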
#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))

#include <asm/system.h>
#include <linux/kernel.h>

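/*
 * This is a uniprocessor port, so atomicity is obtained by disabling
 * interrupts around each read-modify-write (local_irq_save/
 * local_irq_restore) rather than with dedicated atomic instructions.
 */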
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int ret;
	local_irq_save(flags);
	ret = v->counter += i;
	local_irq_restore(flags);
	return ret;
}

#define atomic_add(i, v)		atomic_add_return(i, v)
#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int ret;
	local_irq_save(flags);
	ret = v->counter -= i;
	local_irq_restore(flags);
	return ret;
}

#define atomic_sub(i, v)		atomic_sub_return(i, v)
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

static __inline__ int atomic_inc_return(atomic_t *v)
{
	unsigned long flags;
	int ret;
	local_irq_save(flags);
	v->counter++;
	ret = v->counter;
	local_irq_restore(flags);
	return ret;
}

#define atomic_inc(v) atomic_inc_return(v)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

static __inline__ int atomic_dec_return(atomic_t *v)
{
	unsigned long flags;
	int ret;
	local_irq_save(flags);
	--v->counter;
	ret = v->counter;
	local_irq_restore(flags);
	return ret;
}

#define atomic_dec(v) atomic_dec_return(v)

static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned long flags;
	int ret;
	local_irq_save(flags);
	--v->counter;
	ret = v->counter;
	local_irq_restore(flags);
	return ret == 0;
}

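/*
 * atomic_cmpxchg - compare and exchange
 * @v: pointer of type atomic_t
 * @old: expected value
 * @new: replacement value
 *
 * Atomically sets @v to @new if it currently holds @old.
 * Returns the value @v held before the operation.
 */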
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);
	return ret;
}

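/*
 * atomic_xchg() is built on the plain xchg() helper, presumably
 * provided by <asm/system.h>, included above.
 */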
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

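/*
 * atomic_add_unless - add to the value unless it equals a given value
 * @v: pointer of type atomic_t
 * @a: amount to add
 * @u: value @v must not hold for the add to happen
 *
 * Atomically adds @a to @v unless @v holds @u.
 * Returns non-zero if the add was performed, zero otherwise.
 */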
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	local_irq_restore(flags);
	return ret != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

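/*
 * The assembler sequences below save the condition code register
 * (stc ccr,r1l), set the interrupt mask bit (orc #0x80,ccr) to block
 * interrupts, perform the read-modify-write on *v in er0, and then
 * restore the saved CCR (ldc r1l,ccr), re-enabling interrupts if
 * they were enabled on entry.
 */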
static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("stc ccr,r1l\n\t"
			     "orc #0x80,ccr\n\t"
			     "mov.l %0,er0\n\t"
			     "and.l %1,er0\n\t"
			     "mov.l er0,%0\n\t"
			     "ldc r1l,ccr"
			     : "=m" (*v) : "g" (~(mask)) : "er0", "er1");
}

static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("stc ccr,r1l\n\t"
			     "orc #0x80,ccr\n\t"
			     "mov.l %0,er0\n\t"
			     "or.l %1,er0\n\t"
			     "mov.l er0,%0\n\t"
			     "ldc r1l,ccr"
			     : "=m" (*v) : "g" (mask) : "er0", "er1");
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic-long.h>
#endif /* __ARCH_H8300_ATOMIC__ */