lib/rwsem.c (net-next-2.6.git, at commit "rwsem: fully separate code paths to wake writers vs readers")

/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

EXPORT_SYMBOL(__init_rwsem);

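/*
 * Usage note (illustrative, not part of the original file): callers do not
 * normally invoke __init_rwsem() directly.  The init_rwsem() macro in
 * <linux/rwsem.h> supplies the lock name and a static lockdep class key.
 * A minimal sketch of typical use (example_sem is a hypothetical name):
 *
 *	static struct rw_semaphore example_sem;
 *
 *	init_rwsem(&example_sem);	expands to __init_rwsem(&example_sem,
 *					"example_sem", &__key)
 *	down_read(&example_sem);	shared (reader) acquisition
 *	up_read(&example_sem);
 */
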
struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	unsigned int flags;
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};

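/*
 * Worked example of the count layout (illustrative; these are the common
 * 32-bit arch values: RWSEM_ACTIVE_BIAS = 0x00000001, RWSEM_ACTIVE_MASK =
 * 0x0000ffff, RWSEM_WAITING_BIAS = -0x00010000):
 *
 *	0x00000000	unlocked, wait queue empty
 *	0x00000001	one reader holds the lock
 *	0x00000003	three readers hold the lock
 *	0xffff0001	write-locked (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS),
 *			or one active holder plus one queued waiter
 *	0xffff0000	no active holders, but at least one queued waiter
 *
 * The wakeup paths below test (count & RWSEM_ACTIVE_MASK) to decide whether
 * the active part has just made the 0 -> 1 transition.
 */
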
/*
 * handle the lock release when there are processes blocked on it that can
 * now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is negative (and will still be so)
 * - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if downgrading is false
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	signed long oldcount, woken, loop;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
		goto readers_only;

	if (downgrading)
		goto out;

	/* There's a writer at the front of the queue - try to grant it the
	 * write lock.  However, we only wake this writer if we can transition
	 * the active part of the count from 0 -> 1.
	 */
 try_again_write:
	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
						- RWSEM_ACTIVE_BIAS;
	if (oldcount & RWSEM_ACTIVE_MASK)
		/* Someone grabbed the sem already */
		goto undo_write;

	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
	 * It is allocated on the waiter's stack and may become invalid at
	 * any time after that point (due to a wakeup from another source).
	 */
	list_del(&waiter->list);
	tsk = waiter->task;
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);
	goto out;

 readers_only:
	if (downgrading)
		goto wake_readers;

	/* if we came through an up_xxxx() call, we only wake someone up
	 * if we can transition the active part of the count from 0 -> 1 */
 try_again_read:
	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
						- RWSEM_ACTIVE_BIAS;
	if (oldcount & RWSEM_ACTIVE_MASK)
		/* Someone grabbed the sem already */
		goto undo_read;

 wake_readers:
	/* Grant an infinite number of read locks to the readers at the front
	 * of the queue.  Note we increment the 'active part' of the count by
	 * the number of readers before waking any processes up.
	 */
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
					struct rwsem_waiter, list);

	} while (waiter->flags & RWSEM_WAITING_FOR_READ);

	loop = woken;
	woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
	if (!downgrading)
		/* we'd already done one increment earlier */
		woken -= RWSEM_ACTIVE_BIAS;

	rwsem_atomic_add(woken, sem);

	next = sem->wait_list.next;
	for (; loop > 0; loop--) {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	}

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	return sem;

	/* undo the change to the active count, but check for a transition
	 * 1 -> 0 */
 undo_write:
	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
		goto out;
	goto try_again_write;
 undo_read:
	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
		goto out;
	goto try_again_read;
}

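/*
 * Worked example of the reader-grant arithmetic above (illustrative, using
 * the common 32-bit biases): waking three queued readers from rwsem_wake()
 * computes
 *
 *	woken = 3 * (RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS)
 *	      = 3 * 0x00010001 = 0x00030003
 *	woken -= RWSEM_ACTIVE_BIAS;	(try_again_read already added one)
 *
 * so rwsem_atomic_add() adds 0x00030002: the active part gains the two
 * remaining reader counts and the waiting part sheds all three waiter
 * biases.  In the downgrade case no earlier increment was done, so the
 * full 0x00030003 is added.
 */
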
/*
 * wait for a lock to be granted
 */
static struct rw_semaphore __sched *
rwsem_down_failed_common(struct rw_semaphore *sem,
			 struct rwsem_waiter *waiter, signed long adjustment)
{
	struct task_struct *tsk = current;
	signed long count;

	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	spin_lock_irq(&sem->wait_lock);
	waiter->task = tsk;
	get_task_struct(tsk);

	list_add_tail(&waiter->list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* if there are no active locks, wake the front queued process(es) up */
	if (!(count & RWSEM_ACTIVE_MASK))
		sem = __rwsem_do_wake(sem, 0);

	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter->task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

	return sem;
}

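/*
 * For context (an illustrative sketch, not part of this file): the arch
 * fast path only falls into the slow paths below on contention.  A C-level
 * approximation of the typical __down_read() fast path:
 *
 *	static inline void __down_read(struct rw_semaphore *sem)
 *	{
 *		if (unlikely(rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem) < 0))
 *			rwsem_down_read_failed(sem);	(slow path below)
 *	}
 *
 * The speculative RWSEM_ACTIVE_BIAS increment made there is what the
 * 'adjustment' passed into rwsem_down_failed_common() must compensate for.
 */
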
/*
 * wait for the read lock to be granted
 */
asmregparm struct rw_semaphore __sched *
rwsem_down_read_failed(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;

	waiter.flags = RWSEM_WAITING_FOR_READ;
	rwsem_down_failed_common(sem, &waiter,
				 RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
	return sem;
}

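/*
 * Note on the adjustment above (illustrative, using the common 32-bit
 * biases): the failed fast path has already added RWSEM_ACTIVE_BIAS
 * speculatively, so the reader queues with
 *
 *	RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS = -0x00010000 - 1 = -0x00010001
 *
 * which cancels the speculative active count and records one more waiter.
 */
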
/*
 * wait for the write lock to be granted
 */
asmregparm struct rw_semaphore __sched *
rwsem_down_write_failed(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;

	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);

	return sem;
}

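/*
 * Note on the -RWSEM_ACTIVE_BIAS adjustment above (illustrative, using the
 * common 32-bit biases): the writer's failed fast path added
 * RWSEM_ACTIVE_WRITE_BIAS = RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS, so
 * subtracting RWSEM_ACTIVE_BIAS leaves exactly one RWSEM_WAITING_BIAS in
 * the count: the writer's mark as a queued waiter.
 */
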
/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}

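/*
 * For context (an illustrative sketch, not part of this file): the release
 * fast paths only call rwsem_wake() when the count is left negative
 * (waiters queued) with a zero active part, roughly:
 *
 *	tmp = rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem);
 *	if (unlikely(tmp < 0 && (tmp & RWSEM_ACTIVE_MASK) == 0))
 *		rwsem_wake(sem);
 *
 * (shown for up_read(); up_write() subtracts RWSEM_ACTIVE_WRITE_BIAS)
 */
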
/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
asmregparm struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}

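/*
 * Usage example of the downgrade path (illustrative, standard
 * <linux/rwsem.h> API; example_sem is hypothetical): a writer that has
 * finished modifying a structure but still needs to read it can let queued
 * readers in early instead of fully releasing the lock:
 *
 *	down_write(&example_sem);
 *	... modify the protected data ...
 *	downgrade_write(&example_sem);	(enters rwsem_downgrade_wake()
 *					 if waiters are queued)
 *	... continue reading alongside other readers ...
 *	up_read(&example_sem);
 */
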
EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);