/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
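
/*
 * Illustrative usage sketch: mutexes are normally set up either
 * statically with DEFINE_MUTEX() or dynamically via the mutex_init()
 * wrapper from <linux/mutex.h>, which feeds __mutex_init() above.
 * The my_dev structure and my_dev_setup() below are hypothetical:
 *
 *	static DEFINE_MUTEX(global_lock);
 *
 *	struct my_dev {
 *		struct mutex lock;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		mutex_init(&dev->lock);
 *	}
 */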

/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void inline fastcall __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

EXPORT_SYMBOL(mutex_lock);

static void fastcall noinline __sched
__mutex_unlock_slowpath(atomic_t *lock_count);

/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void fastcall __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
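
/*
 * Illustrative usage sketch: the canonical pattern - lock, touch the
 * shared data, unlock from the same task. my_mutex, shared_count and
 * bump_count() below are hypothetical:
 *
 *	static DEFINE_MUTEX(my_mutex);
 *	static int shared_count;
 *
 *	static void bump_count(void)
 *	{
 *		mutex_lock(&my_mutex);
 *		shared_count++;
 *		mutex_unlock(&my_mutex);
 *	}
 */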

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned int old_val;
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	old_val = atomic_xchg(&lock->count, -1);
	if (old_val == 1)
		goto done;

	lock_contended(&lock->dep_map, _RET_IP_);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		old_val = atomic_xchg(&lock->count, -1);
		if (old_val == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE &&
			     signal_pending(task))) {
			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
			mutex_release(&lock->dep_map, 1, _RET_IP_);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	debug_mutex_set_owner(lock, task_thread_info(task));

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);

	return 0;
}
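
/*
 * A short summary of the lock->count protocol implemented above
 * (derived from the code in this file):
 *
 *	 1: unlocked
 *	 0: locked, no waiters
 *	-1: locked, possibly with waiters queued on ->wait_list
 *
 * Contending tasks xchg() the count to -1 before sleeping, so the
 * unlock slowpath knows it must take ->wait_lock and wake up a waiter.
 */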

static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif
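
/*
 * Illustrative usage sketch: when two mutexes of the same lock class
 * must be nested, mutex_lock_nested() tells lockdep the nesting is
 * intentional, so it does not report a false recursive-lock deadlock.
 * The two_locks() helper below is hypothetical; SINGLE_DEPTH_NESTING
 * comes from <linux/lockdep.h>:
 *
 *	static void two_locks(struct mutex *outer, struct mutex *inner)
 *	{
 *		mutex_lock(outer);
 *		mutex_lock_nested(inner, SINGLE_DEPTH_NESTING);
 *		...
 *		mutex_unlock(inner);
 *		mutex_unlock(outer);
 *	}
 */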

/*
 * Release the lock, slowpath:
 */
static fastcall inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here:
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	debug_mutex_clear_owner(lock);

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static fastcall noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/***
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired, or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();
	return __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
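
/*
 * Illustrative usage sketch: callers must check the return value and
 * bail out without touching the protected data if a signal interrupted
 * the wait. my_mutex and my_op() below are hypothetical:
 *
 *	static DEFINE_MUTEX(my_mutex);
 *
 *	static int my_op(void)
 *	{
 *		int ret = mutex_lock_interruptible(&my_mutex);
 *		if (ret)
 *			return ret;
 *		...
 *		mutex_unlock(&my_mutex);
 *		return 0;
 *	}
 */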

static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
}

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		debug_mutex_set_owner(lock, current_thread_info());
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}
	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/***
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int fastcall __sched mutex_trylock(struct mutex *lock)
{
	return __mutex_fastpath_trylock(&lock->count,
					__mutex_trylock_slowpath);
}

EXPORT_SYMBOL(mutex_trylock);
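
/*
 * Illustrative usage sketch: because mutex_trylock() follows the
 * spin_trylock() convention, success is nonzero - the opposite of
 * down_trylock(). my_mutex, try_fast_path() and the -EBUSY policy
 * below are hypothetical:
 *
 *	static DEFINE_MUTEX(my_mutex);
 *
 *	static int try_fast_path(void)
 *	{
 *		if (!mutex_trylock(&my_mutex))
 *			return -EBUSY;
 *		...
 *		mutex_unlock(&my_mutex);
 *		return 0;
 *	}
 */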