/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
__always_inline void fastcall __mutex_init(struct mutex *lock, const char *name)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);

	debug_mutex_init(lock, name);
}

EXPORT_SYMBOL(__mutex_init);
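
/*
 * Example usage (a minimal sketch; "my_device" and my_device_init() are
 * illustrative names, not part of this file): a mutex is either defined
 * statically with DEFINE_MUTEX() or initialized at runtime via the
 * mutex_init() wrapper, which supplies the lock's name for debugging.
 *
 *	static DEFINE_MUTEX(global_mutex);
 *
 *	struct my_device {
 *		struct mutex lock;
 *	};
 *
 *	static void my_device_init(struct my_device *dev)
 *	{
 *		mutex_init(&dev->lock);
 *	}
 */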

/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void inline fastcall __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

EXPORT_SYMBOL(mutex_lock);
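
/*
 * Example usage (a minimal sketch; "event_mutex" and "event_count" are
 * illustrative names, not part of this file): a typical critical
 * section built from a mutex_lock()/mutex_unlock() pair.
 *
 *	static DEFINE_MUTEX(event_mutex);
 *	static unsigned long event_count;
 *
 *	static void record_event(void)
 *	{
 *		mutex_lock(&event_mutex);
 *		event_count++;
 *		mutex_unlock(&event_mutex);
 *	}
 */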

static void fastcall noinline __sched
__mutex_unlock_slowpath(atomic_t *lock_count);

/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void fastcall __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
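/*
 * For reference, lock->count encodes the lock state as follows:
 *
 *	 1 - unlocked
 *	 0 - locked, no waiters
 *	-1 - locked, and there may be waiters queued on ->wait_list
 *
 * The slowpath below xchg()s the count to -1 before sleeping, so that
 * the unlock path knows it must hand out a wakeup.
 */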
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned int old_val;
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task->thread_info);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		old_val = atomic_xchg(&lock->count, -1);
		if (old_val == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE &&
			     signal_pending(task))) {
			mutex_remove_waiter(lock, &waiter, task->thread_info);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, task->thread_info);
	debug_mutex_set_owner(lock, task->thread_info);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);

	return 0;
}

static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
}

/*
 * Release the lock, slowpath:
 */
static fastcall inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	debug_mutex_unlock(lock);

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	debug_mutex_clear_owner(lock);

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static fastcall noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count);
}

/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/***
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired, or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();
	return __mutex_fastpath_lock_retval
		(&lock->count, __mutex_lock_interruptible_slowpath);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
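
/*
 * Example usage (a minimal sketch; "cfg_mutex", update_config() and the
 * apply_config() helper are illustrative names, not part of this file):
 * the return value must be checked, since the mutex is not held when
 * -EINTR comes back.
 *
 *	static DEFINE_MUTEX(cfg_mutex);
 *
 *	static int update_config(void)
 *	{
 *		int ret = mutex_lock_interruptible(&cfg_mutex);
 *
 *		if (ret)
 *			return ret;
 *		apply_config();
 *		mutex_unlock(&cfg_mutex);
 *		return 0;
 *	}
 */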

static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
}

/*
 * Spinlock based trylock: we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1))
		debug_mutex_set_owner(lock, current_thread_info());

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/***
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * its return value is the inverse of the down_trylock() return
 * values! Be careful about this when converting semaphore users
 * to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int fastcall mutex_trylock(struct mutex *lock)
{
	return __mutex_fastpath_trylock(&lock->count,
					__mutex_trylock_slowpath);
}

EXPORT_SYMBOL(mutex_trylock);
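
/*
 * Example usage (a minimal sketch; "stats_mutex", try_dump_stats() and
 * the dump_stats() helper are illustrative names, not part of this
 * file): note the inverted convention relative to down_trylock() -
 * nonzero means the lock was taken, so the failure path is the 0 case.
 *
 *	static DEFINE_MUTEX(stats_mutex);
 *
 *	static int try_dump_stats(void)
 *	{
 *		if (!mutex_trylock(&stats_mutex))
 *			return -EBUSY;
 *		dump_stats();
 *		mutex_unlock(&stats_mutex);
 *		return 0;
 *	}
 */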