/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 * @key: the lock_class_key for the class; used by mutex lock debugging
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
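
/*
 * Illustrative usage sketch (added; not part of the original file, and
 * the "my_dev" names are hypothetical): a mutex is either defined
 * statically with DEFINE_MUTEX() or initialized at runtime with
 * mutex_init():
 *
 *	static DEFINE_MUTEX(my_global_lock);
 *
 *	struct my_dev {
 *		struct mutex lock;
 *	};
 *
 *	static void my_dev_init(struct my_dev *dev)
 *	{
 *		mutex_init(&dev->lock);
 *	}
 */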

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, the kernel
 * memory where the mutex resides must not be freed while the
 * mutex is still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void inline __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

EXPORT_SYMBOL(mutex_lock);
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

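/*
 * Illustrative usage sketch (added; not part of the original file, and
 * the names are hypothetical): the canonical lock/unlock pairing
 * around a critical section, released by the same task that acquired
 * the mutex:
 *
 *	static DEFINE_MUTEX(counter_lock);
 *	static unsigned long counter;
 *
 *	static void bump_counter(void)
 *	{
 *		mutex_lock(&counter_lock);
 *		counter++;
 *		mutex_unlock(&counter_lock);
 *	}
 */
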
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
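/*
 * Note added for clarity (inferred from the transitions used in this
 * file): lock->count is 1 while the mutex is unlocked, 0 while it is
 * locked with no waiters, and -1 while it is locked and there may be
 * waiters that need waking on unlock.
 */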
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned int old_val;
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	mutex_acquire(&lock->dep_map, subclass, 0, ip);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	old_val = atomic_xchg(&lock->count, -1);
	if (old_val == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		old_val = atomic_xchg(&lock->count, -1);
		if (old_val == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	debug_mutex_set_owner(lock, task_thread_info(task));

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);

	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
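
/*
 * Illustrative sketch (added; not part of the original file, and the
 * parent/child names are hypothetical): subclasses tell lockdep that
 * taking two locks of the same class in a fixed order is intentional,
 * e.g. when locking a parent and a child object of the same type:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */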

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here:
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	debug_mutex_clear_owner(lock);

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/***
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();
	return __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
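
/*
 * Illustrative sketch (added; not part of the original file, and the
 * "my_dev" names are hypothetical): callers must check the return
 * value, because a -EINTR return means the mutex was never acquired
 * and so must not be unlocked:
 *
 *	static int my_dev_read(struct my_dev *dev)
 *	{
 *		int ret;
 *
 *		ret = mutex_lock_interruptible(&dev->lock);
 *		if (ret)
 *			return ret;
 *		...
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */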

int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();
	return __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
}
EXPORT_SYMBOL(mutex_lock_killable);
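
/*
 * Note added for clarity: mutex_lock_killable() behaves like
 * mutex_lock_interruptible(), except that only a fatal signal (one
 * that will kill the task) backs out of the wait and returns -EINTR;
 * other signals are ignored while sleeping. The error handling
 * pattern is the same as in the mutex_lock_interruptible() sketch
 * above.
 */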

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		debug_mutex_set_owner(lock, current_thread_info());
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}
	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/***
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * its return values are the opposite of down_trylock()'s! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	return __mutex_fastpath_trylock(&lock->count,
					__mutex_trylock_slowpath);
}

EXPORT_SYMBOL(mutex_trylock);
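
/*
 * Illustrative sketch (added; not part of the original file, and
 * do_work()/fall_back() are hypothetical helpers): because
 * mutex_trylock() returns 1 on success, the usual pattern is the
 * inverse of the down_trylock() one:
 *
 *	if (mutex_trylock(&dev->lock)) {
 *		do_work(dev);
 *		mutex_unlock(&dev->lock);
 *	} else {
 *		fall_back(dev);
 *	}
 */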