/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * Distributed under the terms of the GNU GPL, version 2
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>

/*
 * Some notes on the implementation:
 *
 * down_trylock() and up() can be called from interrupt context.
 * So we have to disable interrupts when taking the lock.
 *
 * The ->count variable, if positive, defines how many more tasks can
 * acquire the semaphore.  If negative, it represents how many tasks are
 * waiting on the semaphore (*).  If zero, no tasks are waiting, and no more
 * tasks can acquire the semaphore.
 *
 * (*) Except for the window between one task calling up() and the task
 * sleeping in a __down_common() waking up.  In order to avoid a third task
 * coming in and stealing the second task's wakeup, we leave the ->count
 * negative.  If we have a more complex situation, the ->count may become
 * zero or negative (eg a semaphore with count = 2, three tasks attempt to
 * acquire it, one sleeps, two finish and call up(), the second task to call
 * up() notices that the list is empty and just increments count).
 */

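/*
 * Usage sketch (illustrative only, not part of this file): a semaphore is
 * normally initialised with sema_init() from <linux/semaphore.h> and then
 * bracketed with down()/up().  The name 'my_sem' below is hypothetical:
 *
 *	struct semaphore my_sem;
 *
 *	sema_init(&my_sem, 1);		initial count of 1
 *
 *	down(&my_sem);			may sleep uninterruptibly
 *	... critical section ...
 *	up(&my_sem);			release; may wake one waiter
 */
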
static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long jiffies);
static noinline void __up(struct semaphore *sem);

void down(struct semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->lock, flags);
	if (unlikely(sem->count-- <= 0))
		__down(sem);
	spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);

int down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	spin_lock_irqsave(&sem->lock, flags);
	if (unlikely(sem->count-- <= 0))
		result = __down_interruptible(sem);
	spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_interruptible);

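/*
 * Illustrative sketch (not part of this file): down_interruptible() returns
 * 0 once the semaphore is acquired, or -EINTR if a signal interrupts the
 * wait, so callers typically propagate the error.  'my_sem' is hypothetical:
 *
 *	if (down_interruptible(&my_sem))
 *		return -EINTR;		interrupted, semaphore not held
 *	...
 *	up(&my_sem);
 */
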
int down_killable(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	spin_lock_irqsave(&sem->lock, flags);
	if (unlikely(sem->count-- <= 0))
		result = __down_killable(sem);
	spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_killable);

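/*
 * Illustrative sketch (not part of this file): down_killable() waits like
 * down(), but a fatal signal aborts the wait and the function returns
 * -EINTR.  'my_sem' is hypothetical:
 *
 *	if (down_killable(&my_sem))
 *		return -EINTR;		task was sent a fatal signal
 */
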
/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
 * been acquired successfully and 1 if it is contended.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock!  Be careful about this when converting code.
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task or interrupt.
 */
int down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int count;

	spin_lock_irqsave(&sem->lock, flags);
	count = sem->count - 1;
	if (likely(count >= 0))
		sem->count = count;
	spin_unlock_irqrestore(&sem->lock, flags);

	return (count < 0);
}
EXPORT_SYMBOL(down_trylock);

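/*
 * Illustrative sketch (not part of this file): note the inverted return
 * value described above - 0 means the semaphore was taken, non-zero means
 * it was not.  'my_sem' and the -EBUSY return are hypothetical caller code:
 *
 *	if (down_trylock(&my_sem))
 *		return -EBUSY;		not acquired, do not call up()
 *	...
 *	up(&my_sem);
 */
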
int down_timeout(struct semaphore *sem, long jiffies)
{
	unsigned long flags;
	int result = 0;

	spin_lock_irqsave(&sem->lock, flags);
	if (unlikely(sem->count-- <= 0))
		result = __down_timeout(sem, jiffies);
	spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_timeout);

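/*
 * Illustrative sketch (not part of this file): the timeout is given in
 * jiffies; helpers such as msecs_to_jiffies() can convert from milliseconds.
 * On timeout the function returns -ETIME.  'my_sem' is hypothetical:
 *
 *	if (down_timeout(&my_sem, msecs_to_jiffies(100)))
 *		return -ETIME;		not acquired within ~100ms
 *	...
 *	up(&my_sem);
 */
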
void up(struct semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count >= 0))
		sem->count++;
	else
		__up(sem);
	spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(up);

/* Functions for the contended case */

struct semaphore_waiter {
	struct list_head list;
	struct task_struct *task;
	int up;
};

/*
 * Wake up a process waiting on a semaphore.  We need to call this from both
 * __up and __down_common as it's possible to race a task into the semaphore
 * if it comes in at just the right time between two tasks calling up() and
 * a third task waking up.  This function assumes the wait_list is already
 * checked for being non-empty.
 */
static noinline void __sched __up_down_common(struct semaphore *sem)
{
	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
						struct semaphore_waiter, list);
	list_del(&waiter->list);
	waiter->up = 1;
	wake_up_process(waiter->task);
}

/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
								long timeout)
{
	int result = 0;
	struct task_struct *task = current;
	struct semaphore_waiter waiter;

	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = task;
	waiter.up = 0;

	for (;;) {
		if (state == TASK_INTERRUPTIBLE && signal_pending(task))
			goto interrupted;
		if (state == TASK_KILLABLE && fatal_signal_pending(task))
			goto interrupted;
		if (timeout <= 0)
			goto timed_out;
		__set_task_state(task, state);
		spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		spin_lock_irq(&sem->lock);
		if (waiter.up)
			goto woken;
	}

 timed_out:
	list_del(&waiter.list);
	result = -ETIME;
	goto woken;
 interrupted:
	list_del(&waiter.list);
	result = -EINTR;
 woken:
	/*
	 * Account for the process which woke us up.  For the case where
	 * we're interrupted, we need to increment the count on our own
	 * behalf.  I don't believe we can hit the case where the
	 * sem->count hits zero, *and* there's a second task sleeping,
	 * but it doesn't hurt, that's not a commonly exercised path and
	 * it's not a performance path either.
	 */
	if (unlikely((++sem->count >= 0) && !list_empty(&sem->wait_list)))
		__up_down_common(sem);
	return result;
}

static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
}

static noinline void __sched __up(struct semaphore *sem)
{
	if (unlikely(list_empty(&sem->wait_list)))
		sem->count++;
	else
		__up_down_common(sem);
}