futex: Move drop_futex_key_refs out of spinlock'ed region
[net-next-2.6.git] / kernel / futex.c
1da177e4
LT
1/*
2 * Fast Userspace Mutexes (which I call "Futexes!").
3 * (C) Rusty Russell, IBM 2002
4 *
5 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
6 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
7 *
8 * Removed page pinning, fix privately mapped COW pages and other cleanups
9 * (C) Copyright 2003, 2004 Jamie Lokier
10 *
0771dfef
IM
11 * Robust futex support started by Ingo Molnar
12 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
13 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
14 *
c87e2837
IM
15 * PI-futex support started by Ingo Molnar and Thomas Gleixner
16 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
17 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
18 *
34f01cc1
ED
19 * PRIVATE futexes by Eric Dumazet
20 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
21 *
52400ba9
DH
22 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
23 * Copyright (C) IBM Corporation, 2009
24 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
25 *
1da177e4
LT
26 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
27 * enough at me, Linus for the original (flawed) idea, Matthew
28 * Kirkwood for proof-of-concept implementation.
29 *
30 * "The futexes are also cursed."
31 * "But they come in a choice of three flavours!"
32 *
33 * This program is free software; you can redistribute it and/or modify
34 * it under the terms of the GNU General Public License as published by
35 * the Free Software Foundation; either version 2 of the License, or
36 * (at your option) any later version.
37 *
38 * This program is distributed in the hope that it will be useful,
39 * but WITHOUT ANY WARRANTY; without even the implied warranty of
40 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
41 * GNU General Public License for more details.
42 *
43 * You should have received a copy of the GNU General Public License
44 * along with this program; if not, write to the Free Software
45 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
46 */
47#include <linux/slab.h>
48#include <linux/poll.h>
49#include <linux/fs.h>
50#include <linux/file.h>
51#include <linux/jhash.h>
52#include <linux/init.h>
53#include <linux/futex.h>
54#include <linux/mount.h>
55#include <linux/pagemap.h>
56#include <linux/syscalls.h>
7ed20e1a 57#include <linux/signal.h>
9adef58b 58#include <linux/module.h>
fd5eea42 59#include <linux/magic.h>
b488893a
PE
60#include <linux/pid.h>
61#include <linux/nsproxy.h>
62
4732efbe 63#include <asm/futex.h>
1da177e4 64
c87e2837
IM
65#include "rtmutex_common.h"
66
a0c1e907
TG
67int __read_mostly futex_cmpxchg_enabled;
68
1da177e4
LT
69#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
70
c87e2837
IM
71/*
72 * Priority Inheritance state:
73 */
74struct futex_pi_state {
75 /*
76 * list of 'owned' pi_state instances - these have to be
77 * cleaned up in do_exit() if the task exits prematurely:
78 */
79 struct list_head list;
80
81 /*
82 * The PI object:
83 */
84 struct rt_mutex pi_mutex;
85
86 struct task_struct *owner;
87 atomic_t refcount;
88
89 union futex_key key;
90};
91
d8d88fbb
DH
92/**
93 * struct futex_q - The hashed futex queue entry, one per waiting task
94 * @task: the task waiting on the futex
95 * @lock_ptr: the hash bucket lock
96 * @key: the key the futex is hashed on
97 * @pi_state: optional priority inheritance state
98 * @rt_waiter: rt_waiter storage for use with requeue_pi
99 * @requeue_pi_key: the requeue_pi target futex key
100 * @bitset: bitset for the optional bitmasked wakeup
101 *
102 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
1da177e4
LT
103 * we can wake only the relevant ones (hashed queues may be shared).
104 *
105 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
ec92d082 106 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
1da177e4 107 * The order of wakeup is always to make the first condition true, then
d8d88fbb
DH
108 * the second.
109 *
110 * PI futexes are typically woken before they are removed from the hash list via
111 * the rt_mutex code. See unqueue_me_pi().
1da177e4
LT
112 */
113struct futex_q {
ec92d082 114 struct plist_node list;
1da177e4 115
d8d88fbb 116 struct task_struct *task;
1da177e4 117 spinlock_t *lock_ptr;
1da177e4 118 union futex_key key;
c87e2837 119 struct futex_pi_state *pi_state;
52400ba9 120 struct rt_mutex_waiter *rt_waiter;
84bc4af5 121 union futex_key *requeue_pi_key;
cd689985 122 u32 bitset;
1da177e4
LT
123};
124
125/*
b2d0994b
DH
126 * Hash buckets are shared by all the futex_keys that hash to the same
127 * location. Each key may have multiple futex_q structures, one for each task
128 * waiting on a futex.
1da177e4
LT
129 */
130struct futex_hash_bucket {
ec92d082
PP
131 spinlock_t lock;
132 struct plist_head chain;
1da177e4
LT
133};
134
135static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
136
1da177e4
LT
137/*
138 * We hash on the keys returned from get_futex_key (see below).
139 */
140static struct futex_hash_bucket *hash_futex(union futex_key *key)
141{
142 u32 hash = jhash2((u32*)&key->both.word,
143 (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
144 key->both.offset);
145 return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
146}
147
148/*
149 * Return 1 if two futex_keys are equal, 0 otherwise.
150 */
151static inline int match_futex(union futex_key *key1, union futex_key *key2)
152{
2bc87203
DH
153 return (key1 && key2
154 && key1->both.word == key2->both.word
1da177e4
LT
155 && key1->both.ptr == key2->both.ptr
156 && key1->both.offset == key2->both.offset);
157}
158
38d47c1b
PZ
159/*
160 * Take a reference to the resource addressed by a key.
161 * Can be called while holding spinlocks.
162 *
163 */
164static void get_futex_key_refs(union futex_key *key)
165{
166 if (!key->both.ptr)
167 return;
168
169 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
170 case FUT_OFF_INODE:
171 atomic_inc(&key->shared.inode->i_count);
172 break;
173 case FUT_OFF_MMSHARED:
174 atomic_inc(&key->private.mm->mm_count);
175 break;
176 }
177}
178
179/*
180 * Drop a reference to the resource addressed by a key.
181 * The hash bucket spinlock must not be held.
182 */
183static void drop_futex_key_refs(union futex_key *key)
184{
90621c40
DH
185 if (!key->both.ptr) {
186 /* If we're here then we tried to put a key we failed to get */
187 WARN_ON_ONCE(1);
38d47c1b 188 return;
90621c40 189 }
38d47c1b
PZ
190
191 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
192 case FUT_OFF_INODE:
193 iput(key->shared.inode);
194 break;
195 case FUT_OFF_MMSHARED:
196 mmdrop(key->private.mm);
197 break;
198 }
199}
200
34f01cc1 201/**
d96ee56c
DH
202 * get_futex_key() - Get parameters which are the keys for a futex
203 * @uaddr: virtual address of the futex
204 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
205 * @key: address where result is stored.
206 * @rw: mapping needs to be read/write (values: VERIFY_READ,
207 * VERIFY_WRITE)
34f01cc1
ED
208 *
209 * Returns a negative error code or 0
210 * The key words are stored in *key on success.
1da177e4 211 *
f3a43f3f 212 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
1da177e4
LT
213 * offset_within_page). For private mappings, it's (uaddr, current->mm).
214 * We can usually work out the index without swapping in the page.
215 *
b2d0994b 216 * lock_page() might sleep, the caller should not hold a spinlock.
1da177e4 217 */
64d1304a
TG
218static int
219get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
1da177e4 220{
e2970f2f 221 unsigned long address = (unsigned long)uaddr;
1da177e4 222 struct mm_struct *mm = current->mm;
1da177e4
LT
223 struct page *page;
224 int err;
225
226 /*
227 * The futex address must be "naturally" aligned.
228 */
e2970f2f 229 key->both.offset = address % PAGE_SIZE;
34f01cc1 230 if (unlikely((address % sizeof(u32)) != 0))
1da177e4 231 return -EINVAL;
e2970f2f 232 address -= key->both.offset;
1da177e4 233
34f01cc1
ED
234 /*
235 * PROCESS_PRIVATE futexes are fast.
236 * As the mm cannot disappear under us and the 'key' only needs
 237 * the virtual address, we don't even have to find the underlying vma.
 238 * Note: We do have to check 'uaddr' is a valid user address,
239 * but access_ok() should be faster than find_vma()
240 */
241 if (!fshared) {
64d1304a 242 if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
34f01cc1
ED
243 return -EFAULT;
244 key->private.mm = mm;
245 key->private.address = address;
42569c39 246 get_futex_key_refs(key);
34f01cc1
ED
247 return 0;
248 }
1da177e4 249
38d47c1b 250again:
64d1304a 251 err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
38d47c1b
PZ
252 if (err < 0)
253 return err;
254
ce2ae53b 255 page = compound_head(page);
38d47c1b
PZ
256 lock_page(page);
257 if (!page->mapping) {
258 unlock_page(page);
259 put_page(page);
260 goto again;
261 }
1da177e4
LT
262
263 /*
264 * Private mappings are handled in a simple way.
265 *
266 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
267 * it's a read-only handle, it's expected that futexes attach to
38d47c1b 268 * the object not the particular process.
1da177e4 269 */
38d47c1b
PZ
270 if (PageAnon(page)) {
271 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
1da177e4 272 key->private.mm = mm;
e2970f2f 273 key->private.address = address;
38d47c1b
PZ
274 } else {
275 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
276 key->shared.inode = page->mapping->host;
277 key->shared.pgoff = page->index;
1da177e4
LT
278 }
279
38d47c1b 280 get_futex_key_refs(key);
1da177e4 281
38d47c1b
PZ
282 unlock_page(page);
283 put_page(page);
284 return 0;
1da177e4
LT
285}
286
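/*
 * Illustrative sketch (not part of the kernel source): how the futex keys
 * computed by get_futex_key() above show up from user space. Hypothetical
 * example code against the raw futex(2) syscall; 'fd' is an assumed open
 * file descriptor and error handling is omitted.
 *
 *	#include <linux/futex.h>
 *	#include <sys/mman.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// Process-private futex: key is (current->mm, uaddr); the
 *	// fshared == 0 fast path above never walks the page tables.
 *	static int private_futex;
 *	syscall(SYS_futex, &private_futex,
 *		FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, NULL);
 *
 *	// Shared futex in a file-backed MAP_SHARED mapping: key is
 *	// (inode, pgoff, offset), so every process mapping the file
 *	// hashes to the same bucket.
 *	int *shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, 0);
 *	syscall(SYS_futex, shared, FUTEX_WAIT, 0, NULL);
 */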
38d47c1b 287static inline
c2f9f201 288void put_futex_key(int fshared, union futex_key *key)
1da177e4 289{
38d47c1b 290 drop_futex_key_refs(key);
1da177e4
LT
291}
292
d96ee56c
DH
293/**
294 * fault_in_user_writeable() - Fault in user address and verify RW access
d0725992
TG
295 * @uaddr: pointer to faulting user space address
296 *
297 * Slow path to fixup the fault we just took in the atomic write
298 * access to @uaddr.
299 *
300 * We have no generic implementation of a non destructive write to the
301 * user address. We know that we faulted in the atomic pagefault
302 * disabled section so we can as well avoid the #PF overhead by
303 * calling get_user_pages() right away.
304 */
305static int fault_in_user_writeable(u32 __user *uaddr)
306{
307 int ret = get_user_pages(current, current->mm, (unsigned long)uaddr,
aa715284 308 1, 1, 0, NULL, NULL);
d0725992
TG
309 return ret < 0 ? ret : 0;
310}
311
4b1c486b
DH
312/**
313 * futex_top_waiter() - Return the highest priority waiter on a futex
d96ee56c
DH
314 * @hb: the hash bucket the futex_q's reside in
315 * @key: the futex key (to distinguish it from other futex futex_q's)
4b1c486b
DH
316 *
317 * Must be called with the hb lock held.
318 */
319static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
320 union futex_key *key)
321{
322 struct futex_q *this;
323
324 plist_for_each_entry(this, &hb->chain, list) {
325 if (match_futex(&this->key, key))
326 return this;
327 }
328 return NULL;
329}
330
36cf3b5c
TG
331static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
332{
333 u32 curval;
334
335 pagefault_disable();
336 curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
337 pagefault_enable();
338
339 return curval;
340}
341
342static int get_futex_value_locked(u32 *dest, u32 __user *from)
1da177e4
LT
343{
344 int ret;
345
a866374a 346 pagefault_disable();
e2970f2f 347 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
a866374a 348 pagefault_enable();
1da177e4
LT
349
350 return ret ? -EFAULT : 0;
351}
352
c87e2837
IM
353
354/*
355 * PI code:
356 */
357static int refill_pi_state_cache(void)
358{
359 struct futex_pi_state *pi_state;
360
361 if (likely(current->pi_state_cache))
362 return 0;
363
4668edc3 364 pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
c87e2837
IM
365
366 if (!pi_state)
367 return -ENOMEM;
368
c87e2837
IM
369 INIT_LIST_HEAD(&pi_state->list);
370 /* pi_mutex gets initialized later */
371 pi_state->owner = NULL;
372 atomic_set(&pi_state->refcount, 1);
38d47c1b 373 pi_state->key = FUTEX_KEY_INIT;
c87e2837
IM
374
375 current->pi_state_cache = pi_state;
376
377 return 0;
378}
379
380static struct futex_pi_state * alloc_pi_state(void)
381{
382 struct futex_pi_state *pi_state = current->pi_state_cache;
383
384 WARN_ON(!pi_state);
385 current->pi_state_cache = NULL;
386
387 return pi_state;
388}
389
390static void free_pi_state(struct futex_pi_state *pi_state)
391{
392 if (!atomic_dec_and_test(&pi_state->refcount))
393 return;
394
395 /*
396 * If pi_state->owner is NULL, the owner is most probably dying
397 * and has cleaned up the pi_state already
398 */
399 if (pi_state->owner) {
400 spin_lock_irq(&pi_state->owner->pi_lock);
401 list_del_init(&pi_state->list);
402 spin_unlock_irq(&pi_state->owner->pi_lock);
403
404 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
405 }
406
407 if (current->pi_state_cache)
408 kfree(pi_state);
409 else {
410 /*
411 * pi_state->list is already empty.
412 * clear pi_state->owner.
413 * refcount is at 0 - put it back to 1.
414 */
415 pi_state->owner = NULL;
416 atomic_set(&pi_state->refcount, 1);
417 current->pi_state_cache = pi_state;
418 }
419}
420
421/*
422 * Look up the task based on what TID userspace gave us.
 423 * We don't trust it.
424 */
425static struct task_struct * futex_find_get_task(pid_t pid)
426{
427 struct task_struct *p;
c69e8d9c 428 const struct cred *cred = current_cred(), *pcred;
c87e2837 429
d359b549 430 rcu_read_lock();
228ebcbe 431 p = find_task_by_vpid(pid);
c69e8d9c 432 if (!p) {
a06381fe 433 p = ERR_PTR(-ESRCH);
c69e8d9c
DH
434 } else {
435 pcred = __task_cred(p);
436 if (cred->euid != pcred->euid &&
437 cred->euid != pcred->uid)
438 p = ERR_PTR(-ESRCH);
439 else
440 get_task_struct(p);
441 }
a06381fe 442
d359b549 443 rcu_read_unlock();
c87e2837
IM
444
445 return p;
446}
447
448/*
449 * This task is holding PI mutexes at exit time => bad.
450 * Kernel cleans up PI-state, but userspace is likely hosed.
451 * (Robust-futex cleanup is separate and might save the day for userspace.)
452 */
453void exit_pi_state_list(struct task_struct *curr)
454{
c87e2837
IM
455 struct list_head *next, *head = &curr->pi_state_list;
456 struct futex_pi_state *pi_state;
627371d7 457 struct futex_hash_bucket *hb;
38d47c1b 458 union futex_key key = FUTEX_KEY_INIT;
c87e2837 459
a0c1e907
TG
460 if (!futex_cmpxchg_enabled)
461 return;
c87e2837
IM
462 /*
463 * We are a ZOMBIE and nobody can enqueue itself on
464 * pi_state_list anymore, but we have to be careful
627371d7 465 * versus waiters unqueueing themselves:
c87e2837
IM
466 */
467 spin_lock_irq(&curr->pi_lock);
468 while (!list_empty(head)) {
469
470 next = head->next;
471 pi_state = list_entry(next, struct futex_pi_state, list);
472 key = pi_state->key;
627371d7 473 hb = hash_futex(&key);
c87e2837
IM
474 spin_unlock_irq(&curr->pi_lock);
475
c87e2837
IM
476 spin_lock(&hb->lock);
477
478 spin_lock_irq(&curr->pi_lock);
627371d7
IM
479 /*
480 * We dropped the pi-lock, so re-check whether this
481 * task still owns the PI-state:
482 */
c87e2837
IM
483 if (head->next != next) {
484 spin_unlock(&hb->lock);
485 continue;
486 }
487
c87e2837 488 WARN_ON(pi_state->owner != curr);
627371d7
IM
489 WARN_ON(list_empty(&pi_state->list));
490 list_del_init(&pi_state->list);
c87e2837
IM
491 pi_state->owner = NULL;
492 spin_unlock_irq(&curr->pi_lock);
493
494 rt_mutex_unlock(&pi_state->pi_mutex);
495
496 spin_unlock(&hb->lock);
497
498 spin_lock_irq(&curr->pi_lock);
499 }
500 spin_unlock_irq(&curr->pi_lock);
501}
502
503static int
d0aa7a70
PP
504lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
505 union futex_key *key, struct futex_pi_state **ps)
c87e2837
IM
506{
507 struct futex_pi_state *pi_state = NULL;
508 struct futex_q *this, *next;
ec92d082 509 struct plist_head *head;
c87e2837 510 struct task_struct *p;
778e9a9c 511 pid_t pid = uval & FUTEX_TID_MASK;
c87e2837
IM
512
513 head = &hb->chain;
514
ec92d082 515 plist_for_each_entry_safe(this, next, head, list) {
d0aa7a70 516 if (match_futex(&this->key, key)) {
c87e2837
IM
517 /*
518 * Another waiter already exists - bump up
519 * the refcount and return its pi_state:
520 */
521 pi_state = this->pi_state;
06a9ec29
TG
522 /*
523 * Userspace might have messed up non PI and PI futexes
524 */
525 if (unlikely(!pi_state))
526 return -EINVAL;
527
627371d7 528 WARN_ON(!atomic_read(&pi_state->refcount));
778e9a9c
AK
529 WARN_ON(pid && pi_state->owner &&
530 pi_state->owner->pid != pid);
627371d7 531
c87e2837 532 atomic_inc(&pi_state->refcount);
d0aa7a70 533 *ps = pi_state;
c87e2837
IM
534
535 return 0;
536 }
537 }
538
539 /*
e3f2ddea 540 * We are the first waiter - try to look up the real owner and attach
778e9a9c 541 * the new pi_state to it, but bail out when TID = 0
c87e2837 542 */
778e9a9c 543 if (!pid)
e3f2ddea 544 return -ESRCH;
c87e2837 545 p = futex_find_get_task(pid);
778e9a9c
AK
546 if (IS_ERR(p))
547 return PTR_ERR(p);
548
549 /*
550 * We need to look at the task state flags to figure out,
551 * whether the task is exiting. To protect against the do_exit
552 * change of the task flags, we do this protected by
553 * p->pi_lock:
554 */
555 spin_lock_irq(&p->pi_lock);
556 if (unlikely(p->flags & PF_EXITING)) {
557 /*
558 * The task is on the way out. When PF_EXITPIDONE is
559 * set, we know that the task has finished the
560 * cleanup:
561 */
562 int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
563
564 spin_unlock_irq(&p->pi_lock);
565 put_task_struct(p);
566 return ret;
567 }
c87e2837
IM
568
569 pi_state = alloc_pi_state();
570
571 /*
572 * Initialize the pi_mutex in locked state and make 'p'
573 * the owner of it:
574 */
575 rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
576
577 /* Store the key for possible exit cleanups: */
d0aa7a70 578 pi_state->key = *key;
c87e2837 579
627371d7 580 WARN_ON(!list_empty(&pi_state->list));
c87e2837
IM
581 list_add(&pi_state->list, &p->pi_state_list);
582 pi_state->owner = p;
583 spin_unlock_irq(&p->pi_lock);
584
585 put_task_struct(p);
586
d0aa7a70 587 *ps = pi_state;
c87e2837
IM
588
589 return 0;
590}
591
1a52084d 592/**
d96ee56c 593 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
bab5bc9e
DH
594 * @uaddr: the pi futex user address
595 * @hb: the pi futex hash bucket
596 * @key: the futex key associated with uaddr and hb
597 * @ps: the pi_state pointer where we store the result of the
598 * lookup
599 * @task: the task to perform the atomic lock work for. This will
600 * be "current" except in the case of requeue pi.
601 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
1a52084d
DH
602 *
603 * Returns:
604 * 0 - ready to wait
605 * 1 - acquired the lock
606 * <0 - error
607 *
608 * The hb->lock and futex_key refs shall be held by the caller.
609 */
610static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
611 union futex_key *key,
612 struct futex_pi_state **ps,
bab5bc9e 613 struct task_struct *task, int set_waiters)
1a52084d
DH
614{
615 int lock_taken, ret, ownerdied = 0;
616 u32 uval, newval, curval;
617
618retry:
619 ret = lock_taken = 0;
620
621 /*
622 * To avoid races, we attempt to take the lock here again
623 * (by doing a 0 -> TID atomic cmpxchg), while holding all
624 * the locks. It will most likely not succeed.
625 */
626 newval = task_pid_vnr(task);
bab5bc9e
DH
627 if (set_waiters)
628 newval |= FUTEX_WAITERS;
1a52084d
DH
629
630 curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
631
632 if (unlikely(curval == -EFAULT))
633 return -EFAULT;
634
635 /*
636 * Detect deadlocks.
637 */
638 if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task))))
639 return -EDEADLK;
640
641 /*
642 * Surprise - we got the lock. Just return to userspace:
643 */
644 if (unlikely(!curval))
645 return 1;
646
647 uval = curval;
648
649 /*
650 * Set the FUTEX_WAITERS flag, so the owner will know it has someone
651 * to wake at the next unlock.
652 */
653 newval = curval | FUTEX_WAITERS;
654
655 /*
 656 * There are two cases where a futex might have no owner (the
 657 * owner TID is 0): OWNER_DIED. We take over the futex in this
 658 * case. We also do an unconditional take-over when the owner
 659 * of the futex died.
660 *
661 * This is safe as we are protected by the hash bucket lock !
662 */
663 if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
664 /* Keep the OWNER_DIED bit */
665 newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task);
666 ownerdied = 0;
667 lock_taken = 1;
668 }
669
670 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
671
672 if (unlikely(curval == -EFAULT))
673 return -EFAULT;
674 if (unlikely(curval != uval))
675 goto retry;
676
677 /*
678 * We took the lock due to owner died take over.
679 */
680 if (unlikely(lock_taken))
681 return 1;
682
683 /*
 684 * We don't have the lock. Look up the PI state (or create it if
685 * we are the first waiter):
686 */
687 ret = lookup_pi_state(uval, hb, key, ps);
688
689 if (unlikely(ret)) {
690 switch (ret) {
691 case -ESRCH:
692 /*
693 * No owner found for this futex. Check if the
694 * OWNER_DIED bit is set to figure out whether
695 * this is a robust futex or not.
696 */
697 if (get_futex_value_locked(&curval, uaddr))
698 return -EFAULT;
699
700 /*
701 * We simply start over in case of a robust
702 * futex. The code above will take the futex
703 * and return happy.
704 */
705 if (curval & FUTEX_OWNER_DIED) {
706 ownerdied = 1;
707 goto retry;
708 }
709 default:
710 break;
711 }
712 }
713
714 return ret;
715}
716
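/*
 * Illustrative sketch (not part of the kernel source): the user-space half
 * of the 0 -> TID protocol that futex_lock_pi_atomic() above completes.
 * Hypothetical example code; a production implementation lives in glibc's
 * PI mutexes. The unlock counterpart clears TID -> 0 in user space and only
 * calls FUTEX_UNLOCK_PI when FUTEX_WAITERS is set.
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int pi_futex;	// 0 == unowned
 *
 *	void pi_lock(void)
 *	{
 *		int tid = syscall(SYS_gettid);
 *
 *		// Fast path: 0 -> TID transition without entering the kernel.
 *		if (__sync_bool_compare_and_swap(&pi_futex, 0, tid))
 *			return;
 *		// Contended: the kernel sets FUTEX_WAITERS, boosts the owner
 *		// through the rt_mutex and blocks us until we own the futex.
 *		syscall(SYS_futex, &pi_futex, FUTEX_LOCK_PI, 0, NULL);
 *	}
 */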
1da177e4
LT
717/*
718 * The hash bucket lock must be held when this is called.
719 * Afterwards, the futex_q must not be accessed.
720 */
721static void wake_futex(struct futex_q *q)
722{
f1a11e05
TG
723 struct task_struct *p = q->task;
724
1da177e4 725 /*
f1a11e05
TG
726 * We set q->lock_ptr = NULL _before_ we wake up the task. If
 727 * a non-futex wake up happens on another CPU then the task
 728 * might exit and p would dereference a non-existing task
729 * struct. Prevent this by holding a reference on p across the
730 * wake up.
1da177e4 731 */
f1a11e05
TG
732 get_task_struct(p);
733
734 plist_del(&q->list, &q->list.plist);
1da177e4 735 /*
f1a11e05
TG
736 * The waiting task can free the futex_q as soon as
737 * q->lock_ptr = NULL is written, without taking any locks. A
738 * memory barrier is required here to prevent the following
739 * store to lock_ptr from getting ahead of the plist_del.
1da177e4 740 */
ccdea2f8 741 smp_wmb();
1da177e4 742 q->lock_ptr = NULL;
f1a11e05
TG
743
744 wake_up_state(p, TASK_NORMAL);
745 put_task_struct(p);
1da177e4
LT
746}
747
c87e2837
IM
748static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
749{
750 struct task_struct *new_owner;
751 struct futex_pi_state *pi_state = this->pi_state;
752 u32 curval, newval;
753
754 if (!pi_state)
755 return -EINVAL;
756
21778867 757 spin_lock(&pi_state->pi_mutex.wait_lock);
c87e2837
IM
758 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
759
760 /*
761 * This happens when we have stolen the lock and the original
762 * pending owner did not enqueue itself back on the rt_mutex.
 763 * That's not a tragedy: it tells us that a lock waiter
 764 * is in flight. We make the futex_q waiter the pending owner.
765 */
766 if (!new_owner)
767 new_owner = this->task;
768
769 /*
770 * We pass it to the next owner. (The WAITERS bit is always
771 * kept enabled while there is PI state around. We must also
772 * preserve the owner died bit.)
773 */
e3f2ddea 774 if (!(uval & FUTEX_OWNER_DIED)) {
778e9a9c
AK
775 int ret = 0;
776
b488893a 777 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
e3f2ddea 778
36cf3b5c 779 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
778e9a9c 780
e3f2ddea 781 if (curval == -EFAULT)
778e9a9c 782 ret = -EFAULT;
cde898fa 783 else if (curval != uval)
778e9a9c
AK
784 ret = -EINVAL;
785 if (ret) {
786 spin_unlock(&pi_state->pi_mutex.wait_lock);
787 return ret;
788 }
e3f2ddea 789 }
c87e2837 790
627371d7
IM
791 spin_lock_irq(&pi_state->owner->pi_lock);
792 WARN_ON(list_empty(&pi_state->list));
793 list_del_init(&pi_state->list);
794 spin_unlock_irq(&pi_state->owner->pi_lock);
795
796 spin_lock_irq(&new_owner->pi_lock);
797 WARN_ON(!list_empty(&pi_state->list));
c87e2837
IM
798 list_add(&pi_state->list, &new_owner->pi_state_list);
799 pi_state->owner = new_owner;
627371d7
IM
800 spin_unlock_irq(&new_owner->pi_lock);
801
21778867 802 spin_unlock(&pi_state->pi_mutex.wait_lock);
c87e2837
IM
803 rt_mutex_unlock(&pi_state->pi_mutex);
804
805 return 0;
806}
807
808static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
809{
810 u32 oldval;
811
812 /*
813 * There is no waiter, so we unlock the futex. The owner died
 814 * bit does not have to be preserved here. We are the owner:
815 */
36cf3b5c 816 oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);
c87e2837
IM
817
818 if (oldval == -EFAULT)
819 return oldval;
820 if (oldval != uval)
821 return -EAGAIN;
822
823 return 0;
824}
825
8b8f319f
IM
826/*
827 * Express the locking dependencies for lockdep:
828 */
829static inline void
830double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
831{
832 if (hb1 <= hb2) {
833 spin_lock(&hb1->lock);
834 if (hb1 < hb2)
835 spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
836 } else { /* hb1 > hb2 */
837 spin_lock(&hb2->lock);
838 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
839 }
840}
841
5eb3dc62
DH
842static inline void
843double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
844{
f061d351 845 spin_unlock(&hb1->lock);
88f502fe
IM
846 if (hb1 != hb2)
847 spin_unlock(&hb2->lock);
5eb3dc62
DH
848}
849
1da177e4 850/*
b2d0994b 851 * Wake up waiters matching bitset queued on this futex (uaddr).
1da177e4 852 */
c2f9f201 853static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
1da177e4 854{
e2970f2f 855 struct futex_hash_bucket *hb;
1da177e4 856 struct futex_q *this, *next;
ec92d082 857 struct plist_head *head;
38d47c1b 858 union futex_key key = FUTEX_KEY_INIT;
1da177e4
LT
859 int ret;
860
cd689985
TG
861 if (!bitset)
862 return -EINVAL;
863
64d1304a 864 ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
1da177e4
LT
865 if (unlikely(ret != 0))
866 goto out;
867
e2970f2f
IM
868 hb = hash_futex(&key);
869 spin_lock(&hb->lock);
870 head = &hb->chain;
1da177e4 871
ec92d082 872 plist_for_each_entry_safe(this, next, head, list) {
1da177e4 873 if (match_futex (&this->key, &key)) {
52400ba9 874 if (this->pi_state || this->rt_waiter) {
ed6f7b10
IM
875 ret = -EINVAL;
876 break;
877 }
cd689985
TG
878
879 /* Check if one of the bits is set in both bitsets */
880 if (!(this->bitset & bitset))
881 continue;
882
1da177e4
LT
883 wake_futex(this);
884 if (++ret >= nr_wake)
885 break;
886 }
887 }
888
e2970f2f 889 spin_unlock(&hb->lock);
38d47c1b 890 put_futex_key(fshared, &key);
42d35d48 891out:
1da177e4
LT
892 return ret;
893}
894
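/*
 * Illustrative sketch (not part of the kernel source): the bitset variants
 * that feed the "bits set in both bitsets" check in futex_wake() above.
 * Hypothetical example code against the raw futex(2) syscall.
 *
 *	// Waiter: only wake-ups whose bitset intersects 0x1 concern us.
 *	syscall(SYS_futex, &futex_var, FUTEX_WAIT_BITSET, expected_val,
 *		NULL, NULL, 0x1);
 *
 *	// Waker: wake up to INT_MAX waiters whose bitset intersects 0x1.
 *	syscall(SYS_futex, &futex_var, FUTEX_WAKE_BITSET, INT_MAX,
 *		NULL, NULL, 0x1);
 *
 * A plain FUTEX_WAIT/FUTEX_WAKE behaves as if the bitset were
 * FUTEX_BITSET_MATCH_ANY (all bits set).
 */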
4732efbe
JJ
895/*
896 * Wake up all waiters hashed on the physical page that is mapped
897 * to this virtual address:
898 */
e2970f2f 899static int
c2f9f201 900futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
e2970f2f 901 int nr_wake, int nr_wake2, int op)
4732efbe 902{
38d47c1b 903 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
e2970f2f 904 struct futex_hash_bucket *hb1, *hb2;
ec92d082 905 struct plist_head *head;
4732efbe 906 struct futex_q *this, *next;
e4dc5b7a 907 int ret, op_ret;
4732efbe 908
e4dc5b7a 909retry:
64d1304a 910 ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
4732efbe
JJ
911 if (unlikely(ret != 0))
912 goto out;
64d1304a 913 ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
4732efbe 914 if (unlikely(ret != 0))
42d35d48 915 goto out_put_key1;
4732efbe 916
e2970f2f
IM
917 hb1 = hash_futex(&key1);
918 hb2 = hash_futex(&key2);
4732efbe 919
e4dc5b7a 920retry_private:
eaaea803 921 double_lock_hb(hb1, hb2);
e2970f2f 922 op_ret = futex_atomic_op_inuser(op, uaddr2);
4732efbe 923 if (unlikely(op_ret < 0)) {
4732efbe 924
5eb3dc62 925 double_unlock_hb(hb1, hb2);
4732efbe 926
7ee1dd3f 927#ifndef CONFIG_MMU
e2970f2f
IM
928 /*
929 * we don't get EFAULT from MMU faults if we don't have an MMU,
930 * but we might get them from range checking
931 */
7ee1dd3f 932 ret = op_ret;
42d35d48 933 goto out_put_keys;
7ee1dd3f
DH
934#endif
935
796f8d9b
DG
936 if (unlikely(op_ret != -EFAULT)) {
937 ret = op_ret;
42d35d48 938 goto out_put_keys;
796f8d9b
DG
939 }
940
d0725992 941 ret = fault_in_user_writeable(uaddr2);
4732efbe 942 if (ret)
de87fcc1 943 goto out_put_keys;
4732efbe 944
e4dc5b7a
DH
945 if (!fshared)
946 goto retry_private;
947
de87fcc1
DH
948 put_futex_key(fshared, &key2);
949 put_futex_key(fshared, &key1);
e4dc5b7a 950 goto retry;
4732efbe
JJ
951 }
952
e2970f2f 953 head = &hb1->chain;
4732efbe 954
ec92d082 955 plist_for_each_entry_safe(this, next, head, list) {
4732efbe
JJ
956 if (match_futex (&this->key, &key1)) {
957 wake_futex(this);
958 if (++ret >= nr_wake)
959 break;
960 }
961 }
962
963 if (op_ret > 0) {
e2970f2f 964 head = &hb2->chain;
4732efbe
JJ
965
966 op_ret = 0;
ec92d082 967 plist_for_each_entry_safe(this, next, head, list) {
4732efbe
JJ
968 if (match_futex (&this->key, &key2)) {
969 wake_futex(this);
970 if (++op_ret >= nr_wake2)
971 break;
972 }
973 }
974 ret += op_ret;
975 }
976
5eb3dc62 977 double_unlock_hb(hb1, hb2);
42d35d48 978out_put_keys:
38d47c1b 979 put_futex_key(fshared, &key2);
42d35d48 980out_put_key1:
38d47c1b 981 put_futex_key(fshared, &key1);
42d35d48 982out:
4732efbe
JJ
983 return ret;
984}
985
9121e478
DH
986/**
987 * requeue_futex() - Requeue a futex_q from one hb to another
988 * @q: the futex_q to requeue
989 * @hb1: the source hash_bucket
990 * @hb2: the target hash_bucket
991 * @key2: the new key for the requeued futex_q
992 */
993static inline
994void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
995 struct futex_hash_bucket *hb2, union futex_key *key2)
996{
997
998 /*
999 * If key1 and key2 hash to the same bucket, no need to
1000 * requeue.
1001 */
1002 if (likely(&hb1->chain != &hb2->chain)) {
1003 plist_del(&q->list, &hb1->chain);
1004 plist_add(&q->list, &hb2->chain);
1005 q->lock_ptr = &hb2->lock;
1006#ifdef CONFIG_DEBUG_PI_LIST
1007 q->list.plist.lock = &hb2->lock;
1008#endif
1009 }
1010 get_futex_key_refs(key2);
1011 q->key = *key2;
1012}
1013
52400ba9
DH
1014/**
1015 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
d96ee56c
DH
1016 * @q: the futex_q
1017 * @key: the key of the requeue target futex
1018 * @hb: the hash_bucket of the requeue target futex
52400ba9
DH
1019 *
1020 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1021 * target futex if it is uncontended or via a lock steal. Set the futex_q key
1022 * to the requeue target futex so the waiter can detect the wakeup on the right
1023 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
beda2c7e
DH
1024 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
1025 * to protect access to the pi_state to fixup the owner later. Must be called
1026 * with both q->lock_ptr and hb->lock held.
52400ba9
DH
1027 */
1028static inline
beda2c7e
DH
1029void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1030 struct futex_hash_bucket *hb)
52400ba9 1031{
52400ba9
DH
1032 get_futex_key_refs(key);
1033 q->key = *key;
1034
1035 WARN_ON(plist_node_empty(&q->list));
1036 plist_del(&q->list, &q->list.plist);
1037
1038 WARN_ON(!q->rt_waiter);
1039 q->rt_waiter = NULL;
1040
beda2c7e
DH
1041 q->lock_ptr = &hb->lock;
1042#ifdef CONFIG_DEBUG_PI_LIST
1043 q->list.plist.lock = &hb->lock;
1044#endif
1045
f1a11e05 1046 wake_up_state(q->task, TASK_NORMAL);
52400ba9
DH
1047}
1048
1049/**
1050 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
bab5bc9e
DH
1051 * @pifutex: the user address of the to futex
1052 * @hb1: the from futex hash bucket, must be locked by the caller
1053 * @hb2: the to futex hash bucket, must be locked by the caller
1054 * @key1: the from futex key
1055 * @key2: the to futex key
1056 * @ps: address to store the pi_state pointer
1057 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
52400ba9
DH
1058 *
1059 * Try and get the lock on behalf of the top waiter if we can do it atomically.
bab5bc9e
DH
1060 * Wake the top waiter if we succeed. If the caller specified set_waiters,
1061 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1062 * hb1 and hb2 must be held by the caller.
52400ba9
DH
1063 *
1064 * Returns:
 1065 * 0 - failed to acquire the lock atomically
1066 * 1 - acquired the lock
1067 * <0 - error
1068 */
1069static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1070 struct futex_hash_bucket *hb1,
1071 struct futex_hash_bucket *hb2,
1072 union futex_key *key1, union futex_key *key2,
bab5bc9e 1073 struct futex_pi_state **ps, int set_waiters)
52400ba9 1074{
bab5bc9e 1075 struct futex_q *top_waiter = NULL;
52400ba9
DH
1076 u32 curval;
1077 int ret;
1078
1079 if (get_futex_value_locked(&curval, pifutex))
1080 return -EFAULT;
1081
bab5bc9e
DH
1082 /*
1083 * Find the top_waiter and determine if there are additional waiters.
1084 * If the caller intends to requeue more than 1 waiter to pifutex,
1085 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1086 * as we have means to handle the possible fault. If not, don't set
 1087 * the bit unnecessarily as it will force the subsequent unlock to enter
1088 * the kernel.
1089 */
52400ba9
DH
1090 top_waiter = futex_top_waiter(hb1, key1);
1091
1092 /* There are no waiters, nothing for us to do. */
1093 if (!top_waiter)
1094 return 0;
1095
84bc4af5
DH
1096 /* Ensure we requeue to the expected futex. */
1097 if (!match_futex(top_waiter->requeue_pi_key, key2))
1098 return -EINVAL;
1099
52400ba9 1100 /*
bab5bc9e
DH
1101 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
1102 * the contended case or if set_waiters is 1. The pi_state is returned
1103 * in ps in contended cases.
52400ba9 1104 */
bab5bc9e
DH
1105 ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1106 set_waiters);
52400ba9 1107 if (ret == 1)
beda2c7e 1108 requeue_pi_wake_futex(top_waiter, key2, hb2);
52400ba9
DH
1109
1110 return ret;
1111}
1112
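/*
 * Illustrative sketch (not part of the kernel source): the user-space
 * pattern served by futex_requeue() below. A condition-variable broadcast
 * wakes one waiter and requeues the rest onto the mutex futex instead of
 * waking them all, avoiding a thundering herd. Hypothetical example code;
 * note that val2 (nr_requeue) travels in the timeout argument slot of the
 * raw futex(2) syscall.
 *
 *	// cond_val was read from cond_futex before deciding to broadcast.
 *	syscall(SYS_futex, &cond_futex, FUTEX_CMP_REQUEUE,
 *		1,				// nr_wake
 *		(void *)(long)INT_MAX,		// nr_requeue (val2)
 *		&mutex_futex,			// requeue target
 *		cond_val);			// expected value of *cond_futex
 */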
1113/**
1114 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
 1115 * @uaddr1: source futex user address
 1116 * @uaddr2: target futex user address
 1117 * @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
 1118 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
 1119 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
1120 * pi futex (pi to pi requeue is not supported)
1121 *
1122 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1123 * uaddr2 atomically on behalf of the top waiter.
1124 *
1125 * Returns:
1126 * >=0 - on success, the number of tasks requeued or woken
1127 * <0 - on error
1da177e4 1128 */
c2f9f201 1129static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
52400ba9
DH
1130 int nr_wake, int nr_requeue, u32 *cmpval,
1131 int requeue_pi)
1da177e4 1132{
38d47c1b 1133 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
52400ba9
DH
1134 int drop_count = 0, task_count = 0, ret;
1135 struct futex_pi_state *pi_state = NULL;
e2970f2f 1136 struct futex_hash_bucket *hb1, *hb2;
ec92d082 1137 struct plist_head *head1;
1da177e4 1138 struct futex_q *this, *next;
52400ba9
DH
1139 u32 curval2;
1140
1141 if (requeue_pi) {
1142 /*
1143 * requeue_pi requires a pi_state, try to allocate it now
1144 * without any locks in case it fails.
1145 */
1146 if (refill_pi_state_cache())
1147 return -ENOMEM;
1148 /*
1149 * requeue_pi must wake as many tasks as it can, up to nr_wake
1150 * + nr_requeue, since it acquires the rt_mutex prior to
1151 * returning to userspace, so as to not leave the rt_mutex with
1152 * waiters and no owner. However, second and third wake-ups
1153 * cannot be predicted as they involve race conditions with the
1154 * first wake and a fault while looking up the pi_state. Both
1155 * pthread_cond_signal() and pthread_cond_broadcast() should
1156 * use nr_wake=1.
1157 */
1158 if (nr_wake != 1)
1159 return -EINVAL;
1160 }
1da177e4 1161
42d35d48 1162retry:
52400ba9
DH
1163 if (pi_state != NULL) {
1164 /*
1165 * We will have to lookup the pi_state again, so free this one
1166 * to keep the accounting correct.
1167 */
1168 free_pi_state(pi_state);
1169 pi_state = NULL;
1170 }
1171
64d1304a 1172 ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
1da177e4
LT
1173 if (unlikely(ret != 0))
1174 goto out;
521c1808
TG
1175 ret = get_futex_key(uaddr2, fshared, &key2,
1176 requeue_pi ? VERIFY_WRITE : VERIFY_READ);
1da177e4 1177 if (unlikely(ret != 0))
42d35d48 1178 goto out_put_key1;
1da177e4 1179
e2970f2f
IM
1180 hb1 = hash_futex(&key1);
1181 hb2 = hash_futex(&key2);
1da177e4 1182
e4dc5b7a 1183retry_private:
8b8f319f 1184 double_lock_hb(hb1, hb2);
1da177e4 1185
e2970f2f
IM
1186 if (likely(cmpval != NULL)) {
1187 u32 curval;
1da177e4 1188
e2970f2f 1189 ret = get_futex_value_locked(&curval, uaddr1);
1da177e4
LT
1190
1191 if (unlikely(ret)) {
5eb3dc62 1192 double_unlock_hb(hb1, hb2);
1da177e4 1193
e2970f2f 1194 ret = get_user(curval, uaddr1);
e4dc5b7a
DH
1195 if (ret)
1196 goto out_put_keys;
1da177e4 1197
e4dc5b7a
DH
1198 if (!fshared)
1199 goto retry_private;
1da177e4 1200
e4dc5b7a
DH
1201 put_futex_key(fshared, &key2);
1202 put_futex_key(fshared, &key1);
1203 goto retry;
1da177e4 1204 }
e2970f2f 1205 if (curval != *cmpval) {
1da177e4
LT
1206 ret = -EAGAIN;
1207 goto out_unlock;
1208 }
1209 }
1210
52400ba9 1211 if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
bab5bc9e
DH
1212 /*
1213 * Attempt to acquire uaddr2 and wake the top waiter. If we
1214 * intend to requeue waiters, force setting the FUTEX_WAITERS
1215 * bit. We force this here where we are able to easily handle
1216 * faults rather in the requeue loop below.
1217 */
52400ba9 1218 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
bab5bc9e 1219 &key2, &pi_state, nr_requeue);
52400ba9
DH
1220
1221 /*
1222 * At this point the top_waiter has either taken uaddr2 or is
1223 * waiting on it. If the former, then the pi_state will not
1224 * exist yet, look it up one more time to ensure we have a
1225 * reference to it.
1226 */
1227 if (ret == 1) {
1228 WARN_ON(pi_state);
89061d3d 1229 drop_count++;
52400ba9
DH
1230 task_count++;
1231 ret = get_futex_value_locked(&curval2, uaddr2);
1232 if (!ret)
1233 ret = lookup_pi_state(curval2, hb2, &key2,
1234 &pi_state);
1235 }
1236
1237 switch (ret) {
1238 case 0:
1239 break;
1240 case -EFAULT:
1241 double_unlock_hb(hb1, hb2);
1242 put_futex_key(fshared, &key2);
1243 put_futex_key(fshared, &key1);
d0725992 1244 ret = fault_in_user_writeable(uaddr2);
52400ba9
DH
1245 if (!ret)
1246 goto retry;
1247 goto out;
1248 case -EAGAIN:
1249 /* The owner was exiting, try again. */
1250 double_unlock_hb(hb1, hb2);
1251 put_futex_key(fshared, &key2);
1252 put_futex_key(fshared, &key1);
1253 cond_resched();
1254 goto retry;
1255 default:
1256 goto out_unlock;
1257 }
1258 }
1259
e2970f2f 1260 head1 = &hb1->chain;
ec92d082 1261 plist_for_each_entry_safe(this, next, head1, list) {
52400ba9
DH
1262 if (task_count - nr_wake >= nr_requeue)
1263 break;
1264
1265 if (!match_futex(&this->key, &key1))
1da177e4 1266 continue;
52400ba9 1267
392741e0
DH
1268 /*
1269 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1270 * be paired with each other and no other futex ops.
1271 */
1272 if ((requeue_pi && !this->rt_waiter) ||
1273 (!requeue_pi && this->rt_waiter)) {
1274 ret = -EINVAL;
1275 break;
1276 }
52400ba9
DH
1277
1278 /*
1279 * Wake nr_wake waiters. For requeue_pi, if we acquired the
1280 * lock, we already woke the top_waiter. If not, it will be
1281 * woken by futex_unlock_pi().
1282 */
1283 if (++task_count <= nr_wake && !requeue_pi) {
1da177e4 1284 wake_futex(this);
52400ba9
DH
1285 continue;
1286 }
1da177e4 1287
84bc4af5
DH
1288 /* Ensure we requeue to the expected futex for requeue_pi. */
1289 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
1290 ret = -EINVAL;
1291 break;
1292 }
1293
52400ba9
DH
1294 /*
1295 * Requeue nr_requeue waiters and possibly one more in the case
1296 * of requeue_pi if we couldn't acquire the lock atomically.
1297 */
1298 if (requeue_pi) {
1299 /* Prepare the waiter to take the rt_mutex. */
1300 atomic_inc(&pi_state->refcount);
1301 this->pi_state = pi_state;
1302 ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1303 this->rt_waiter,
1304 this->task, 1);
1305 if (ret == 1) {
1306 /* We got the lock. */
beda2c7e 1307 requeue_pi_wake_futex(this, &key2, hb2);
89061d3d 1308 drop_count++;
52400ba9
DH
1309 continue;
1310 } else if (ret) {
1311 /* -EDEADLK */
1312 this->pi_state = NULL;
1313 free_pi_state(pi_state);
1314 goto out_unlock;
1315 }
1da177e4 1316 }
52400ba9
DH
1317 requeue_futex(this, hb1, hb2, &key2);
1318 drop_count++;
1da177e4
LT
1319 }
1320
1321out_unlock:
5eb3dc62 1322 double_unlock_hb(hb1, hb2);
1da177e4 1323
cd84a42f
DH
1324 /*
1325 * drop_futex_key_refs() must be called outside the spinlocks. During
1326 * the requeue we moved futex_q's from the hash bucket at key1 to the
1327 * one at key2 and updated their key pointer. We no longer need to
1328 * hold the references to key1.
1329 */
1da177e4 1330 while (--drop_count >= 0)
9adef58b 1331 drop_futex_key_refs(&key1);
1da177e4 1332
42d35d48 1333out_put_keys:
38d47c1b 1334 put_futex_key(fshared, &key2);
42d35d48 1335out_put_key1:
38d47c1b 1336 put_futex_key(fshared, &key1);
42d35d48 1337out:
52400ba9
DH
1338 if (pi_state != NULL)
1339 free_pi_state(pi_state);
1340 return ret ? ret : task_count;
1da177e4
LT
1341}
1342
1343/* The key must be already stored in q->key. */
82af7aca 1344static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1da177e4 1345{
e2970f2f 1346 struct futex_hash_bucket *hb;
1da177e4 1347
9adef58b 1348 get_futex_key_refs(&q->key);
e2970f2f
IM
1349 hb = hash_futex(&q->key);
1350 q->lock_ptr = &hb->lock;
1da177e4 1351
e2970f2f
IM
1352 spin_lock(&hb->lock);
1353 return hb;
1da177e4
LT
1354}
1355
d40d65c8
DH
1356static inline void
1357queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
1358{
1359 spin_unlock(&hb->lock);
1360 drop_futex_key_refs(&q->key);
1361}
1362
1363/**
1364 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
1365 * @q: The futex_q to enqueue
1366 * @hb: The destination hash bucket
1367 *
1368 * The hb->lock must be held by the caller, and is released here. A call to
1369 * queue_me() is typically paired with exactly one call to unqueue_me(). The
1370 * exceptions involve the PI related operations, which may use unqueue_me_pi()
1371 * or nothing if the unqueue is done as part of the wake process and the unqueue
1372 * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
1373 * an example).
1374 */
82af7aca 1375static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1da177e4 1376{
ec92d082
PP
1377 int prio;
1378
1379 /*
1380 * The priority used to register this element is
1381 * - either the real thread-priority for the real-time threads
1382 * (i.e. threads with a priority lower than MAX_RT_PRIO)
1383 * - or MAX_RT_PRIO for non-RT threads.
1384 * Thus, all RT-threads are woken first in priority order, and
1385 * the others are woken last, in FIFO order.
1386 */
1387 prio = min(current->normal_prio, MAX_RT_PRIO);
1388
1389 plist_node_init(&q->list, prio);
1390#ifdef CONFIG_DEBUG_PI_LIST
1391 q->list.plist.lock = &hb->lock;
1392#endif
1393 plist_add(&q->list, &hb->chain);
c87e2837 1394 q->task = current;
e2970f2f 1395 spin_unlock(&hb->lock);
1da177e4
LT
1396}
1397
d40d65c8
DH
1398/**
1399 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
1400 * @q: The futex_q to unqueue
1401 *
1402 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
1403 * be paired with exactly one earlier call to queue_me().
1404 *
1405 * Returns:
 1406 * 1 - if the futex_q was still queued (and we unqueued it)
1407 * 0 - if the futex_q was already removed by the waking thread
1da177e4 1408 */
1da177e4
LT
1409static int unqueue_me(struct futex_q *q)
1410{
1da177e4 1411 spinlock_t *lock_ptr;
e2970f2f 1412 int ret = 0;
1da177e4
LT
1413
1414 /* In the common case we don't take the spinlock, which is nice. */
42d35d48 1415retry:
1da177e4 1416 lock_ptr = q->lock_ptr;
e91467ec 1417 barrier();
c80544dc 1418 if (lock_ptr != NULL) {
1da177e4
LT
1419 spin_lock(lock_ptr);
1420 /*
1421 * q->lock_ptr can change between reading it and
1422 * spin_lock(), causing us to take the wrong lock. This
1423 * corrects the race condition.
1424 *
1425 * Reasoning goes like this: if we have the wrong lock,
1426 * q->lock_ptr must have changed (maybe several times)
1427 * between reading it and the spin_lock(). It can
1428 * change again after the spin_lock() but only if it was
1429 * already changed before the spin_lock(). It cannot,
1430 * however, change back to the original value. Therefore
1431 * we can detect whether we acquired the correct lock.
1432 */
1433 if (unlikely(lock_ptr != q->lock_ptr)) {
1434 spin_unlock(lock_ptr);
1435 goto retry;
1436 }
ec92d082
PP
1437 WARN_ON(plist_node_empty(&q->list));
1438 plist_del(&q->list, &q->list.plist);
c87e2837
IM
1439
1440 BUG_ON(q->pi_state);
1441
1da177e4
LT
1442 spin_unlock(lock_ptr);
1443 ret = 1;
1444 }
1445
9adef58b 1446 drop_futex_key_refs(&q->key);
1da177e4
LT
1447 return ret;
1448}
1449
c87e2837
IM
1450/*
 1451 * PI futexes cannot be requeued and must remove themselves from the
d0aa7a70
PP
1452 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
1453 * and dropped here.
c87e2837 1454 */
d0aa7a70 1455static void unqueue_me_pi(struct futex_q *q)
c87e2837 1456{
ec92d082
PP
1457 WARN_ON(plist_node_empty(&q->list));
1458 plist_del(&q->list, &q->list.plist);
c87e2837
IM
1459
1460 BUG_ON(!q->pi_state);
1461 free_pi_state(q->pi_state);
1462 q->pi_state = NULL;
1463
d0aa7a70 1464 spin_unlock(q->lock_ptr);
c87e2837 1465
9adef58b 1466 drop_futex_key_refs(&q->key);
c87e2837
IM
1467}
1468
d0aa7a70 1469/*
cdf71a10 1470 * Fixup the pi_state owner with the new owner.
d0aa7a70 1471 *
778e9a9c
AK
1472 * Must be called with hash bucket lock held and mm->sem held for non
1473 * private futexes.
d0aa7a70 1474 */
778e9a9c 1475static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
c2f9f201 1476 struct task_struct *newowner, int fshared)
d0aa7a70 1477{
cdf71a10 1478 u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
d0aa7a70 1479 struct futex_pi_state *pi_state = q->pi_state;
1b7558e4 1480 struct task_struct *oldowner = pi_state->owner;
d0aa7a70 1481 u32 uval, curval, newval;
e4dc5b7a 1482 int ret;
d0aa7a70
PP
1483
1484 /* Owner died? */
1b7558e4
TG
1485 if (!pi_state->owner)
1486 newtid |= FUTEX_OWNER_DIED;
1487
1488 /*
1489 * We are here either because we stole the rtmutex from the
1490 * pending owner or we are the pending owner which failed to
1491 * get the rtmutex. We have to replace the pending owner TID
1492 * in the user space variable. This must be atomic as we have
1493 * to preserve the owner died bit here.
1494 *
b2d0994b
DH
1495 * Note: We write the user space value _before_ changing the pi_state
1496 * because we can fault here. Imagine swapped out pages or a fork
1497 * that marked all the anonymous memory readonly for cow.
1b7558e4
TG
1498 *
1499 * Modifying pi_state _before_ the user space value would
1500 * leave the pi_state in an inconsistent state when we fault
1501 * here, because we need to drop the hash bucket lock to
1502 * handle the fault. This might be observed in the PID check
1503 * in lookup_pi_state.
1504 */
1505retry:
1506 if (get_futex_value_locked(&uval, uaddr))
1507 goto handle_fault;
1508
1509 while (1) {
1510 newval = (uval & FUTEX_OWNER_DIED) | newtid;
1511
1512 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
1513
1514 if (curval == -EFAULT)
1515 goto handle_fault;
1516 if (curval == uval)
1517 break;
1518 uval = curval;
1519 }
1520
1521 /*
1522 * We fixed up user space. Now we need to fix the pi_state
1523 * itself.
1524 */
d0aa7a70
PP
1525 if (pi_state->owner != NULL) {
1526 spin_lock_irq(&pi_state->owner->pi_lock);
1527 WARN_ON(list_empty(&pi_state->list));
1528 list_del_init(&pi_state->list);
1529 spin_unlock_irq(&pi_state->owner->pi_lock);
1b7558e4 1530 }
d0aa7a70 1531
cdf71a10 1532 pi_state->owner = newowner;
d0aa7a70 1533
cdf71a10 1534 spin_lock_irq(&newowner->pi_lock);
d0aa7a70 1535 WARN_ON(!list_empty(&pi_state->list));
cdf71a10
TG
1536 list_add(&pi_state->list, &newowner->pi_state_list);
1537 spin_unlock_irq(&newowner->pi_lock);
1b7558e4 1538 return 0;
d0aa7a70 1539
d0aa7a70 1540 /*
1b7558e4
TG
1541 * To handle the page fault we need to drop the hash bucket
1542 * lock here. That gives the other task (either the pending
1543 * owner itself or the task which stole the rtmutex) the
1544 * chance to try the fixup of the pi_state. So once we are
1545 * back from handling the fault we need to check the pi_state
1546 * after reacquiring the hash bucket lock and before trying to
1547 * do another fixup. When the fixup has been done already we
1548 * simply return.
d0aa7a70 1549 */
1b7558e4
TG
1550handle_fault:
1551 spin_unlock(q->lock_ptr);
778e9a9c 1552
d0725992 1553 ret = fault_in_user_writeable(uaddr);
778e9a9c 1554
1b7558e4 1555 spin_lock(q->lock_ptr);
778e9a9c 1556
1b7558e4
TG
1557 /*
1558 * Check if someone else fixed it for us:
1559 */
1560 if (pi_state->owner != oldowner)
1561 return 0;
1562
1563 if (ret)
1564 return ret;
1565
1566 goto retry;
d0aa7a70
PP
1567}
1568
34f01cc1
ED
1569/*
1570 * In case we must use restart_block to restart a futex_wait,
ce6bd420 1571 * we encode in the 'flags' shared capability
34f01cc1 1572 */
1acdac10
TG
1573#define FLAGS_SHARED 0x01
1574#define FLAGS_CLOCKRT 0x02
a72188d8 1575#define FLAGS_HAS_TIMEOUT 0x04
34f01cc1 1576
72c1bbf3 1577static long futex_wait_restart(struct restart_block *restart);
36cf3b5c 1578
dd973998
DH
1579/**
1580 * fixup_owner() - Post lock pi_state and corner case management
1581 * @uaddr: user address of the futex
1582 * @fshared: whether the futex is shared (1) or not (0)
1583 * @q: futex_q (contains pi_state and access to the rt_mutex)
1584 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
1585 *
1586 * After attempting to lock an rt_mutex, this function is called to cleanup
1587 * the pi_state owner as well as handle race conditions that may allow us to
1588 * acquire the lock. Must be called with the hb lock held.
1589 *
1590 * Returns:
1591 * 1 - success, lock taken
1592 * 0 - success, lock not taken
1593 * <0 - on error (-EFAULT)
1594 */
1595static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q,
1596 int locked)
1597{
1598 struct task_struct *owner;
1599 int ret = 0;
1600
1601 if (locked) {
1602 /*
1603 * Got the lock. We might not be the anticipated owner if we
1604 * did a lock-steal - fix up the PI-state in that case:
1605 */
1606 if (q->pi_state->owner != current)
1607 ret = fixup_pi_state_owner(uaddr, q, current, fshared);
1608 goto out;
1609 }
1610
1611 /*
1612 * Catch the rare case, where the lock was released when we were on the
1613 * way back before we locked the hash bucket.
1614 */
1615 if (q->pi_state->owner == current) {
1616 /*
1617 * Try to get the rt_mutex now. This might fail as some other
 1618 * task acquired the rt_mutex after we removed ourselves from the
1619 * rt_mutex waiters list.
1620 */
1621 if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
1622 locked = 1;
1623 goto out;
1624 }
1625
1626 /*
1627 * pi_state is incorrect, some other task did a lock steal and
1628 * we returned due to timeout or signal without taking the
1629 * rt_mutex. Too late. We can access the rt_mutex_owner without
1630 * locking, as the other task is now blocked on the hash bucket
1631 * lock. Fix the state up.
1632 */
1633 owner = rt_mutex_owner(&q->pi_state->pi_mutex);
1634 ret = fixup_pi_state_owner(uaddr, q, owner, fshared);
1635 goto out;
1636 }
1637
1638 /*
1639 * Paranoia check. If we did not take the lock, then we should not be
1640 * the owner, nor the pending owner, of the rt_mutex.
1641 */
1642 if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
1643 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
1644 "pi-state %p\n", ret,
1645 q->pi_state->pi_mutex.owner,
1646 q->pi_state->owner);
1647
1648out:
1649 return ret ? ret : locked;
1650}
1651
ca5f9524
DH
1652/**
1653 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
1654 * @hb: the futex hash bucket, must be locked by the caller
1655 * @q: the futex_q to queue up on
1656 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
ca5f9524
DH
1657 */
1658static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
f1a11e05 1659 struct hrtimer_sleeper *timeout)
ca5f9524 1660{
9beba3c5
DH
1661 /*
1662 * The task state is guaranteed to be set before another task can
1663 * wake it. set_current_state() is implemented using set_mb() and
1664 * queue_me() calls spin_unlock() upon completion, both serializing
1665 * access to the hash list and forcing another memory barrier.
1666 */
f1a11e05 1667 set_current_state(TASK_INTERRUPTIBLE);
0729e196 1668 queue_me(q, hb);
ca5f9524
DH
1669
1670 /* Arm the timer */
1671 if (timeout) {
1672 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
1673 if (!hrtimer_active(&timeout->timer))
1674 timeout->task = NULL;
1675 }
1676
1677 /*
0729e196
DH
1678 * If we have been removed from the hash list, then another task
1679 * has tried to wake us, and we can skip the call to schedule().
ca5f9524
DH
1680 */
1681 if (likely(!plist_node_empty(&q->list))) {
1682 /*
1683 * If the timer has already expired, current will already be
1684 * flagged for rescheduling. Only call schedule if there
1685 * is no timeout, or if it has yet to expire.
1686 */
1687 if (!timeout || timeout->task)
1688 schedule();
1689 }
1690 __set_current_state(TASK_RUNNING);
1691}
1692
f801073f
DH
1693/**
1694 * futex_wait_setup() - Prepare to wait on a futex
1695 * @uaddr: the futex userspace address
1696 * @val: the expected value
1697 * @fshared: whether the futex is shared (1) or not (0)
1698 * @q: the associated futex_q
1699 * @hb: storage for hash_bucket pointer to be returned to caller
1700 *
1701 * Setup the futex_q and locate the hash_bucket. Get the futex value and
1702 * compare it with the expected value. Handle atomic faults internally.
1703 * Return with the hb lock held and a q.key reference on success, and unlocked
1704 * with no q.key reference on failure.
1705 *
1706 * Returns:
1707 * 0 - uaddr contains val and hb has been locked
 1708 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
1709 */
1710static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
1711 struct futex_q *q, struct futex_hash_bucket **hb)
1da177e4 1712{
e2970f2f
IM
1713 u32 uval;
1714 int ret;
1da177e4 1715
1da177e4 1716 /*
b2d0994b 1717 * Access the page AFTER the hash-bucket is locked.
1da177e4
LT
1718 * Order is important:
1719 *
1720 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
1721 * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
1722 *
1723 * The basic logical guarantee of a futex is that it blocks ONLY
1724 * if cond(var) is known to be true at the time of blocking, for
1725 * any cond. If we queued after testing *uaddr, that would open
1726 * a race condition where we could block indefinitely with
1727 * cond(var) false, which would violate the guarantee.
1728 *
1729 * A consequence is that futex_wait() can return zero and absorb
1730 * a wakeup when *uaddr != val on entry to the syscall. This is
1731 * rare, but normal.
1da177e4 1732 */
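/*
 * Illustrative sketch (not part of the kernel source): the waiter/waker
 * pair from the comment above, written against the raw futex(2) syscall.
 * cond() and new_val are application-defined; all names are hypothetical
 * example code.
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int var;
 *
 *	// Waiter: the kernel rechecks *uaddr == val under the hash bucket
 *	// lock, so we block only if cond(val) still held at that point.
 *	int val = var;
 *	if (cond(val))
 *		syscall(SYS_futex, &var, FUTEX_WAIT, val, NULL);
 *
 *	// Waker: update the variable first, then wake one waiter.
 *	if (cond(var)) {
 *		var = new_val;
 *		syscall(SYS_futex, &var, FUTEX_WAKE, 1);
 *	}
 */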
f801073f
DH
1733retry:
1734 q->key = FUTEX_KEY_INIT;
521c1808 1735 ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ);
f801073f 1736 if (unlikely(ret != 0))
a5a2a0c7 1737 return ret;
f801073f
DH
1738
1739retry_private:
1740 *hb = queue_lock(q);
1741
e2970f2f 1742 ret = get_futex_value_locked(&uval, uaddr);
1da177e4 1743
f801073f
DH
1744 if (ret) {
1745 queue_unlock(q, *hb);
1da177e4 1746
e2970f2f 1747 ret = get_user(uval, uaddr);
e4dc5b7a 1748 if (ret)
f801073f 1749 goto out;
1da177e4 1750
e4dc5b7a
DH
1751 if (!fshared)
1752 goto retry_private;
1753
f801073f 1754 put_futex_key(fshared, &q->key);
e4dc5b7a 1755 goto retry;
1da177e4 1756 }
ca5f9524 1757
f801073f
DH
1758 if (uval != val) {
1759 queue_unlock(q, *hb);
1760 ret = -EWOULDBLOCK;
2fff78c7 1761 }
1da177e4 1762
f801073f
DH
1763out:
1764 if (ret)
1765 put_futex_key(fshared, &q->key);
1766 return ret;
1767}
1768
1769static int futex_wait(u32 __user *uaddr, int fshared,
1770 u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
1771{
1772 struct hrtimer_sleeper timeout, *to = NULL;
f801073f
DH
1773 struct restart_block *restart;
1774 struct futex_hash_bucket *hb;
1775 struct futex_q q;
1776 int ret;
1777
1778 if (!bitset)
1779 return -EINVAL;
1780
1781 q.pi_state = NULL;
1782 q.bitset = bitset;
52400ba9 1783 q.rt_waiter = NULL;
84bc4af5 1784 q.requeue_pi_key = NULL;
f801073f
DH
1785
1786 if (abs_time) {
1787 to = &timeout;
1788
1789 hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
1790 CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1791 hrtimer_init_sleeper(to, current);
1792 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
1793 current->timer_slack_ns);
1794 }
1795
d58e6576 1796retry:
f801073f
DH
1797 /* Prepare to wait on uaddr. */
1798 ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
1799 if (ret)
1800 goto out;
1801
ca5f9524 1802 /* queue_me and wait for wakeup, timeout, or a signal. */
f1a11e05 1803 futex_wait_queue_me(hb, &q, to);
1da177e4
LT
1804
1805 /* If we were woken (and unqueued), we succeeded, whatever. */
2fff78c7 1806 ret = 0;
1da177e4 1807 if (!unqueue_me(&q))
2fff78c7
PZ
1808 goto out_put_key;
1809 ret = -ETIMEDOUT;
ca5f9524 1810 if (to && !to->task)
2fff78c7 1811 goto out_put_key;
72c1bbf3 1812
e2970f2f 1813 /*
d58e6576
TG
1814 * We expect signal_pending(current), but we might be the
1815 * victim of a spurious wakeup as well.
e2970f2f 1816 */
d58e6576
TG
1817 if (!signal_pending(current)) {
1818 put_futex_key(fshared, &q.key);
1819 goto retry;
1820 }
1821
2fff78c7 1822 ret = -ERESTARTSYS;
c19384b5 1823 if (!abs_time)
2fff78c7 1824 goto out_put_key;
1da177e4 1825
2fff78c7
PZ
1826 restart = &current_thread_info()->restart_block;
1827 restart->fn = futex_wait_restart;
1828 restart->futex.uaddr = (u32 *)uaddr;
1829 restart->futex.val = val;
1830 restart->futex.time = abs_time->tv64;
1831 restart->futex.bitset = bitset;
a72188d8 1832 restart->futex.flags = FLAGS_HAS_TIMEOUT;
2fff78c7
PZ
1833
1834 if (fshared)
1835 restart->futex.flags |= FLAGS_SHARED;
1836 if (clockrt)
1837 restart->futex.flags |= FLAGS_CLOCKRT;
42d35d48 1838
2fff78c7
PZ
1839 ret = -ERESTART_RESTARTBLOCK;
1840
1841out_put_key:
1842 put_futex_key(fshared, &q.key);
42d35d48 1843out:
ca5f9524
DH
1844 if (to) {
1845 hrtimer_cancel(&to->timer);
1846 destroy_hrtimer_on_stack(&to->timer);
1847 }
c87e2837
IM
1848 return ret;
1849}
1850
72c1bbf3
NP
1851
1852static long futex_wait_restart(struct restart_block *restart)
1853{
ce6bd420 1854 u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
c2f9f201 1855 int fshared = 0;
a72188d8 1856 ktime_t t, *tp = NULL;
72c1bbf3 1857
a72188d8
DH
1858 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
1859 t.tv64 = restart->futex.time;
1860 tp = &t;
1861 }
72c1bbf3 1862 restart->fn = do_no_restart_syscall;
ce6bd420 1863 if (restart->futex.flags & FLAGS_SHARED)
c2f9f201 1864 fshared = 1;
a72188d8 1865 return (long)futex_wait(uaddr, fshared, restart->futex.val, tp,
1acdac10
TG
1866 restart->futex.bitset,
1867 restart->futex.flags & FLAGS_CLOCKRT);
72c1bbf3
NP
1868}
1869
1870
c87e2837
IM
1871/*
1872 * Userspace tried a 0 -> TID atomic transition of the futex value
1873 * and failed. The kernel side here does the whole locking operation:
1874 * if there are waiters then it will block, it does PI, etc. (Due to
1875 * races the kernel might see a 0 value of the futex too.)
1876 */
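/*
 * Illustrative sketch (not part of this file) of the user-space fast path
 * that leads into futex_lock_pi()/futex_unlock_pi() below. The names 'lock',
 * 'expected' and 'tid' are hypothetical; a real PI mutex implementation also
 * has to deal with FUTEX_WAITERS and FUTEX_OWNER_DIED in the futex word.
 *
 *	pid_t tid = syscall(SYS_gettid);
 *	int expected = 0;
 *
 *	// lock: try the uncontended 0 -> TID transition in user space
 *	if (!__atomic_compare_exchange_n(&lock, &expected, tid, 0,
 *					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 *		syscall(SYS_futex, &lock, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *
 *	// unlock: try the uncontended TID -> 0 transition; if FUTEX_WAITERS
 *	// is set the cmpxchg fails and the kernel hands the lock over
 *	expected = tid;
 *	if (!__atomic_compare_exchange_n(&lock, &expected, 0, 0,
 *					 __ATOMIC_RELEASE, __ATOMIC_RELAXED))
 *		syscall(SYS_futex, &lock, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 */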
c2f9f201 1877static int futex_lock_pi(u32 __user *uaddr, int fshared,
34f01cc1 1878 int detect, ktime_t *time, int trylock)
c87e2837 1879{
c5780e97 1880 struct hrtimer_sleeper timeout, *to = NULL;
c87e2837 1881 struct futex_hash_bucket *hb;
c87e2837 1882 struct futex_q q;
dd973998 1883 int res, ret;
c87e2837
IM
1884
1885 if (refill_pi_state_cache())
1886 return -ENOMEM;
1887
c19384b5 1888 if (time) {
c5780e97 1889 to = &timeout;
237fc6e7
TG
1890 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
1891 HRTIMER_MODE_ABS);
c5780e97 1892 hrtimer_init_sleeper(to, current);
cc584b21 1893 hrtimer_set_expires(&to->timer, *time);
c5780e97
TG
1894 }
1895
c87e2837 1896 q.pi_state = NULL;
52400ba9 1897 q.rt_waiter = NULL;
84bc4af5 1898 q.requeue_pi_key = NULL;
42d35d48 1899retry:
38d47c1b 1900 q.key = FUTEX_KEY_INIT;
64d1304a 1901 ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
c87e2837 1902 if (unlikely(ret != 0))
42d35d48 1903 goto out;
c87e2837 1904
e4dc5b7a 1905retry_private:
82af7aca 1906 hb = queue_lock(&q);
c87e2837 1907
bab5bc9e 1908 ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
c87e2837 1909 if (unlikely(ret)) {
778e9a9c 1910 switch (ret) {
1a52084d
DH
1911 case 1:
1912 /* We got the lock. */
1913 ret = 0;
1914 goto out_unlock_put_key;
1915 case -EFAULT:
1916 goto uaddr_faulted;
778e9a9c
AK
1917 case -EAGAIN:
1918 /*
1919 * Task is exiting and we just wait for the
1920 * exit to complete.
1921 */
1922 queue_unlock(&q, hb);
de87fcc1 1923 put_futex_key(fshared, &q.key);
778e9a9c
AK
1924 cond_resched();
1925 goto retry;
778e9a9c 1926 default:
42d35d48 1927 goto out_unlock_put_key;
c87e2837 1928 }
c87e2837
IM
1929 }
1930
1931 /*
1932 * Only actually queue now that the atomic ops are done:
1933 */
82af7aca 1934 queue_me(&q, hb);
c87e2837 1935
c87e2837
IM
1936 WARN_ON(!q.pi_state);
1937 /*
1938 * Block on the PI mutex:
1939 */
1940 if (!trylock)
1941 ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
1942 else {
1943 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
1944 /* Fixup the trylock return value: */
1945 ret = ret ? 0 : -EWOULDBLOCK;
1946 }
1947
a99e4e41 1948 spin_lock(q.lock_ptr);
dd973998
DH
1949 /*
1950 * Fixup the pi_state owner and possibly acquire the lock if we
1951 * haven't already.
1952 */
1953 res = fixup_owner(uaddr, fshared, &q, !ret);
1954 /*
 1955 * If fixup_owner() returned an error, propagate that. If it acquired
1956 * the lock, clear our -ETIMEDOUT or -EINTR.
1957 */
1958 if (res)
1959 ret = (res < 0) ? res : 0;
c87e2837 1960
e8f6386c 1961 /*
dd973998
DH
1962 * If fixup_owner() faulted and was unable to handle the fault, unlock
1963 * it and return the fault to userspace.
e8f6386c
DH
1964 */
1965 if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
1966 rt_mutex_unlock(&q.pi_state->pi_mutex);
1967
778e9a9c
AK
1968 /* Unqueue and drop the lock */
1969 unqueue_me_pi(&q);
c87e2837 1970
dd973998 1971 goto out;
c87e2837 1972
42d35d48 1973out_unlock_put_key:
c87e2837
IM
1974 queue_unlock(&q, hb);
1975
42d35d48 1976out_put_key:
38d47c1b 1977 put_futex_key(fshared, &q.key);
42d35d48 1978out:
237fc6e7
TG
1979 if (to)
1980 destroy_hrtimer_on_stack(&to->timer);
dd973998 1981 return ret != -EINTR ? ret : -ERESTARTNOINTR;
c87e2837 1982
42d35d48 1983uaddr_faulted:
778e9a9c
AK
1984 queue_unlock(&q, hb);
1985
d0725992 1986 ret = fault_in_user_writeable(uaddr);
e4dc5b7a
DH
1987 if (ret)
1988 goto out_put_key;
c87e2837 1989
e4dc5b7a
DH
1990 if (!fshared)
1991 goto retry_private;
1992
1993 put_futex_key(fshared, &q.key);
1994 goto retry;
c87e2837
IM
1995}
1996
c87e2837
IM
1997/*
1998 * Userspace attempted a TID -> 0 atomic transition, and failed.
1999 * This is the in-kernel slowpath: we look up the PI state (if any),
2000 * and do the rt-mutex unlock.
2001 */
c2f9f201 2002static int futex_unlock_pi(u32 __user *uaddr, int fshared)
c87e2837
IM
2003{
2004 struct futex_hash_bucket *hb;
2005 struct futex_q *this, *next;
2006 u32 uval;
ec92d082 2007 struct plist_head *head;
38d47c1b 2008 union futex_key key = FUTEX_KEY_INIT;
e4dc5b7a 2009 int ret;
c87e2837
IM
2010
2011retry:
2012 if (get_user(uval, uaddr))
2013 return -EFAULT;
2014 /*
2015 * We release only a lock we actually own:
2016 */
b488893a 2017 if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
c87e2837 2018 return -EPERM;
c87e2837 2019
64d1304a 2020 ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
c87e2837
IM
2021 if (unlikely(ret != 0))
2022 goto out;
2023
2024 hb = hash_futex(&key);
2025 spin_lock(&hb->lock);
2026
c87e2837
IM
2027 /*
2028 * To avoid races, try to do the TID -> 0 atomic transition
2029 * again. If it succeeds then we can return without waking
2030 * anyone else up:
2031 */
36cf3b5c 2032 if (!(uval & FUTEX_OWNER_DIED))
b488893a 2033 uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);
36cf3b5c 2034
c87e2837
IM
2035
2036 if (unlikely(uval == -EFAULT))
2037 goto pi_faulted;
2038 /*
2039 * Rare case: we managed to release the lock atomically,
2040 * no need to wake anyone else up:
2041 */
b488893a 2042 if (unlikely(uval == task_pid_vnr(current)))
c87e2837
IM
2043 goto out_unlock;
2044
2045 /*
2046 * Ok, other tasks may need to be woken up - check waiters
2047 * and do the wakeup if necessary:
2048 */
2049 head = &hb->chain;
2050
ec92d082 2051 plist_for_each_entry_safe(this, next, head, list) {
c87e2837
IM
2052 if (!match_futex (&this->key, &key))
2053 continue;
2054 ret = wake_futex_pi(uaddr, uval, this);
2055 /*
2056 * The atomic access to the futex value
2057 * generated a pagefault, so retry the
2058 * user-access and the wakeup:
2059 */
2060 if (ret == -EFAULT)
2061 goto pi_faulted;
2062 goto out_unlock;
2063 }
2064 /*
2065 * No waiters - kernel unlocks the futex:
2066 */
e3f2ddea
IM
2067 if (!(uval & FUTEX_OWNER_DIED)) {
2068 ret = unlock_futex_pi(uaddr, uval);
2069 if (ret == -EFAULT)
2070 goto pi_faulted;
2071 }
c87e2837
IM
2072
2073out_unlock:
2074 spin_unlock(&hb->lock);
38d47c1b 2075 put_futex_key(fshared, &key);
c87e2837 2076
42d35d48 2077out:
c87e2837
IM
2078 return ret;
2079
2080pi_faulted:
778e9a9c 2081 spin_unlock(&hb->lock);
e4dc5b7a 2082 put_futex_key(fshared, &key);
c87e2837 2083
d0725992 2084 ret = fault_in_user_writeable(uaddr);
b5686363 2085 if (!ret)
c87e2837
IM
2086 goto retry;
2087
1da177e4
LT
2088 return ret;
2089}
2090
52400ba9
DH
2091/**
2092 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 2093 * @hb: the hash_bucket futex_q was originally enqueued on
2094 * @q: the futex_q woken while waiting to be requeued
2095 * @key2: the futex_key of the requeue target futex
2096 * @timeout: the timeout associated with the wait (NULL if none)
2097 *
2098 * Detect if the task was woken on the initial futex as opposed to the requeue
2099 * target futex. If so, determine if it was a timeout or a signal that caused
2100 * the wakeup and return the appropriate error code to the caller. Must be
2101 * called with the hb lock held.
2102 *
 2103 * Returns:
 2104 * 0 - no early wakeup detected
1c840c14 2105 * <0 - -ETIMEDOUT, -ERESTARTNOINTR or -EAGAIN (spurious wakeup)
52400ba9
DH
2106 */
2107static inline
2108int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2109 struct futex_q *q, union futex_key *key2,
2110 struct hrtimer_sleeper *timeout)
2111{
2112 int ret = 0;
2113
2114 /*
2115 * With the hb lock held, we avoid races while we process the wakeup.
2116 * We only need to hold hb (and not hb2) to ensure atomicity as the
2117 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2118 * It can't be requeued from uaddr2 to something else since we don't
2119 * support a PI aware source futex for requeue.
2120 */
2121 if (!match_futex(&q->key, key2)) {
2122 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2123 /*
2124 * We were woken prior to requeue by a timeout or a signal.
2125 * Unqueue the futex_q and determine which it was.
2126 */
2127 plist_del(&q->list, &q->list.plist);
52400ba9 2128
d58e6576
TG
2129 /* Handle spurious wakeups gracefully */
2130 ret = -EAGAIN;
52400ba9
DH
2131 if (timeout && !timeout->task)
2132 ret = -ETIMEDOUT;
d58e6576 2133 else if (signal_pending(current))
1c840c14 2134 ret = -ERESTARTNOINTR;
52400ba9
DH
2135 }
2136 return ret;
2137}
2138
2139/**
2140 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
56ec1607 2141 * @uaddr: the futex we initially wait on (non-pi)
52400ba9
DH
2142 * @fshared: whether the futexes are shared (1) or not (0). They must be
2143 * the same type, no requeueing from private to shared, etc.
2144 * @val: the expected value of uaddr
2145 * @abs_time: absolute timeout
56ec1607 2146 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
52400ba9
DH
2147 * @clockrt: whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0)
2148 * @uaddr2: the pi futex we will take prior to returning to user-space
2149 *
2150 * The caller will wait on uaddr and will be requeued by futex_requeue() to
2151 * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
2152 * complete the acquisition of the rt_mutex prior to returning to userspace.
2153 * This ensures the rt_mutex maintains an owner when it has waiters; without
2154 * one, the pi logic wouldn't know which task to boost/deboost, if there was a
2155 * need to.
2156 *
2157 * We call schedule in futex_wait_queue_me() when we enqueue and return there
2158 * via the following:
2159 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
cc6db4e6
DH
2160 * 2) wakeup on uaddr2 after a requeue
2161 * 3) signal
2162 * 4) timeout
52400ba9 2163 *
cc6db4e6 2164 * If 3, cleanup and return -ERESTARTNOINTR.
52400ba9
DH
2165 *
2166 * If 2, we may then block on trying to take the rt_mutex and return via:
2167 * 5) successful lock
2168 * 6) signal
2169 * 7) timeout
2170 * 8) other lock acquisition failure
2171 *
cc6db4e6 2172 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
52400ba9
DH
2173 *
2174 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2175 *
2176 * Returns:
2177 * 0 - On success
2178 * <0 - On error
2179 */
2180static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
2181 u32 val, ktime_t *abs_time, u32 bitset,
2182 int clockrt, u32 __user *uaddr2)
2183{
2184 struct hrtimer_sleeper timeout, *to = NULL;
2185 struct rt_mutex_waiter rt_waiter;
2186 struct rt_mutex *pi_mutex = NULL;
52400ba9
DH
2187 struct futex_hash_bucket *hb;
2188 union futex_key key2;
2189 struct futex_q q;
2190 int res, ret;
52400ba9
DH
2191
2192 if (!bitset)
2193 return -EINVAL;
2194
2195 if (abs_time) {
2196 to = &timeout;
2197 hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
2198 CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2199 hrtimer_init_sleeper(to, current);
2200 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2201 current->timer_slack_ns);
2202 }
2203
2204 /*
2205 * The waiter is allocated on our stack, manipulated by the requeue
2206 * code while we sleep on uaddr.
2207 */
2208 debug_rt_mutex_init_waiter(&rt_waiter);
2209 rt_waiter.task = NULL;
2210
d58e6576 2211retry:
52400ba9 2212 key2 = FUTEX_KEY_INIT;
521c1808 2213 ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
52400ba9
DH
2214 if (unlikely(ret != 0))
2215 goto out;
2216
84bc4af5
DH
2217 q.pi_state = NULL;
2218 q.bitset = bitset;
2219 q.rt_waiter = &rt_waiter;
2220 q.requeue_pi_key = &key2;
2221
52400ba9
DH
2222 /* Prepare to wait on uaddr. */
2223 ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
c8b15a70
TG
2224 if (ret)
2225 goto out_key2;
52400ba9
DH
2226
2227 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
f1a11e05 2228 futex_wait_queue_me(hb, &q, to);
52400ba9
DH
2229
2230 spin_lock(&hb->lock);
2231 ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2232 spin_unlock(&hb->lock);
2233 if (ret)
2234 goto out_put_keys;
2235
2236 /*
2237 * In order for us to be here, we know our q.key == key2, and since
2238 * we took the hb->lock above, we also know that futex_requeue() has
2239 * completed and we no longer have to concern ourselves with a wakeup
 2240 * race with the atomic proxy lock acquisition by the requeue code.
2241 */
2242
2243 /* Check if the requeue code acquired the second futex for us. */
2244 if (!q.rt_waiter) {
2245 /*
2246 * Got the lock. We might not be the anticipated owner if we
2247 * did a lock-steal - fix up the PI-state in that case.
2248 */
2249 if (q.pi_state && (q.pi_state->owner != current)) {
2250 spin_lock(q.lock_ptr);
2251 ret = fixup_pi_state_owner(uaddr2, &q, current,
2252 fshared);
2253 spin_unlock(q.lock_ptr);
2254 }
2255 } else {
2256 /*
2257 * We have been woken up by futex_unlock_pi(), a timeout, or a
2258 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
2259 * the pi_state.
2260 */
 2261 WARN_ON(!q.pi_state);
2262 pi_mutex = &q.pi_state->pi_mutex;
2263 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
2264 debug_rt_mutex_free_waiter(&rt_waiter);
2265
2266 spin_lock(q.lock_ptr);
2267 /*
2268 * Fixup the pi_state owner and possibly acquire the lock if we
2269 * haven't already.
2270 */
2271 res = fixup_owner(uaddr2, fshared, &q, !ret);
2272 /*
 2273 * If fixup_owner() returned an error, propagate that. If it
56ec1607 2274 * acquired the lock, clear -ETIMEDOUT or -EINTR.
52400ba9
DH
2275 */
2276 if (res)
2277 ret = (res < 0) ? res : 0;
2278
2279 /* Unqueue and drop the lock. */
2280 unqueue_me_pi(&q);
2281 }
2282
2283 /*
2284 * If fixup_pi_state_owner() faulted and was unable to handle the
2285 * fault, unlock the rt_mutex and return the fault to userspace.
2286 */
2287 if (ret == -EFAULT) {
2288 if (rt_mutex_owner(pi_mutex) == current)
2289 rt_mutex_unlock(pi_mutex);
2290 } else if (ret == -EINTR) {
52400ba9 2291 /*
cc6db4e6
DH
2292 * We've already been requeued, but cannot restart by calling
2293 * futex_lock_pi() directly. We could restart this syscall, but
2294 * it would detect that the user space "val" changed and return
2295 * -EWOULDBLOCK. Save the overhead of the restart and return
2296 * -EWOULDBLOCK directly.
52400ba9 2297 */
2070887f 2298 ret = -EWOULDBLOCK;
52400ba9
DH
2299 }
2300
2301out_put_keys:
2302 put_futex_key(fshared, &q.key);
c8b15a70 2303out_key2:
52400ba9
DH
2304 put_futex_key(fshared, &key2);
2305
d58e6576
TG
2306 /* Spurious wakeup ? */
2307 if (ret == -EAGAIN)
2308 goto retry;
52400ba9
DH
2309out:
2310 if (to) {
2311 hrtimer_cancel(&to->timer);
2312 destroy_hrtimer_on_stack(&to->timer);
2313 }
2314 return ret;
2315}
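/*
 * Illustrative sketch (not from this file) of the intended consumer of the
 * requeue-PI machinery above: a condition variable whose associated mutex is
 * a PI futex. The field and helper names (cond->seq, pi_mutex->futex,
 * pi_mutex_unlock()) are hypothetical.
 *
 *	// cond_wait(cond, pi_mutex):
 *	unsigned int val = cond->seq;		// snapshot while holding mutex
 *	pi_mutex_unlock(pi_mutex);
 *	syscall(SYS_futex, &cond->seq, FUTEX_WAIT_REQUEUE_PI, val,
 *		NULL, &pi_mutex->futex, 0);
 *	// on success the kernel has already acquired pi_mutex for us;
 *	// on error the caller must take pi_mutex itself
 *
 *	// cond_broadcast(cond):
 *	cond->seq++;
 *	syscall(SYS_futex, &cond->seq, FUTEX_CMP_REQUEUE_PI, 1,
 *		(void *)INT_MAX,	// nr_requeue, via the timeout slot
 *		&pi_mutex->futex, cond->seq);
 *
 * FUTEX_CMP_REQUEUE_PI wakes at most one waiter by acquiring the PI futex on
 * its behalf and requeues the rest onto it, so the rt_mutex always has an
 * owner to boost while there are waiters.
 */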
2316
0771dfef
IM
2317/*
2318 * Support for robust futexes: the kernel cleans up held futexes at
2319 * thread exit time.
2320 *
2321 * Implementation: user-space maintains a per-thread list of locks it
2322 * is holding. Upon do_exit(), the kernel carefully walks this list,
2323 * and marks all locks that are owned by this thread with the
c87e2837 2324 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
0771dfef
IM
2325 * always manipulated with the lock held, so the list is private and
2326 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2327 * field, to allow the kernel to clean up if the thread dies after
2328 * acquiring the lock, but just before it could have added itself to
2329 * the list. There can only be one such pending lock.
2330 */
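/*
 * Illustrative sketch (not part of this file) of the user-space side of the
 * protocol described above. The robust_list_head/robust_list layout comes
 * from <linux/futex.h>; the 'struct my_lock' type with its embedded 'list'
 * node and 'futex' word, and the acquire() helper, are assumptions of the
 * example.
 *
 *	static struct robust_list_head head = {
 *		.list		 = { &head.list },	// empty circular list
 *		.futex_offset	 = offsetof(struct my_lock, futex) -
 *				   offsetof(struct my_lock, list),
 *		.list_op_pending = NULL,
 *	};
 *
 *	// once per thread:
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 *	// before taking 'lock': advertise the pending acquisition
 *	head.list_op_pending = &lock->list;
 *	acquire(lock);				// cmpxchg 0 -> gettid()
 *	// link the held lock into the list, then clear the pending slot
 *	lock->list.next = head.list.next;
 *	head.list.next = &lock->list;
 *	head.list_op_pending = NULL;
 *
 * If the thread dies anywhere in that sequence, exit_robust_list() below
 * walks the list (and the pending entry), marks each owned futex word with
 * FUTEX_OWNER_DIED and wakes one waiter.
 */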
2331
2332/**
d96ee56c
DH
2333 * sys_set_robust_list() - Set the robust-futex list head of a task
2334 * @head: pointer to the list-head
2335 * @len: length of the list-head, as userspace expects
0771dfef 2336 */
836f92ad
HC
2337SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2338 size_t, len)
0771dfef 2339{
a0c1e907
TG
2340 if (!futex_cmpxchg_enabled)
2341 return -ENOSYS;
0771dfef
IM
2342 /*
2343 * The kernel knows only one size for now:
2344 */
2345 if (unlikely(len != sizeof(*head)))
2346 return -EINVAL;
2347
2348 current->robust_list = head;
2349
2350 return 0;
2351}
2352
2353/**
d96ee56c
DH
2354 * sys_get_robust_list() - Get the robust-futex list head of a task
2355 * @pid: pid of the process [zero for current task]
2356 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
2357 * @len_ptr: pointer to a length field, the kernel fills in the header size
0771dfef 2358 */
836f92ad
HC
2359SYSCALL_DEFINE3(get_robust_list, int, pid,
2360 struct robust_list_head __user * __user *, head_ptr,
2361 size_t __user *, len_ptr)
0771dfef 2362{
ba46df98 2363 struct robust_list_head __user *head;
0771dfef 2364 unsigned long ret;
c69e8d9c 2365 const struct cred *cred = current_cred(), *pcred;
0771dfef 2366
a0c1e907
TG
2367 if (!futex_cmpxchg_enabled)
2368 return -ENOSYS;
2369
0771dfef
IM
2370 if (!pid)
2371 head = current->robust_list;
2372 else {
2373 struct task_struct *p;
2374
2375 ret = -ESRCH;
aaa2a97e 2376 rcu_read_lock();
228ebcbe 2377 p = find_task_by_vpid(pid);
0771dfef
IM
2378 if (!p)
2379 goto err_unlock;
2380 ret = -EPERM;
c69e8d9c
DH
2381 pcred = __task_cred(p);
2382 if (cred->euid != pcred->euid &&
2383 cred->euid != pcred->uid &&
76aac0e9 2384 !capable(CAP_SYS_PTRACE))
0771dfef
IM
2385 goto err_unlock;
2386 head = p->robust_list;
aaa2a97e 2387 rcu_read_unlock();
0771dfef
IM
2388 }
2389
2390 if (put_user(sizeof(*head), len_ptr))
2391 return -EFAULT;
2392 return put_user(head, head_ptr);
2393
2394err_unlock:
aaa2a97e 2395 rcu_read_unlock();
0771dfef
IM
2396
2397 return ret;
2398}
2399
2400/*
2401 * Process a futex-list entry, check whether it's owned by the
2402 * dying task, and do notification if so:
2403 */
e3f2ddea 2404int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
0771dfef 2405{
e3f2ddea 2406 u32 uval, nval, mval;
0771dfef 2407
8f17d3a5
IM
2408retry:
2409 if (get_user(uval, uaddr))
0771dfef
IM
2410 return -1;
2411
b488893a 2412 if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
0771dfef
IM
2413 /*
2414 * Ok, this dying thread is truly holding a futex
2415 * of interest. Set the OWNER_DIED bit atomically
2416 * via cmpxchg, and if the value had FUTEX_WAITERS
2417 * set, wake up a waiter (if any). (We have to do a
2418 * futex_wake() even if OWNER_DIED is already set -
2419 * to handle the rare but possible case of recursive
2420 * thread-death.) The rest of the cleanup is done in
2421 * userspace.
2422 */
e3f2ddea
IM
2423 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2424 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
2425
c87e2837
IM
2426 if (nval == -EFAULT)
2427 return -1;
2428
2429 if (nval != uval)
8f17d3a5 2430 goto retry;
0771dfef 2431
e3f2ddea
IM
2432 /*
2433 * Wake robust non-PI futexes here. The wakeup of
2434 * PI futexes happens in exit_pi_state():
2435 */
36cf3b5c 2436 if (!pi && (uval & FUTEX_WAITERS))
c2f9f201 2437 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
0771dfef
IM
2438 }
2439 return 0;
2440}
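/*
 * Worked example of the transition performed by handle_futex_death() above
 * (the TID is made up): a dying owner with TID 1234 and a waiter queued
 * leaves uval = 0x800004d2 (FUTEX_WAITERS | 1234). The cmpxchg stores
 * mval = 0xc0000000 (FUTEX_WAITERS | FUTEX_OWNER_DIED) and, since
 * FUTEX_WAITERS was set, wakes one waiter. The next locker sees
 * FUTEX_OWNER_DIED in the futex word, which is what robust mutex
 * implementations surface to the application as EOWNERDEAD.
 */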
2441
e3f2ddea
IM
2442/*
2443 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
2444 */
2445static inline int fetch_robust_entry(struct robust_list __user **entry,
ba46df98
AV
2446 struct robust_list __user * __user *head,
2447 int *pi)
e3f2ddea
IM
2448{
2449 unsigned long uentry;
2450
ba46df98 2451 if (get_user(uentry, (unsigned long __user *)head))
e3f2ddea
IM
2452 return -EFAULT;
2453
ba46df98 2454 *entry = (void __user *)(uentry & ~1UL);
e3f2ddea
IM
2455 *pi = uentry & 1;
2456
2457 return 0;
2458}
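/*
 * Example of the encoding decoded above, from the (hypothetical) user-space
 * side: a library registering a PI lock on its robust list publishes the
 * entry with bit 0 set,
 *
 *	head.list.next = (struct robust_list *)((uintptr_t)&pi_lock->list | 1);
 *
 * so that exit_robust_list() can tell PI entries (whose waiters are woken
 * via the PI exit path) from plain robust futexes.
 */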
2459
0771dfef
IM
2460/*
2461 * Walk curr->robust_list (very carefully, it's a userspace list!)
2462 * and mark any locks found there dead, and notify any waiters.
2463 *
2464 * We silently return on any sign of list-walking problem.
2465 */
2466void exit_robust_list(struct task_struct *curr)
2467{
2468 struct robust_list_head __user *head = curr->robust_list;
9f96cb1e
MS
2469 struct robust_list __user *entry, *next_entry, *pending;
2470 unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
0771dfef 2471 unsigned long futex_offset;
9f96cb1e 2472 int rc;
0771dfef 2473
a0c1e907
TG
2474 if (!futex_cmpxchg_enabled)
2475 return;
2476
0771dfef
IM
2477 /*
2478 * Fetch the list head (which was registered earlier, via
2479 * sys_set_robust_list()):
2480 */
e3f2ddea 2481 if (fetch_robust_entry(&entry, &head->list.next, &pi))
0771dfef
IM
2482 return;
2483 /*
2484 * Fetch the relative futex offset:
2485 */
2486 if (get_user(futex_offset, &head->futex_offset))
2487 return;
2488 /*
2489 * Fetch any possibly pending lock-add first, and handle it
2490 * if it exists:
2491 */
e3f2ddea 2492 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
0771dfef 2493 return;
e3f2ddea 2494
9f96cb1e 2495 next_entry = NULL; /* avoid warning with gcc */
0771dfef 2496 while (entry != &head->list) {
9f96cb1e
MS
2497 /*
2498 * Fetch the next entry in the list before calling
2499 * handle_futex_death:
2500 */
2501 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
0771dfef
IM
2502 /*
2503 * A pending lock might already be on the list, so
c87e2837 2504 * don't process it twice:
0771dfef
IM
2505 */
2506 if (entry != pending)
ba46df98 2507 if (handle_futex_death((void __user *)entry + futex_offset,
e3f2ddea 2508 curr, pi))
0771dfef 2509 return;
9f96cb1e 2510 if (rc)
0771dfef 2511 return;
9f96cb1e
MS
2512 entry = next_entry;
2513 pi = next_pi;
0771dfef
IM
2514 /*
2515 * Avoid excessively long or circular lists:
2516 */
2517 if (!--limit)
2518 break;
2519
2520 cond_resched();
2521 }
9f96cb1e
MS
2522
2523 if (pending)
2524 handle_futex_death((void __user *)pending + futex_offset,
2525 curr, pip);
0771dfef
IM
2526}
2527
c19384b5 2528long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
e2970f2f 2529 u32 __user *uaddr2, u32 val2, u32 val3)
1da177e4 2530{
1acdac10 2531 int clockrt, ret = -ENOSYS;
34f01cc1 2532 int cmd = op & FUTEX_CMD_MASK;
c2f9f201 2533 int fshared = 0;
34f01cc1
ED
2534
2535 if (!(op & FUTEX_PRIVATE_FLAG))
c2f9f201 2536 fshared = 1;
1da177e4 2537
1acdac10 2538 clockrt = op & FUTEX_CLOCK_REALTIME;
52400ba9 2539 if (clockrt && cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
1acdac10 2540 return -ENOSYS;
1da177e4 2541
34f01cc1 2542 switch (cmd) {
1da177e4 2543 case FUTEX_WAIT:
cd689985
TG
2544 val3 = FUTEX_BITSET_MATCH_ANY;
2545 case FUTEX_WAIT_BITSET:
1acdac10 2546 ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
1da177e4
LT
2547 break;
2548 case FUTEX_WAKE:
cd689985
TG
2549 val3 = FUTEX_BITSET_MATCH_ANY;
2550 case FUTEX_WAKE_BITSET:
2551 ret = futex_wake(uaddr, fshared, val, val3);
1da177e4 2552 break;
1da177e4 2553 case FUTEX_REQUEUE:
52400ba9 2554 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL, 0);
1da177e4
LT
2555 break;
2556 case FUTEX_CMP_REQUEUE:
52400ba9
DH
2557 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
2558 0);
1da177e4 2559 break;
4732efbe 2560 case FUTEX_WAKE_OP:
34f01cc1 2561 ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
4732efbe 2562 break;
c87e2837 2563 case FUTEX_LOCK_PI:
a0c1e907
TG
2564 if (futex_cmpxchg_enabled)
2565 ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
c87e2837
IM
2566 break;
2567 case FUTEX_UNLOCK_PI:
a0c1e907
TG
2568 if (futex_cmpxchg_enabled)
2569 ret = futex_unlock_pi(uaddr, fshared);
c87e2837
IM
2570 break;
2571 case FUTEX_TRYLOCK_PI:
a0c1e907
TG
2572 if (futex_cmpxchg_enabled)
2573 ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
c87e2837 2574 break;
52400ba9
DH
2575 case FUTEX_WAIT_REQUEUE_PI:
2576 val3 = FUTEX_BITSET_MATCH_ANY;
2577 ret = futex_wait_requeue_pi(uaddr, fshared, val, timeout, val3,
2578 clockrt, uaddr2);
2579 break;
52400ba9
DH
2580 case FUTEX_CMP_REQUEUE_PI:
2581 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
2582 1);
2583 break;
1da177e4
LT
2584 default:
2585 ret = -ENOSYS;
2586 }
2587 return ret;
2588}
2589
2590
17da2bd9
HC
2591SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
2592 struct timespec __user *, utime, u32 __user *, uaddr2,
2593 u32, val3)
1da177e4 2594{
c19384b5
PP
2595 struct timespec ts;
2596 ktime_t t, *tp = NULL;
e2970f2f 2597 u32 val2 = 0;
34f01cc1 2598 int cmd = op & FUTEX_CMD_MASK;
1da177e4 2599
cd689985 2600 if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
52400ba9
DH
2601 cmd == FUTEX_WAIT_BITSET ||
2602 cmd == FUTEX_WAIT_REQUEUE_PI)) {
c19384b5 2603 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
1da177e4 2604 return -EFAULT;
c19384b5 2605 if (!timespec_valid(&ts))
9741ef96 2606 return -EINVAL;
c19384b5
PP
2607
2608 t = timespec_to_ktime(ts);
34f01cc1 2609 if (cmd == FUTEX_WAIT)
5a7780e7 2610 t = ktime_add_safe(ktime_get(), t);
c19384b5 2611 tp = &t;
1da177e4
LT
2612 }
2613 /*
52400ba9 2614 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
f54f0986 2615 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
1da177e4 2616 */
f54f0986 2617 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
ba9c22f2 2618 cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
e2970f2f 2619 val2 = (u32) (unsigned long) utime;
1da177e4 2620
c19384b5 2621 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
1da177e4
LT
2622}
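/*
 * Illustrative call patterns for the argument multiplexing above (raw
 * syscall; 'futex_var', 'other_futex' and 'expected' are hypothetical):
 *
 *	// FUTEX_WAIT: 'utime' is a relative timeout, made absolute above
 *	struct timespec ts = { .tv_sec = 1 };
 *	syscall(SYS_futex, &futex_var, FUTEX_WAIT, expected, &ts, NULL, 0);
 *
 *	// FUTEX_WAIT_BITSET: 'utime' is an absolute timeout, against
 *	// CLOCK_MONOTONIC unless FUTEX_CLOCK_REALTIME is or'ed into the op
 *	syscall(SYS_futex, &futex_var, FUTEX_WAIT_BITSET, expected, &abs_ts,
 *		NULL, FUTEX_BITSET_MATCH_ANY);
 *
 *	// FUTEX_CMP_REQUEUE: no timeout; the 'utime' slot carries val2, the
 *	// number of waiters to requeue onto uaddr2
 *	syscall(SYS_futex, &futex_var, FUTEX_CMP_REQUEUE, 1,
 *		(void *)INT_MAX, &other_futex, expected);
 */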
2623
f6d107fb 2624static int __init futex_init(void)
1da177e4 2625{
a0c1e907 2626 u32 curval;
3e4ab747 2627 int i;
95362fa9 2628
a0c1e907
TG
2629 /*
2630 * This will fail and we want it. Some arch implementations do
2631 * runtime detection of the futex_atomic_cmpxchg_inatomic()
2632 * functionality. We want to know that before we call in any
2633 * of the complex code paths. Also we want to prevent
2634 * registration of robust lists in that case. NULL is
 2636 * guaranteed to fault and we get -EFAULT on a functional
 2637 * implementation; the non-functional ones will return
 2638 * -ENOSYS.
2638 */
2639 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
2640 if (curval == -EFAULT)
2641 futex_cmpxchg_enabled = 1;
2642
3e4ab747
TG
2643 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2644 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2645 spin_lock_init(&futex_queues[i].lock);
2646 }
2647
1da177e4
LT
2648 return 0;
2649}
f6d107fb 2650__initcall(futex_init);