/*
 * Fast Userspace Mutexes (which I call "Futexes!").
 * (C) Rusty Russell, IBM 2002
 *
 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 * Removed page pinning, fix privately mapped COW pages and other cleanups
 * (C) Copyright 2003, 2004 Jamie Lokier
 *
 * Robust futex support started by Ingo Molnar
 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 * PI-futex support started by Ingo Molnar and Thomas Gleixner
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * PRIVATE futexes by Eric Dumazet
 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 * enough at me, Linus for the original (flawed) idea, Matthew
 * Kirkwood for proof-of-concept implementation.
 *
 * "The futexes are also cursed."
 * "But they come in a choice of three flavours!"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS	(CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};

/*
 * We use this hashed waitqueue instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * wake up q->waiter, then make the second condition true.
 */
struct futex_q {
	struct plist_node list;
	/* There can only be a single waiter */
	wait_queue_head_t waiter;

	/* Which hash list lock to use: */
	spinlock_t *lock_ptr;

	/* Key which the futex is hashed on: */
	union futex_key key;

	/* Optional priority inheritance state: */
	struct futex_pi_state *pi_state;
	struct task_struct *task;

	/* Bitset for the optional bitmasked wakeup */
	u32 bitset;
};

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	spinlock_t lock;
	struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32 *)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		atomic_inc(&key->shared.inode->i_count);
		break;
	case FUT_OFF_MMSHARED:
		atomic_inc(&key->private.mm->mm_count);
		break;
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

/**
 * get_futex_key - Get parameters which are the keys for a futex.
 * @uaddr: virtual address of the futex
 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key: address where result is stored.
 * @rw: mapping needs to be read/write (values: VERIFY_READ, VERIFY_WRITE)
 *
 * Returns a negative error code or 0.
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, so the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page;
	int err;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * the virtual address, we don't even have to find the underlying vma.
	 * Note: we do have to check that 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
			return -EFAULT;
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);
		return 0;
	}

again:
	err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
	if (err < 0)
		return err;

	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		put_page(page);
		goto again;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object, not the particular process.
	 */
	if (PageAnon(page)) {
		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;
	} else {
		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = page->mapping->host;
		key->shared.pgoff = page->index;
	}

	get_futex_key_refs(key);

	unlock_page(page);
	put_page(page);
	return 0;
}

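/*
 * Example (illustrative userspace sketch, not part of this file): a
 * process-private futex avoids the page lookup above entirely, because
 * FUTEX_PRIVATE_FLAG promises the kernel the futex is never shared
 * across address spaces, so the key is just (current->mm, uaddr).
 * 'futex_wait_private' is a hypothetical helper; error handling omitted:
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long futex_wait_private(int *uaddr, int val)
 *	{
 *		return syscall(SYS_futex, uaddr,
 *			       FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val,
 *			       NULL, NULL, 0);
 *	}
 */
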
static inline
void put_futex_key(int fshared, union futex_key *key)
{
	drop_futex_key_refs(key);
}

static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 curval;

	pagefault_disable();
	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
	pagefault_enable();

	return curval;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}

/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state * alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already.
	 */
	if (pi_state->owner) {
		spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * Clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct * futex_find_get_task(pid_t pid)
{
	struct task_struct *p;
	const struct cred *cred = current_cred(), *pcred;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p) {
		p = ERR_PTR(-ESRCH);
	} else {
		pcred = __task_cred(p);
		if (cred->euid != pcred->euid &&
		    cred->euid != pcred->uid)
			p = ERR_PTR(-ESRCH);
		else
			get_task_struct(p);
	}

	rcu_read_unlock();

	return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		spin_lock_irq(&curr->pi_lock);
	}
	spin_unlock_irq(&curr->pi_lock);
}

static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
		union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_pi_state *pi_state = NULL;
	struct futex_q *this, *next;
	struct plist_head *head;
	struct task_struct *p;
	pid_t pid = uval & FUTEX_TID_MASK;

	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, key)) {
			/*
			 * Another waiter already exists - bump up
			 * the refcount and return its pi_state:
			 */
			pi_state = this->pi_state;
			/*
			 * Userspace might have messed up non-PI and PI futexes:
			 */
			if (unlikely(!pi_state))
				return -EINVAL;

			WARN_ON(!atomic_read(&pi_state->refcount));
			WARN_ON(pid && pi_state->owner &&
				pi_state->owner->pid != pid);

			atomic_inc(&pi_state->refcount);
			*ps = pi_state;

			return 0;
		}
	}

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0.
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (IS_ERR(p))
		return PTR_ERR(p);

	/*
	 * We need to look at the task state flags to figure out
	 * whether the task is exiting. To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
	spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make 'p'
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
	plist_del(&q->list, &q->list.plist);
	/*
	 * The lock in wake_up() is a crucial memory barrier after the
	 * plist_del() and also before assigning to q->lock_ptr.
	 */
	wake_up(&q->waiter);
	/*
	 * The waiting task can free the futex_q as soon as this is written,
	 * without taking any locks.  This must come last.
	 *
	 * A memory barrier is required here to prevent the following store to
	 * lock_ptr from getting ahead of the wakeup. Clearing the lock at the
	 * end of wake_up() does not prevent this store from moving.
	 */
	smp_wmb();
	q->lock_ptr = NULL;
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 curval, newval;

	if (!pi_state)
		return -EINVAL;

	spin_lock(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * This happens when we have stolen the lock and the original
	 * pending owner did not enqueue itself back on the rt_mutex.
	 * That's not a tragedy. We know this way that a lock waiter
	 * is on its way. We make the futex_q waiter the pending owner.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. (The WAITERS bit is always
	 * kept enabled while there is PI state around. We must also
	 * preserve the owner died bit.)
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		int ret = 0;

		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			ret = -EFAULT;
		else if (curval != uval)
			ret = -EINVAL;
		if (ret) {
			spin_unlock(&pi_state->pi_mutex.wait_lock);
			return ret;
		}
	}

	spin_lock_irq(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	spin_unlock_irq(&pi_state->owner->pi_lock);

	spin_lock_irq(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	spin_unlock_irq(&new_owner->pi_lock);

	spin_unlock(&pi_state->pi_mutex.wait_lock);
	rt_mutex_unlock(&pi_state->pi_mutex);

	return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
	u32 oldval;

	/*
	 * There is no waiter, so we unlock the futex. The owner-died
	 * bit need not be preserved here. We are the owner:
	 */
	oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);

	if (oldval == -EFAULT)
		return oldval;
	if (oldval != uval)
		return -EAGAIN;

	return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	struct plist_head *head;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, &key)) {
			if (this->pi_state) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	put_futex_key(fshared, &key);
out:
	return ret;
}

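/*
 * Example (illustrative userspace sketch, not part of this file): the
 * waiter re-checks the value before sleeping and the waker changes the
 * value before waking, matching the ordering comments in futex_wait()
 * below. 'futex_var' is a hypothetical shared int used as a binary
 * semaphore (1 = token available, 0 = empty):
 *
 *	// Waiter: try to consume the token; sleep only while it is 0.
 *	while (__sync_val_compare_and_swap(&futex_var, 1, 0) != 1)
 *		syscall(SYS_futex, &futex_var, FUTEX_WAIT, 0,
 *			NULL, NULL, 0);
 *
 *	// Waker: publish the token, then wake one waiter.
 *	__sync_lock_test_and_set(&futex_var, 1);
 *	syscall(SYS_futex, &futex_var, FUTEX_WAKE, 1, NULL, NULL, 0);
 *
 * FUTEX_WAIT blocks only if *uaddr still equals the expected value (0
 * here), so a wakeup between the CAS and the syscall is never lost.
 */
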
/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int
futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head;
	struct futex_q *this, *next;
	int ret, op_ret;

retry:
	ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

	double_lock_hb(hb1, hb2);
retry_private:
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {
		u32 dummy;

		double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out_put_keys;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out_put_keys;
		}

		ret = get_user(dummy, uaddr2);
		if (ret)
			goto out_put_keys;

		if (!fshared)
			goto retry_private;

		put_futex_key(fshared, &key2);
		put_futex_key(fshared, &key1);
		goto retry;
	}

	head = &hb1->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, &key1)) {
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		head = &hb2->chain;

		op_ret = 0;
		plist_for_each_entry_safe(this, next, head, list) {
			if (match_futex(&this->key, &key2)) {
				wake_futex(this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

	double_unlock_hb(hb1, hb2);
out_put_keys:
	put_futex_key(fshared, &key2);
out_put_key1:
	put_futex_key(fshared, &key1);
out:
	return ret;
}

/*
 * Requeue all waiters hashed on one physical page to another
 * physical page.
 */
static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
			 int nr_wake, int nr_requeue, u32 *cmpval)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head1;
	struct futex_q *this, *next;
	int ret, drop_count = 0;

retry:
	ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = get_futex_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			double_unlock_hb(hb1, hb2);

			ret = get_user(curval, uaddr1);
			if (ret)
				goto out_put_keys;

			if (!fshared)
				goto retry_private;

			put_futex_key(fshared, &key2);
			put_futex_key(fshared, &key1);
			goto retry;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	head1 = &hb1->chain;
	plist_for_each_entry_safe(this, next, head1, list) {
		if (!match_futex(&this->key, &key1))
			continue;
		if (++ret <= nr_wake) {
			wake_futex(this);
		} else {
			/*
			 * If key1 and key2 hash to the same bucket, no need to
			 * requeue.
			 */
			if (likely(head1 != &hb2->chain)) {
				plist_del(&this->list, &hb1->chain);
				plist_add(&this->list, &hb2->chain);
				this->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
				this->list.plist.lock = &hb2->lock;
#endif
			}
			this->key = key2;
			get_futex_key_refs(&key2);
			drop_count++;

			if (ret - nr_wake >= nr_requeue)
				break;
		}
	}

out_unlock:
	double_unlock_hb(hb1, hb2);

	/*
	 * drop_futex_key_refs() must be called outside the spinlocks. During
	 * the requeue we moved futex_q's from the hash bucket at key1 to the
	 * one at key2 and updated their key pointer. We no longer need to
	 * hold the references to key1.
	 */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out_put_keys:
	put_futex_key(fshared, &key2);
out_put_key1:
	put_futex_key(fshared, &key1);
out:
	return ret;
}

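/*
 * Example (illustrative sketch, not part of this file): condition
 * variable implementations typically use requeueing on broadcast, so
 * that one waiter is woken and the rest are moved onto the mutex's
 * futex instead of all being woken at once (avoiding a thundering
 * herd). 'cond' and 'mutex' are hypothetical userspace structures, and
 * cond_val is the condvar futex value the caller last observed:
 *
 *	// Arguments: uaddr1, op, nr_wake, nr_requeue, uaddr2, val3.
 *	// nr_requeue travels in the timeout slot of the raw syscall.
 *	syscall(SYS_futex, &cond->futex, FUTEX_CMP_REQUEUE,
 *		1, (void *)(long)INT_MAX, &mutex->futex, cond_val);
 *
 * The val3/cmpval check makes the requeue fail with -EAGAIN if the
 * condvar futex changed since cond_val was read, as seen above.
 */
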
/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	init_waitqueue_head(&q->waiter);

	get_futex_key_refs(&q->key);
	hb = hash_futex(&q->key);
	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
	q->list.plist.lock = &hb->lock;
#endif
	plist_add(&q->list, &hb->chain);
	q->task = current;
	spin_unlock(&hb->lock);
}

static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
	spin_unlock(&hb->lock);
	drop_futex_key_refs(&q->key);
}

/*
 * queue_me and unqueue_me must be called as a pair, each
 * exactly once.  They are called with the hashed spinlock held.
 */

/* Return 1 if we were still queued (ie. 0 means we were woken) */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	lock_ptr = q->lock_ptr;
	barrier();
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		WARN_ON(plist_node_empty(&q->list));
		plist_del(&q->list, &q->list.plist);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}

/*
 * PI futexes cannot be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
{
	WARN_ON(plist_node_empty(&q->list));
	plist_del(&q->list, &q->list.plist);

	BUG_ON(!q->pi_state);
	free_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);

	drop_futex_key_refs(&q->key);
}

/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with hash bucket lock held and mm->sem held for non
 * private futexes.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *newowner, int fshared)
{
	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	struct futex_pi_state *pi_state = q->pi_state;
	struct task_struct *oldowner = pi_state->owner;
	u32 uval, curval, newval;
	int ret;

	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	/*
	 * We are here either because we stole the rtmutex from the
	 * pending owner or we are the pending owner which failed to
	 * get the rtmutex. We have to replace the pending owner TID
	 * in the user space variable. This must be atomic as we have
	 * to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory readonly for cow.
	 *
	 * Modifying pi_state _before_ the user space value would
	 * leave the pi_state in an inconsistent state when we fault
	 * here, because we need to drop the hash bucket lock to
	 * handle the fault. This might be observed in the PID check
	 * in lookup_pi_state.
	 */
retry:
	if (get_futex_value_locked(&uval, uaddr))
		goto handle_fault;

	while (1) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			goto handle_fault;
		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	if (pi_state->owner != NULL) {
		spin_lock_irq(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		spin_unlock_irq(&pi_state->owner->pi_lock);
	}

	pi_state->owner = newowner;

	spin_lock_irq(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	spin_unlock_irq(&newowner->pi_lock);
	return 0;

	/*
	 * To handle the page fault we need to drop the hash bucket
	 * lock here. That gives the other task (either the pending
	 * owner itself or the task which stole the rtmutex) the
	 * chance to try the fixup of the pi_state. So once we are
	 * back from handling the fault we need to check the pi_state
	 * after reacquiring the hash bucket lock and before trying to
	 * do another fixup. When the fixup has been done already we
	 * simply return.
	 */
handle_fault:
	spin_unlock(q->lock_ptr);

	ret = get_user(uval, uaddr);

	spin_lock(q->lock_ptr);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner)
		return 0;

	if (ret)
		return ret;

	goto retry;
}

/*
 * In case we must use restart_block to restart a futex_wait,
 * we encode the shared capability and clock type in 'flags':
 */
#define FLAGS_SHARED		0x01
#define FLAGS_CLOCKRT		0x02

static long futex_wait_restart(struct restart_block *restart);

static int futex_wait(u32 __user *uaddr, int fshared,
		      u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
{
	struct task_struct *curr = current;
	struct restart_block *restart;
	DECLARE_WAITQUEUE(wait, curr);
	struct futex_hash_bucket *hb;
	struct futex_q q;
	u32 uval;
	int ret;
	struct hrtimer_sleeper t;
	int rem = 0;

	if (!bitset)
		return -EINVAL;

	q.pi_state = NULL;
	q.bitset = bitset;
retry:
	q.key = FUTEX_KEY_INIT;
	ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_READ);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we queued after testing *uaddr, that would open
	 * a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * A consequence is that futex_wait() can return zero and absorb
	 * a wakeup when *uaddr != val on entry to the syscall.  This is
	 * rare, but normal.
	 *
	 * For shared futexes, we hold the mmap semaphore, so the mapping
	 * cannot have changed since we looked it up in get_futex_key.
	 */
	ret = get_futex_value_locked(&uval, uaddr);

	if (unlikely(ret)) {
		queue_unlock(&q, hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out_put_key;

		if (!fshared)
			goto retry_private;

		put_futex_key(fshared, &q.key);
		goto retry;
	}
	ret = -EWOULDBLOCK;
	if (unlikely(uval != val)) {
		queue_unlock(&q, hb);
		goto out_put_key;
	}

	/* Only actually queue if *uaddr contained val. */
	queue_me(&q, hb);

	/*
	 * There might have been scheduling since the queue_me(), as we
	 * cannot hold a spinlock across the get_user() in case it
	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
	 * queueing ourselves into the futex hash.  This code thus has to
	 * rely on the futex_wake() code removing us from hash when it
	 * wakes us up.
	 */

	/* add_wait_queue is the barrier after __set_current_state. */
	__set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&q.waiter, &wait);
	/*
	 * !plist_node_empty() is safe here without any lock.
	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
	 */
	if (likely(!plist_node_empty(&q.list))) {
		if (!abs_time)
			schedule();
		else {
			hrtimer_init_on_stack(&t.timer,
					      clockrt ? CLOCK_REALTIME :
					      CLOCK_MONOTONIC,
					      HRTIMER_MODE_ABS);
			hrtimer_init_sleeper(&t, current);
			hrtimer_set_expires_range_ns(&t.timer, *abs_time,
						     current->timer_slack_ns);

			hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
			if (!hrtimer_active(&t.timer))
				t.task = NULL;

			/*
			 * the timer could have already expired, in which
			 * case current would be flagged for rescheduling.
			 * Don't bother calling schedule.
			 */
			if (likely(t.task))
				schedule();

			hrtimer_cancel(&t.timer);

			/* Flag if a timeout occurred */
			rem = (t.task == NULL);

			destroy_hrtimer_on_stack(&t.timer);
		}
	}
	__set_current_state(TASK_RUNNING);

	/*
	 * NOTE: we don't remove ourselves from the waitqueue because
	 * we are the only user of it.
	 */

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	if (!unqueue_me(&q))
		goto out_put_key;
	ret = -ETIMEDOUT;
	if (rem)
		goto out_put_key;

	/*
	 * We expect signal_pending(current), but another thread may
	 * have handled it for us already.
	 */
	ret = -ERESTARTSYS;
	if (!abs_time)
		goto out_put_key;

	restart = &current_thread_info()->restart_block;
	restart->fn = futex_wait_restart;
	restart->futex.uaddr = (u32 *)uaddr;
	restart->futex.val = val;
	restart->futex.time = abs_time->tv64;
	restart->futex.bitset = bitset;
	restart->futex.flags = 0;

	if (fshared)
		restart->futex.flags |= FLAGS_SHARED;
	if (clockrt)
		restart->futex.flags |= FLAGS_CLOCKRT;

	ret = -ERESTART_RESTARTBLOCK;

out_put_key:
	put_futex_key(fshared, &q.key);
out:
	return ret;
}

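/*
 * Example (illustrative userspace sketch, not part of this file): a
 * timed wait. Plain FUTEX_WAIT takes a *relative* timeout; the syscall
 * entry code below converts it to an absolute expiry before calling
 * futex_wait(). 'futex_var' and 'expected_val' are hypothetical:
 *
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	long err = syscall(SYS_futex, &futex_var, FUTEX_WAIT,
 *			   expected_val, &ts, NULL, 0);
 *	if (err < 0 && errno == ETIMEDOUT)
 *		;	// no wakeup within 1s - caller decides what to do
 *	else if (err < 0 && errno == EWOULDBLOCK)
 *		;	// *uaddr != expected_val on entry - re-check and retry
 */
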
static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
	int fshared = 0;
	ktime_t t;

	t.tv64 = restart->futex.time;
	restart->fn = do_no_restart_syscall;
	if (restart->futex.flags & FLAGS_SHARED)
		fshared = 1;
	return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
				restart->futex.bitset,
				restart->futex.flags & FLAGS_CLOCKRT);
}

/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block, it does PI, etc. (Due to
 * races the kernel might see a 0 value of the futex too.)
 */
static int futex_lock_pi(u32 __user *uaddr, int fshared,
			 int detect, ktime_t *time, int trylock)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct task_struct *curr = current;
	struct futex_hash_bucket *hb;
	u32 uval, newval, curval;
	struct futex_q q;
	int ret, lock_taken, ownerdied = 0;

	if (refill_pi_state_cache())
		return -ENOMEM;

	if (time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires(&to->timer, *time);
	}

	q.pi_state = NULL;
retry:
	q.key = FUTEX_KEY_INIT;
	ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

retry_locked:
	ret = lock_taken = 0;

	/*
	 * To avoid races, we attempt to take the lock here again
	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
	 * the locks. It will most likely not succeed.
	 */
	newval = task_pid_vnr(current);

	curval = cmpxchg_futex_value_locked(uaddr, 0, newval);

	if (unlikely(curval == -EFAULT))
		goto uaddr_faulted;

	/*
	 * Detect deadlocks. In case of REQUEUE_PI this is a valid
	 * situation and we return success to user space.
	 */
	if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) {
		ret = -EDEADLK;
		goto out_unlock_put_key;
	}

	/*
	 * Surprise - we got the lock. Just return to userspace:
	 */
	if (unlikely(!curval))
		goto out_unlock_put_key;

	uval = curval;

	/*
	 * Set the WAITERS flag, so the owner will know it has someone
	 * to wake at the next unlock.
	 */
	newval = curval | FUTEX_WAITERS;

	/*
	 * There are two cases where a futex might have no owner (the
	 * owner TID is 0): the OWNER_DIED bit is set, in which case we
	 * take over the futex, and the unconditional takeover when the
	 * owner of the futex died (ownerdied set below).
	 *
	 * This is safe as we are protected by the hash bucket lock!
	 */
	if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
		/* Keep the OWNER_DIED bit */
		newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(current);
		ownerdied = 0;
		lock_taken = 1;
	}

	curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

	if (unlikely(curval == -EFAULT))
		goto uaddr_faulted;
	if (unlikely(curval != uval))
		goto retry_locked;

	/*
	 * We took the lock due to owner-died takeover.
	 */
	if (unlikely(lock_taken))
		goto out_unlock_put_key;

	/*
	 * We don't have the lock. Look up the PI state (or create it if
	 * we are the first waiter):
	 */
	ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state);

	if (unlikely(ret)) {
		switch (ret) {

		case -EAGAIN:
			/*
			 * Task is exiting and we just wait for the
			 * exit to complete.
			 */
			queue_unlock(&q, hb);
			put_futex_key(fshared, &q.key);
			cond_resched();
			goto retry;

		case -ESRCH:
			/*
			 * No owner found for this futex. Check if the
			 * OWNER_DIED bit is set to figure out whether
			 * this is a robust futex or not.
			 */
			if (get_futex_value_locked(&curval, uaddr))
				goto uaddr_faulted;

			/*
			 * We simply start over in case of a robust
			 * futex. The code above will take the futex
			 * and return happy.
			 */
			if (curval & FUTEX_OWNER_DIED) {
				ownerdied = 1;
				goto retry_locked;
			}
		default:
			goto out_unlock_put_key;
		}
	}

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	queue_me(&q, hb);

	WARN_ON(!q.pi_state);
	/*
	 * Block on the PI mutex:
	 */
	if (!trylock)
		ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
	else {
		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
	}

	spin_lock(q.lock_ptr);

	if (!ret) {
		/*
		 * Got the lock. We might not be the anticipated owner
		 * if we did a lock-steal - fix up the PI-state in
		 * that case:
		 */
		if (q.pi_state->owner != curr)
			ret = fixup_pi_state_owner(uaddr, &q, curr, fshared);
	} else {
		/*
		 * Catch the rare case where the lock was released
		 * when we were on the way back before we locked the
		 * hash bucket.
		 */
		if (q.pi_state->owner == curr) {
			/*
			 * Try to get the rt_mutex now. This might
			 * fail as some other task acquired the
			 * rt_mutex after we removed ourselves from the
			 * rt_mutex waiters list.
			 */
			if (rt_mutex_trylock(&q.pi_state->pi_mutex))
				ret = 0;
			else {
				/*
				 * pi_state is incorrect, some other
				 * task did a lock steal and we
				 * returned due to timeout or signal
				 * without taking the rt_mutex. Too
				 * late. We can access the
				 * rt_mutex_owner without locking, as
				 * the other task is now blocked on
				 * the hash bucket lock. Fix the state
				 * up.
				 */
				struct task_struct *owner;
				int res;

				owner = rt_mutex_owner(&q.pi_state->pi_mutex);
				res = fixup_pi_state_owner(uaddr, &q, owner,
							   fshared);

				/* propagate -EFAULT, if the fixup failed */
				if (res)
					ret = res;
			}
		} else {
			/*
			 * Paranoia check. If we did not take the lock
			 * in the trylock above, then we should not be
			 * the owner of the rtmutex, neither the real
			 * nor the pending one:
			 */
			if (rt_mutex_owner(&q.pi_state->pi_mutex) == curr)
				printk(KERN_ERR "futex_lock_pi: ret = %d "
				       "pi-mutex: %p pi-state %p\n", ret,
				       q.pi_state->pi_mutex.owner,
				       q.pi_state->owner);
		}
	}

	/*
	 * If fixup_pi_state_owner() faulted and was unable to handle the
	 * fault, unlock it and return the fault to userspace.
	 */
	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
		rt_mutex_unlock(&q.pi_state->pi_mutex);

	/* Unqueue and drop the lock */
	unqueue_me_pi(&q);

	if (to)
		destroy_hrtimer_on_stack(&to->timer);
	return ret != -EINTR ? ret : -ERESTARTNOINTR;

out_unlock_put_key:
	queue_unlock(&q, hb);

out_put_key:
	put_futex_key(fshared, &q.key);
out:
	if (to)
		destroy_hrtimer_on_stack(&to->timer);
	return ret;

uaddr_faulted:
	/*
	 * We have to r/w *(int __user *)uaddr, and we have to modify it
	 * atomically. Therefore, if we continue to fault after get_user()
	 * below, we need to handle the fault ourselves, while still holding
	 * the mmap_sem. This can occur if the uaddr is under contention as
	 * we have to drop the mmap_sem in order to call get_user().
	 */
	queue_unlock(&q, hb);

	ret = get_user(uval, uaddr);
	if (ret)
		goto out_put_key;

	if (!fshared)
		goto retry_private;

	put_futex_key(fshared, &q.key);
	goto retry;
}

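/*
 * Example (illustrative userspace sketch, not part of this file): the
 * fast path that precedes futex_lock_pi() above. Only when the
 * 0 -> TID cmpxchg fails does the kernel get involved and do the PI
 * boosting. 'lock' is a hypothetical userspace structure:
 *
 *	pid_t tid = syscall(SYS_gettid);
 *	if (__sync_val_compare_and_swap(&lock->futex, 0, tid) != 0)
 *		syscall(SYS_futex, &lock->futex, FUTEX_LOCK_PI, 0,
 *			NULL, NULL, 0);	// slow path, see futex_lock_pi()
 */
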
/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
static int futex_unlock_pi(u32 __user *uaddr, int fshared)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	u32 uval;
	struct plist_head *head;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;

retry:
	if (get_user(uval, uaddr))
		return -EFAULT;
	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
		return -EPERM;

	ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);

	/*
	 * To avoid races, try to do the TID -> 0 atomic transition
	 * again. If it succeeds then we can return without waking
	 * anyone else up:
	 */
	if (!(uval & FUTEX_OWNER_DIED))
		uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);

	if (unlikely(uval == -EFAULT))
		goto pi_faulted;
	/*
	 * Rare case: we managed to release the lock atomically,
	 * no need to wake anyone else up:
	 */
	if (unlikely(uval == task_pid_vnr(current)))
		goto out_unlock;

	/*
	 * Ok, other tasks may need to be woken up - check waiters
	 * and do the wakeup if necessary:
	 */
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (!match_futex(&this->key, &key))
			continue;
		ret = wake_futex_pi(uaddr, uval, this);
		/*
		 * The atomic access to the futex value
		 * generated a pagefault, so retry the
		 * user-access and the wakeup:
		 */
		if (ret == -EFAULT)
			goto pi_faulted;
		goto out_unlock;
	}
	/*
	 * No waiters - kernel unlocks the futex:
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		ret = unlock_futex_pi(uaddr, uval);
		if (ret == -EFAULT)
			goto pi_faulted;
	}

out_unlock:
	spin_unlock(&hb->lock);
	put_futex_key(fshared, &key);

out:
	return ret;

pi_faulted:
	/*
	 * We have to r/w *(int __user *)uaddr, and we have to modify it
	 * atomically. Therefore, if we continue to fault after get_user()
	 * below, we need to handle the fault ourselves, while still holding
	 * the mmap_sem. This can occur if the uaddr is under contention as
	 * we have to drop the mmap_sem in order to call get_user().
	 */
	spin_unlock(&hb->lock);
	put_futex_key(fshared, &key);

	ret = get_user(uval, uaddr);
	if (!ret)
		goto retry;

	return ret;
}

/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */

/**
 * sys_set_robust_list - set the robust-futex list head of a task
 * @head: pointer to the list-head
 * @len: length of the list-head, as userspace expects
 */
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}

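/*
 * Example (illustrative userspace sketch, not part of this file): each
 * thread registers its robust list head once; glibc normally does this
 * automatically at thread start. A manual registration, assuming a
 * hypothetical 'struct my_robust_mutex' with its futex word at a fixed
 * offset, might look like:
 *
 *	static __thread struct robust_list_head head = {
 *		.list		 = { &head.list },	// empty (self-linked)
 *		.futex_offset	 = offsetof(struct my_robust_mutex, futex),
 *		.list_op_pending = NULL,
 *	};
 *
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 */
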
/**
 * sys_get_robust_list - get the robust-futex list head of a task
 * @pid: pid of the process [zero for current task]
 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
 * @len_ptr: pointer to a length field, the kernel fills in the header size
 */
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
{
	struct robust_list_head __user *head;
	unsigned long ret;
	const struct cred *cred = current_cred(), *pcred;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	if (!pid)
		head = current->robust_list;
	else {
		struct task_struct *p;

		ret = -ESRCH;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
		ret = -EPERM;
		pcred = __task_cred(p);
		if (cred->euid != pcred->euid &&
		    cred->euid != pcred->uid &&
		    !capable(CAP_SYS_PTRACE))
			goto err_unlock;
		head = p->robust_list;
		rcu_read_unlock();
	}

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
	u32 uval, nval, mval;

retry:
	if (get_user(uval, uaddr))
		return -1;

	if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
		/*
		 * Ok, this dying thread is truly holding a futex
		 * of interest. Set the OWNER_DIED bit atomically
		 * via cmpxchg, and if the value had FUTEX_WAITERS
		 * set, wake up a waiter (if any). (We have to do a
		 * futex_wake() even if OWNER_DIED is already set -
		 * to handle the rare but possible case of recursive
		 * thread-death.) The rest of the cleanup is done in
		 * userspace.
		 */
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);

		if (nval == -EFAULT)
			return -1;

		if (nval != uval)
			goto retry;

		/*
		 * Wake robust non-PI futexes here. The wakeup of
		 * PI futexes happens in exit_pi_state():
		 */
		if (!pi && (uval & FUTEX_WAITERS))
			futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
	}
	return 0;
}

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
	unsigned long futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending)
			if (handle_futex_death((void __user *)entry + futex_offset,
						curr, pi))
				return;
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending)
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip);
}

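/*
 * Example (illustrative sketch, not part of this file): the walk above
 * only sees 'struct robust_list' nodes; futex_offset turns each node
 * back into the lock's futex word. A matching hypothetical userspace
 * layout:
 *
 *	struct my_robust_mutex {
 *		struct robust_list list;   // linked into the per-thread list
 *		int futex;                 // found at entry + futex_offset
 *	};
 */
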
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int clockrt, ret = -ENOSYS;
	int cmd = op & FUTEX_CMD_MASK;
	int fshared = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		fshared = 1;

	clockrt = op & FUTEX_CLOCK_REALTIME;
	if (clockrt && cmd != FUTEX_WAIT_BITSET)
		return -ENOSYS;

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
	case FUTEX_WAIT_BITSET:
		ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
		break;
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
	case FUTEX_WAKE_BITSET:
		ret = futex_wake(uaddr, fshared, val, val3);
		break;
	case FUTEX_REQUEUE:
		ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL);
		break;
	case FUTEX_CMP_REQUEUE:
		ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3);
		break;
	case FUTEX_WAKE_OP:
		ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
		break;
	case FUTEX_LOCK_PI:
		if (futex_cmpxchg_enabled)
			ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
		break;
	case FUTEX_UNLOCK_PI:
		if (futex_cmpxchg_enabled)
			ret = futex_unlock_pi(uaddr, fshared);
		break;
	case FUTEX_TRYLOCK_PI:
		if (futex_cmpxchg_enabled)
			ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
		break;
	default:
		ret = -ENOSYS;
	}
	return ret;
}

SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET)) {
		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
			return -EFAULT;
		if (!timespec_valid(&ts))
			return -EINVAL;

		t = timespec_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE.
	 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}

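/*
 * Example (illustrative userspace sketch, not part of this file):
 * glibc exposes no futex() wrapper, so callers go through syscall(2)
 * directly. The meaning of 'utime' depends on the command, as decoded
 * above. 'sys_futex_raw' is a hypothetical helper:
 *
 *	static long sys_futex_raw(unsigned int *uaddr, int op,
 *				  unsigned int val,
 *				  const struct timespec *utime,
 *				  unsigned int *uaddr2, unsigned int val3)
 *	{
 *		return syscall(SYS_futex, uaddr, op, val,
 *			       utime, uaddr2, val3);
 *	}
 */
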
static int __init futex_init(void)
{
	u32 curval;
	int i;

	/*
	 * This will fail and we want it. Some arch implementations do
	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
	 * functionality. We want to know that before we call in any
	 * of the complex code paths. Also we want to prevent
	 * registration of robust lists in that case. NULL is
	 * guaranteed to fault and we get -EFAULT on functional
	 * implementations; the non-functional ones will return
	 * -ENOSYS.
	 */
	curval = cmpxchg_futex_value_locked(NULL, 0, 0);
	if (curval == -EFAULT)
		futex_cmpxchg_enabled = 1;

	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
		plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
__initcall(futex_init);