kernel/futex.c
1 /*
2  *  Fast Userspace Mutexes (which I call "Futexes!").
3  *  (C) Rusty Russell, IBM 2002
4  *
5  *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
6  *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
7  *
8  *  Removed page pinning, fix privately mapped COW pages and other cleanups
9  *  (C) Copyright 2003, 2004 Jamie Lokier
10  *
11  *  Robust futex support started by Ingo Molnar
12  *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
13  *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
14  *
15  *  PI-futex support started by Ingo Molnar and Thomas Gleixner
16  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
17  *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
18  *
19  *  PRIVATE futexes by Eric Dumazet
20  *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
21  *
22  *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
23  *  Copyright (C) IBM Corporation, 2009
24  *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
25  *
26  *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
27  *  enough at me, Linus for the original (flawed) idea, Matthew
28  *  Kirkwood for proof-of-concept implementation.
29  *
30  *  "The futexes are also cursed."
31  *  "But they come in a choice of three flavours!"
32  *
33  *  This program is free software; you can redistribute it and/or modify
34  *  it under the terms of the GNU General Public License as published by
35  *  the Free Software Foundation; either version 2 of the License, or
36  *  (at your option) any later version.
37  *
38  *  This program is distributed in the hope that it will be useful,
39  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
40  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
41  *  GNU General Public License for more details.
42  *
43  *  You should have received a copy of the GNU General Public License
44  *  along with this program; if not, write to the Free Software
45  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
46  */
47 #include <linux/slab.h>
48 #include <linux/poll.h>
49 #include <linux/fs.h>
50 #include <linux/file.h>
51 #include <linux/jhash.h>
52 #include <linux/init.h>
53 #include <linux/futex.h>
54 #include <linux/mount.h>
55 #include <linux/pagemap.h>
56 #include <linux/syscalls.h>
57 #include <linux/signal.h>
58 #include <linux/export.h>
59 #include <linux/magic.h>
60 #include <linux/pid.h>
61 #include <linux/nsproxy.h>
62 #include <linux/ptrace.h>
63 #include <linux/sched/rt.h>
64 #include <linux/hugetlb.h>
65
66 #include <asm/futex.h>
67
68 #include "rtmutex_common.h"
69
70 int __read_mostly futex_cmpxchg_enabled;
71
72 #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
73
74 /*
75  * Futex flags used to encode options to functions and preserve them across
76  * restarts.
77  */
78 #define FLAGS_SHARED            0x01
79 #define FLAGS_CLOCKRT           0x02
80 #define FLAGS_HAS_TIMEOUT       0x04
81
82 /*
83  * Priority Inheritance state:
84  */
85 struct futex_pi_state {
86         /*
87          * list of 'owned' pi_state instances - these have to be
88          * cleaned up in do_exit() if the task exits prematurely:
89          */
90         struct list_head list;
91
92         /*
93          * The PI object:
94          */
95         struct rt_mutex pi_mutex;
96
97         struct task_struct *owner;
98         atomic_t refcount;
99
100         union futex_key key;
101 };
102
103 /**
104  * struct futex_q - The hashed futex queue entry, one per waiting task
105  * @list:               priority-sorted list of tasks waiting on this futex
106  * @task:               the task waiting on the futex
107  * @lock_ptr:           the hash bucket lock
108  * @key:                the key the futex is hashed on
109  * @pi_state:           optional priority inheritance state
110  * @rt_waiter:          rt_waiter storage for use with requeue_pi
111  * @requeue_pi_key:     the requeue_pi target futex key
112  * @bitset:             bitset for the optional bitmasked wakeup
113  *
114  * We use this hashed waitqueue, instead of a normal wait_queue_t, so
115  * we can wake only the relevant ones (hashed queues may be shared).
116  *
117  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
118  * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
119  * The order of wakeup is always to make the first condition true, then
120  * the second.
121  *
122  * PI futexes are typically woken before they are removed from the hash list via
123  * the rt_mutex code. See unqueue_me_pi().
124  */
125 struct futex_q {
126         struct plist_node list;
127
128         struct task_struct *task;
129         spinlock_t *lock_ptr;
130         union futex_key key;
131         struct futex_pi_state *pi_state;
132         struct rt_mutex_waiter *rt_waiter;
133         union futex_key *requeue_pi_key;
134         u32 bitset;
135 };
136
137 static const struct futex_q futex_q_init = {
138         /* list gets initialized in queue_me() */
139         .key = FUTEX_KEY_INIT,
140         .bitset = FUTEX_BITSET_MATCH_ANY
141 };
142
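/*
 * Editor's sketch (illustrative): per the struct futex_q comment above,
 * a waiter is "woken" once
 *
 *	plist_node_empty(&q->list) || q->lock_ptr == NULL
 *
 * holds, and wake_futex() below makes the first condition true before
 * the second.
 */
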
143 /*
144  * Hash buckets are shared by all the futex_keys that hash to the same
145  * location.  Each key may have multiple futex_q structures, one for each task
146  * waiting on a futex.
147  */
148 struct futex_hash_bucket {
149         spinlock_t lock;
150         struct plist_head chain;
151 };
152
153 static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
154
155 /*
156  * We hash on the keys returned from get_futex_key (see below).
157  */
158 static struct futex_hash_bucket *hash_futex(union futex_key *key)
159 {
160         u32 hash = jhash2((u32*)&key->both.word,
161                           (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
162                           key->both.offset);
163         return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
164 }
165
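/*
 * Editor's note (illustrative): with FUTEX_HASHBITS == 8 there are
 * 1 << 8 == 256 buckets, so unrelated futexes can share a bucket.
 * That is harmless, because every futex_q carries its full key and
 * lookups filter with match_futex(), e.g.:
 *
 *	plist_for_each_entry(this, &hb->chain, list)
 *		if (match_futex(&this->key, key))
 *			;	// this waiter really belongs to 'key'
 */
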
166 /*
167  * Return 1 if two futex_keys are equal, 0 otherwise.
168  */
169 static inline int match_futex(union futex_key *key1, union futex_key *key2)
170 {
171         return (key1 && key2
172                 && key1->both.word == key2->both.word
173                 && key1->both.ptr == key2->both.ptr
174                 && key1->both.offset == key2->both.offset);
175 }
176
177 /*
178  * Take a reference to the resource addressed by a key.
179  * Can be called while holding spinlocks.
180  */
182 static void get_futex_key_refs(union futex_key *key)
183 {
184         if (!key->both.ptr)
185                 return;
186
187         switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
188         case FUT_OFF_INODE:
189                 ihold(key->shared.inode);
190                 break;
191         case FUT_OFF_MMSHARED:
192                 atomic_inc(&key->private.mm->mm_count);
193                 break;
194         }
195 }
196
197 /*
198  * Drop a reference to the resource addressed by a key.
199  * The hash bucket spinlock must not be held.
200  */
201 static void drop_futex_key_refs(union futex_key *key)
202 {
203         if (!key->both.ptr) {
204                 /* If we're here then we tried to put a key we failed to get */
205                 WARN_ON_ONCE(1);
206                 return;
207         }
208
209         switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
210         case FUT_OFF_INODE:
211                 iput(key->shared.inode);
212                 break;
213         case FUT_OFF_MMSHARED:
214                 mmdrop(key->private.mm);
215                 break;
216         }
217 }
218
219 /**
220  * get_futex_key() - Get parameters which are the keys for a futex
221  * @uaddr:      virtual address of the futex
222  * @fshared:    0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
223  * @key:        address where result is stored.
224  * @rw:         mapping needs to be read/write (values: VERIFY_READ,
225  *              VERIFY_WRITE)
226  *
227  * Return: a negative error code or 0
228  *
229  * The key words are stored in *key on success.
230  *
231  * For shared mappings, it's (page->index, file_inode(vma->vm_file),
232  * offset_within_page).  For private mappings, it's (uaddr, current->mm).
233  * We can usually work out the index without swapping in the page.
234  *
235  * lock_page() might sleep, the caller should not hold a spinlock.
236  */
237 static int
238 get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
239 {
240         unsigned long address = (unsigned long)uaddr;
241         struct mm_struct *mm = current->mm;
242         struct page *page, *page_head;
243         int err, ro = 0;
244
245         /*
246          * The futex address must be "naturally" aligned.
247          */
248         key->both.offset = address % PAGE_SIZE;
249         if (unlikely((address % sizeof(u32)) != 0))
250                 return -EINVAL;
251         address -= key->both.offset;
252
253         /*
254          * PROCESS_PRIVATE futexes are fast.
255          * As the mm cannot disappear under us and the 'key' only needs the
256          * virtual address, we don't even have to find the underlying vma.
257          * Note: We do have to check that 'uaddr' is a valid user address,
258          *       but access_ok() should be faster than find_vma().
259          */
260         if (!fshared) {
261                 if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
262                         return -EFAULT;
263                 key->private.mm = mm;
264                 key->private.address = address;
265                 get_futex_key_refs(key);
266                 return 0;
267         }
268
269 again:
270         err = get_user_pages_fast(address, 1, 1, &page);
271         /*
272          * If write access is not required (e.g. FUTEX_WAIT), try
273          * to get read-only access.
274          */
275         if (err == -EFAULT && rw == VERIFY_READ) {
276                 err = get_user_pages_fast(address, 1, 0, &page);
277                 ro = 1;
278         }
279         if (err < 0)
280                 return err;
281         else
282                 err = 0;
283
284 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
285         page_head = page;
286         if (unlikely(PageTail(page))) {
287                 put_page(page);
288                 /* serialize against __split_huge_page_splitting() */
289                 local_irq_disable();
290                 if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
291                         page_head = compound_head(page);
292                         /*
293          * page_head is a valid pointer, but we must pin
294                          * it before taking the PG_lock and/or
295                          * PG_compound_lock. The moment we re-enable
296                          * irqs __split_huge_page_splitting() can
297                          * return and the head page can be freed from
298                          * under us. We can't take the PG_lock and/or
299                          * PG_compound_lock on a page that could be
300                          * freed from under us.
301                          */
302                         if (page != page_head) {
303                                 get_page(page_head);
304                                 put_page(page);
305                         }
306                         local_irq_enable();
307                 } else {
308                         local_irq_enable();
309                         goto again;
310                 }
311         }
312 #else
313         page_head = compound_head(page);
314         if (page != page_head) {
315                 get_page(page_head);
316                 put_page(page);
317         }
318 #endif
319
320         lock_page(page_head);
321
322         /*
323          * If page_head->mapping is NULL, then it cannot be a PageAnon
324          * page; but it might be the ZERO_PAGE or in the gate area or
325          * in a special mapping (all cases which we are happy to fail);
326          * or it may have been a good file page when get_user_pages_fast
327          * found it, but truncated or holepunched or subjected to
328          * invalidate_complete_page2 before we got the page lock (also
329          * cases which we are happy to fail).  And we hold a reference,
330          * so refcount care in invalidate_complete_page's remove_mapping
331          * prevents drop_caches from setting mapping to NULL beneath us.
332          *
333          * The case we do have to guard against is when memory pressure made
334          * shmem_writepage move it from filecache to swapcache beneath us:
335          * an unlikely race, but we do need to retry for page_head->mapping.
336          */
337         if (!page_head->mapping) {
338                 int shmem_swizzled = PageSwapCache(page_head);
339                 unlock_page(page_head);
340                 put_page(page_head);
341                 if (shmem_swizzled)
342                         goto again;
343                 return -EFAULT;
344         }
345
346         /*
347          * Private mappings are handled in a simple way.
348          *
349          * NOTE: When userspace waits on a MAP_SHARED mapping, even if
350          * it's a read-only handle, it's expected that futexes attach to
351          * the object, not the particular process.
352          */
353         if (PageAnon(page_head)) {
354                 /*
355                  * A RO anonymous page will never change and thus doesn't make
356                  * sense for futex operations.
357                  */
358                 if (ro) {
359                         err = -EFAULT;
360                         goto out;
361                 }
362
363                 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
364                 key->private.mm = mm;
365                 key->private.address = address;
366         } else {
367                 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
368                 key->shared.inode = page_head->mapping->host;
369                 key->shared.pgoff = basepage_index(page);
370         }
371
372         get_futex_key_refs(key);
373
374 out:
375         unlock_page(page_head);
376         put_page(page_head);
377         return err;
378 }
379
380 static inline void put_futex_key(union futex_key *key)
381 {
382         drop_futex_key_refs(key);
383 }
384
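/*
 * Editor's sketch of the three key flavours get_futex_key() produces:
 *
 *	PROCESS_PRIVATE:   { current->mm, aligned uaddr }, no flag bits
 *	shared anonymous:  { current->mm, aligned uaddr }, FUT_OFF_MMSHARED
 *	shared file page:  { inode, pgoff },               FUT_OFF_INODE
 *
 * each combined with the sub-page offset in key->both.offset, so two
 * processes mapping the same file page contend on the same key.
 */
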
385 /**
386  * fault_in_user_writeable() - Fault in user address and verify RW access
387  * @uaddr:      pointer to faulting user space address
388  *
389  * Slow path to fixup the fault we just took in the atomic write
390  * access to @uaddr.
391  *
392  * We have no generic implementation of a non-destructive write to the
393  * user address. We know that we faulted in the atomic pagefault
394  * disabled section so we might as well avoid the #PF overhead by
395  * calling get_user_pages() right away.
396  */
397 static int fault_in_user_writeable(u32 __user *uaddr)
398 {
399         struct mm_struct *mm = current->mm;
400         int ret;
401
402         down_read(&mm->mmap_sem);
403         ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
404                                FAULT_FLAG_WRITE);
405         up_read(&mm->mmap_sem);
406
407         return ret < 0 ? ret : 0;
408 }
409
410 /**
411  * futex_top_waiter() - Return the highest priority waiter on a futex
412  * @hb:         the hash bucket the futex_q's reside in
413  * @key:        the futex key (to distinguish it from other futex_q's)
414  *
415  * Must be called with the hb lock held.
416  */
417 static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
418                                         union futex_key *key)
419 {
420         struct futex_q *this;
421
422         plist_for_each_entry(this, &hb->chain, list) {
423                 if (match_futex(&this->key, key))
424                         return this;
425         }
426         return NULL;
427 }
428
429 static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
430                                       u32 uval, u32 newval)
431 {
432         int ret;
433
434         pagefault_disable();
435         ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
436         pagefault_enable();
437
438         return ret;
439 }
440
441 static int get_futex_value_locked(u32 *dest, u32 __user *from)
442 {
443         int ret;
444
445         pagefault_disable();
446         ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
447         pagefault_enable();
448
449         return ret ? -EFAULT : 0;
450 }
451
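/*
 * Editor's note: both helpers above run the user access with pagefaults
 * disabled, so a non-resident page returns an error instead of sleeping
 * while a hash bucket spinlock is held.  Callers follow the pattern
 * (sketch, see the cmpval check in futex_requeue() below):
 *
 *	ret = get_futex_value_locked(&curval, uaddr);
 *	if (ret) {
 *		...unlock the hash bucket(s)...
 *		ret = get_user(curval, uaddr);	// fault the page in
 *		if (!ret)
 *			goto retry;
 *	}
 */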
452
453 /*
454  * PI code:
455  */
456 static int refill_pi_state_cache(void)
457 {
458         struct futex_pi_state *pi_state;
459
460         if (likely(current->pi_state_cache))
461                 return 0;
462
463         pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
464
465         if (!pi_state)
466                 return -ENOMEM;
467
468         INIT_LIST_HEAD(&pi_state->list);
469         /* pi_mutex gets initialized later */
470         pi_state->owner = NULL;
471         atomic_set(&pi_state->refcount, 1);
472         pi_state->key = FUTEX_KEY_INIT;
473
474         current->pi_state_cache = pi_state;
475
476         return 0;
477 }
478
479 static struct futex_pi_state *alloc_pi_state(void)
480 {
481         struct futex_pi_state *pi_state = current->pi_state_cache;
482
483         WARN_ON(!pi_state);
484         current->pi_state_cache = NULL;
485
486         return pi_state;
487 }
488
489 static void free_pi_state(struct futex_pi_state *pi_state)
490 {
491         if (!atomic_dec_and_test(&pi_state->refcount))
492                 return;
493
494         /*
495          * If pi_state->owner is NULL, the owner is most probably dying
496          * and has cleaned up the pi_state already
497          */
498         if (pi_state->owner) {
499                 raw_spin_lock_irq(&pi_state->owner->pi_lock);
500                 list_del_init(&pi_state->list);
501                 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
502
503                 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
504         }
505
506         if (current->pi_state_cache)
507                 kfree(pi_state);
508         else {
509                 /*
510                  * pi_state->list is already empty.
511                  * clear pi_state->owner.
512                  * refcount is at 0 - put it back to 1.
513                  */
514                 pi_state->owner = NULL;
515                 atomic_set(&pi_state->refcount, 1);
516                 current->pi_state_cache = pi_state;
517         }
518 }
519
520 /*
521  * Look up the task based on what TID userspace gave us.
522  * We don't trust it.
523  */
524 static struct task_struct *futex_find_get_task(pid_t pid)
525 {
526         struct task_struct *p;
527
528         rcu_read_lock();
529         p = find_task_by_vpid(pid);
530         if (p)
531                 get_task_struct(p);
532
533         rcu_read_unlock();
534
535         return p;
536 }
537
538 /*
539  * This task is holding PI mutexes at exit time => bad.
540  * Kernel cleans up PI-state, but userspace is likely hosed.
541  * (Robust-futex cleanup is separate and might save the day for userspace.)
542  */
543 void exit_pi_state_list(struct task_struct *curr)
544 {
545         struct list_head *next, *head = &curr->pi_state_list;
546         struct futex_pi_state *pi_state;
547         struct futex_hash_bucket *hb;
548         union futex_key key = FUTEX_KEY_INIT;
549
550         if (!futex_cmpxchg_enabled)
551                 return;
552         /*
553          * We are a ZOMBIE and nobody can enqueue itself on
554          * pi_state_list anymore, but we have to be careful
555          * versus waiters unqueueing themselves:
556          */
557         raw_spin_lock_irq(&curr->pi_lock);
558         while (!list_empty(head)) {
559
560                 next = head->next;
561                 pi_state = list_entry(next, struct futex_pi_state, list);
562                 key = pi_state->key;
563                 hb = hash_futex(&key);
564                 raw_spin_unlock_irq(&curr->pi_lock);
565
566                 spin_lock(&hb->lock);
567
568                 raw_spin_lock_irq(&curr->pi_lock);
569                 /*
570                  * We dropped the pi-lock, so re-check whether this
571                  * task still owns the PI-state:
572                  */
573                 if (head->next != next) {
574                         spin_unlock(&hb->lock);
575                         continue;
576                 }
577
578                 WARN_ON(pi_state->owner != curr);
579                 WARN_ON(list_empty(&pi_state->list));
580                 list_del_init(&pi_state->list);
581                 pi_state->owner = NULL;
582                 raw_spin_unlock_irq(&curr->pi_lock);
583
584                 rt_mutex_unlock(&pi_state->pi_mutex);
585
586                 spin_unlock(&hb->lock);
587
588                 raw_spin_lock_irq(&curr->pi_lock);
589         }
590         raw_spin_unlock_irq(&curr->pi_lock);
591 }
592
593 static int
594 lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
595                 union futex_key *key, struct futex_pi_state **ps)
596 {
597         struct futex_pi_state *pi_state = NULL;
598         struct futex_q *this, *next;
599         struct plist_head *head;
600         struct task_struct *p;
601         pid_t pid = uval & FUTEX_TID_MASK;
602
603         head = &hb->chain;
604
605         plist_for_each_entry_safe(this, next, head, list) {
606                 if (match_futex(&this->key, key)) {
607                         /*
608                          * Another waiter already exists - bump up
609                          * the refcount and return its pi_state:
610                          */
611                         pi_state = this->pi_state;
612                         /*
613                          * Userspace might have messed up non-PI and PI futexes
614                          */
615                         if (unlikely(!pi_state))
616                                 return -EINVAL;
617
618                         WARN_ON(!atomic_read(&pi_state->refcount));
619
620                         /*
621                          * When pi_state->owner is NULL then the owner died
622                          * and another waiter is on the fly. pi_state->owner
623                          * is fixed up by the task which acquires
624                          * pi_state->rt_mutex.
625                          *
626                          * We do not check for pid == 0 which can happen when
627                          * the owner died and robust_list_exit() cleared the
628                          * TID.
629                          */
630                         if (pid && pi_state->owner) {
631                                 /*
632                                  * Bail out if user space manipulated the
633                                  * futex value.
634                                  */
635                                 if (pid != task_pid_vnr(pi_state->owner))
636                                         return -EINVAL;
637                         }
638
639                         atomic_inc(&pi_state->refcount);
640                         *ps = pi_state;
641
642                         return 0;
643                 }
644         }
645
646         /*
647          * We are the first waiter - try to look up the real owner and attach
648          * the new pi_state to it, but bail out when TID = 0
649          */
650         if (!pid)
651                 return -ESRCH;
652         p = futex_find_get_task(pid);
653         if (!p)
654                 return -ESRCH;
655
656         /*
657          * We need to look at the task state flags to figure out
658          * whether the task is exiting. To guard against do_exit()
659          * changing the task flags concurrently, we do this holding
660          * p->pi_lock:
661          */
662         raw_spin_lock_irq(&p->pi_lock);
663         if (unlikely(p->flags & PF_EXITING)) {
664                 /*
665                  * The task is on the way out. When PF_EXITPIDONE is
666                  * set, we know that the task has finished the
667                  * cleanup:
668                  */
669                 int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
670
671                 raw_spin_unlock_irq(&p->pi_lock);
672                 put_task_struct(p);
673                 return ret;
674         }
675
676         pi_state = alloc_pi_state();
677
678         /*
679          * Initialize the pi_mutex in locked state and make 'p'
680          * the owner of it:
681          */
682         rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
683
684         /* Store the key for possible exit cleanups: */
685         pi_state->key = *key;
686
687         WARN_ON(!list_empty(&pi_state->list));
688         list_add(&pi_state->list, &p->pi_state_list);
689         pi_state->owner = p;
690         raw_spin_unlock_irq(&p->pi_lock);
691
692         put_task_struct(p);
693
694         *ps = pi_state;
695
696         return 0;
697 }
698
699 /**
700  * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
701  * @uaddr:              the pi futex user address
702  * @hb:                 the pi futex hash bucket
703  * @key:                the futex key associated with uaddr and hb
704  * @ps:                 the pi_state pointer where we store the result of the
705  *                      lookup
706  * @task:               the task to perform the atomic lock work for.  This will
707  *                      be "current" except in the case of requeue pi.
708  * @set_waiters:        force setting the FUTEX_WAITERS bit (1) or not (0)
709  *
710  * Return:
711  *  0 - ready to wait;
712  *  1 - acquired the lock;
713  * <0 - error
714  *
715  * The hb->lock and futex_key refs shall be held by the caller.
716  */
717 static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
718                                 union futex_key *key,
719                                 struct futex_pi_state **ps,
720                                 struct task_struct *task, int set_waiters)
721 {
722         int lock_taken, ret, force_take = 0;
723         u32 uval, newval, curval, vpid = task_pid_vnr(task);
724
725 retry:
726         ret = lock_taken = 0;
727
728         /*
729          * To avoid races, we attempt to take the lock here again
730          * (by doing a 0 -> TID atomic cmpxchg), while holding all
731          * the locks. It will most likely not succeed.
732          */
733         newval = vpid;
734         if (set_waiters)
735                 newval |= FUTEX_WAITERS;
736
737         if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
738                 return -EFAULT;
739
740         /*
741          * Detect deadlocks.
742          */
743         if (unlikely((curval & FUTEX_TID_MASK) == vpid))
744                 return -EDEADLK;
745
746         /*
747          * Surprise - we got the lock. Just return to userspace:
748          */
749         if (unlikely(!curval))
750                 return 1;
751
752         uval = curval;
753
754         /*
755          * Set the FUTEX_WAITERS flag, so the owner will know it has someone
756          * to wake at the next unlock.
757          */
758         newval = curval | FUTEX_WAITERS;
759
760         /*
761          * Should we force take the futex? See below.
762          */
763         if (unlikely(force_take)) {
764                 /*
765                  * Keep the OWNER_DIED and the WAITERS bit and set the
766                  * new TID value.
767                  */
768                 newval = (curval & ~FUTEX_TID_MASK) | vpid;
769                 force_take = 0;
770                 lock_taken = 1;
771         }
772
773         if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
774                 return -EFAULT;
775         if (unlikely(curval != uval))
776                 goto retry;
777
778         /*
779          * We took the lock due to a forced takeover.
780          */
781         if (unlikely(lock_taken))
782                 return 1;
783
784         /*
785          * We don't have the lock. Look up the PI state (or create it if
786          * we are the first waiter):
787          */
788         ret = lookup_pi_state(uval, hb, key, ps);
789
790         if (unlikely(ret)) {
791                 switch (ret) {
792                 case -ESRCH:
793                         /*
794                          * We failed to find an owner for this
795                          * futex. So we have no pi_state to block
796                          * on. This can happen in two cases:
797                          *
798                          * 1) The owner died
799                          * 2) A stale FUTEX_WAITERS bit
800                          *
801                          * Re-read the futex value.
802                          */
803                         if (get_futex_value_locked(&curval, uaddr))
804                                 return -EFAULT;
805
806                         /*
807                          * If the owner died or we have a stale
808                          * WAITERS bit the owner TID in the user space
809                          * futex is 0.
810                          */
811                         if (!(curval & FUTEX_TID_MASK)) {
812                                 force_take = 1;
813                                 goto retry;
814                         }
815                 default:
816                         break;
817                 }
818         }
819
820         return ret;
821 }
822
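/*
 * Editor's sketch of the userspace counterpart (illustrative, raw
 * syscalls): the uncontended fast path is the same 0 -> TID cmpxchg
 * done entirely in userspace; only on contention is FUTEX_LOCK_PI
 * invoked, and the 0 -> TID attempt replayed by futex_lock_pi_atomic()
 * above under the hb lock:
 *
 *	pid_t tid = syscall(SYS_gettid);
 *	if (__sync_val_compare_and_swap(uaddr, 0, tid) != 0)
 *		syscall(SYS_futex, uaddr, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 */
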
823 /**
824  * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
825  * @q:  The futex_q to unqueue
826  *
827  * The q->lock_ptr must not be NULL and must be held by the caller.
828  */
829 static void __unqueue_futex(struct futex_q *q)
830 {
831         struct futex_hash_bucket *hb;
832
833         if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
834             || WARN_ON(plist_node_empty(&q->list)))
835                 return;
836
837         hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
838         plist_del(&q->list, &hb->chain);
839 }
840
841 /*
842  * The hash bucket lock must be held when this is called.
843  * Afterwards, the futex_q must not be accessed.
844  */
845 static void wake_futex(struct futex_q *q)
846 {
847         struct task_struct *p = q->task;
848
849         if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
850                 return;
851
852         /*
853          * We set q->lock_ptr = NULL _before_ we wake up the task. If
854          * a non-futex wake up happens on another CPU then the task
855          * might exit and p would dereference a non-existing task
856          * struct. Prevent this by holding a reference on p across the
857          * wake up.
858          */
859         get_task_struct(p);
860
861         __unqueue_futex(q);
862         /*
863          * The waiting task can free the futex_q as soon as
864          * q->lock_ptr = NULL is written, without taking any locks. A
865          * memory barrier is required here to prevent the following
866          * store to lock_ptr from getting ahead of the plist_del.
867          */
868         smp_wmb();
869         q->lock_ptr = NULL;
870
871         wake_up_state(p, TASK_NORMAL);
872         put_task_struct(p);
873 }
874
875 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
876 {
877         struct task_struct *new_owner;
878         struct futex_pi_state *pi_state = this->pi_state;
879         u32 uninitialized_var(curval), newval;
880
881         if (!pi_state)
882                 return -EINVAL;
883
884         /*
885          * If current does not own the pi_state then the futex is
886          * inconsistent and user space fiddled with the futex value.
887          */
888         if (pi_state->owner != current)
889                 return -EINVAL;
890
891         raw_spin_lock(&pi_state->pi_mutex.wait_lock);
892         new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
893
894         /*
895          * It is possible that the next waiter (the one that brought
896          * this owner to the kernel) timed out and is no longer
897          * waiting on the lock.
898          */
899         if (!new_owner)
900                 new_owner = this->task;
901
902         /*
903          * We pass it to the next owner. (The WAITERS bit is always
904          * kept enabled while there is PI state around. We must also
905          * preserve the owner died bit.)
906          */
907         if (!(uval & FUTEX_OWNER_DIED)) {
908                 int ret = 0;
909
910                 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
911
912                 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
913                         ret = -EFAULT;
914                 else if (curval != uval)
915                         ret = -EINVAL;
916                 if (ret) {
917                         raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
918                         return ret;
919                 }
920         }
921
922         raw_spin_lock_irq(&pi_state->owner->pi_lock);
923         WARN_ON(list_empty(&pi_state->list));
924         list_del_init(&pi_state->list);
925         raw_spin_unlock_irq(&pi_state->owner->pi_lock);
926
927         raw_spin_lock_irq(&new_owner->pi_lock);
928         WARN_ON(!list_empty(&pi_state->list));
929         list_add(&pi_state->list, &new_owner->pi_state_list);
930         pi_state->owner = new_owner;
931         raw_spin_unlock_irq(&new_owner->pi_lock);
932
933         raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
934         rt_mutex_unlock(&pi_state->pi_mutex);
935
936         return 0;
937 }
938
939 static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
940 {
941         u32 uninitialized_var(oldval);
942
943         /*
944          * There is no waiter, so we unlock the futex. The owner died
945          * bit need not be preserved here. We are the owner:
946          */
947         if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
948                 return -EFAULT;
949         if (oldval != uval)
950                 return -EAGAIN;
951
952         return 0;
953 }
954
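/*
 * Editor's sketch of the matching userspace unlock (illustrative): an
 * uncontended PI futex is released with a TID -> 0 cmpxchg, and the
 * kernel is entered only when FUTEX_WAITERS or FUTEX_OWNER_DIED is set
 * in the futex word:
 *
 *	pid_t tid = syscall(SYS_gettid);
 *	if (__sync_val_compare_and_swap(uaddr, tid, 0) != tid)
 *		syscall(SYS_futex, uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 */
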
955 /*
956  * Express the locking dependencies for lockdep:
957  */
958 static inline void
959 double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
960 {
961         if (hb1 <= hb2) {
962                 spin_lock(&hb1->lock);
963                 if (hb1 < hb2)
964                         spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
965         } else { /* hb1 > hb2 */
966                 spin_lock(&hb2->lock);
967                 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
968         }
969 }
970
971 static inline void
972 double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
973 {
974         spin_unlock(&hb1->lock);
975         if (hb1 != hb2)
976                 spin_unlock(&hb2->lock);
977 }
978
979 /*
980  * Wake up waiters matching bitset queued on this futex (uaddr).
981  */
982 static int
983 futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
984 {
985         struct futex_hash_bucket *hb;
986         struct futex_q *this, *next;
987         struct plist_head *head;
988         union futex_key key = FUTEX_KEY_INIT;
989         int ret;
990
991         if (!bitset)
992                 return -EINVAL;
993
994         ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
995         if (unlikely(ret != 0))
996                 goto out;
997
998         hb = hash_futex(&key);
999         spin_lock(&hb->lock);
1000         head = &hb->chain;
1001
1002         plist_for_each_entry_safe(this, next, head, list) {
1003                 if (match_futex(&this->key, &key)) {
1004                         if (this->pi_state || this->rt_waiter) {
1005                                 ret = -EINVAL;
1006                                 break;
1007                         }
1008
1009                         /* Check if one of the bits is set in both bitsets */
1010                         if (!(this->bitset & bitset))
1011                                 continue;
1012
1013                         wake_futex(this);
1014                         if (++ret >= nr_wake)
1015                                 break;
1016                 }
1017         }
1018
1019         spin_unlock(&hb->lock);
1020         put_futex_key(&key);
1021 out:
1022         return ret;
1023 }
1024
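/*
 * Editor's sketch of the userspace side (illustrative): a plain
 * FUTEX_WAKE is a bitset wake with FUTEX_BITSET_MATCH_ANY, while
 * FUTEX_WAKE_BITSET passes an explicit mask as the sixth syscall
 * argument (val3):
 *
 *	syscall(SYS_futex, uaddr, FUTEX_WAKE, 1, NULL, NULL, 0);
 *	syscall(SYS_futex, uaddr, FUTEX_WAKE_BITSET, INT_MAX,
 *		NULL, NULL, 0x1);	// wake only waiters with bit 0 set
 */
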
1025 /*
1026  * Wake up all waiters hashed on the physical page that is mapped
1027  * to this virtual address:
1028  */
1029 static int
1030 futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
1031               int nr_wake, int nr_wake2, int op)
1032 {
1033         union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1034         struct futex_hash_bucket *hb1, *hb2;
1035         struct plist_head *head;
1036         struct futex_q *this, *next;
1037         int ret, op_ret;
1038
1039 retry:
1040         ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
1041         if (unlikely(ret != 0))
1042                 goto out;
1043         ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
1044         if (unlikely(ret != 0))
1045                 goto out_put_key1;
1046
1047         hb1 = hash_futex(&key1);
1048         hb2 = hash_futex(&key2);
1049
1050 retry_private:
1051         double_lock_hb(hb1, hb2);
1052         op_ret = futex_atomic_op_inuser(op, uaddr2);
1053         if (unlikely(op_ret < 0)) {
1054
1055                 double_unlock_hb(hb1, hb2);
1056
1057 #ifndef CONFIG_MMU
1058                 /*
1059                  * we don't get EFAULT from MMU faults if we don't have an MMU,
1060                  * but we might get them from range checking
1061                  */
1062                 ret = op_ret;
1063                 goto out_put_keys;
1064 #endif
1065
1066                 if (unlikely(op_ret != -EFAULT)) {
1067                         ret = op_ret;
1068                         goto out_put_keys;
1069                 }
1070
1071                 ret = fault_in_user_writeable(uaddr2);
1072                 if (ret)
1073                         goto out_put_keys;
1074
1075                 if (!(flags & FLAGS_SHARED))
1076                         goto retry_private;
1077
1078                 put_futex_key(&key2);
1079                 put_futex_key(&key1);
1080                 goto retry;
1081         }
1082
1083         head = &hb1->chain;
1084
1085         plist_for_each_entry_safe(this, next, head, list) {
1086                 if (match_futex(&this->key, &key1)) {
1087                         if (this->pi_state || this->rt_waiter) {
1088                                 ret = -EINVAL;
1089                                 goto out_unlock;
1090                         }
1091                         wake_futex(this);
1092                         if (++ret >= nr_wake)
1093                                 break;
1094                 }
1095         }
1096
1097         if (op_ret > 0) {
1098                 head = &hb2->chain;
1099
1100                 op_ret = 0;
1101                 plist_for_each_entry_safe(this, next, head, list) {
1102                         if (match_futex(&this->key, &key2)) {
1103                                 if (this->pi_state || this->rt_waiter) {
1104                                         ret = -EINVAL;
1105                                         goto out_unlock;
1106                                 }
1107                                 wake_futex(this);
1108                                 if (++op_ret >= nr_wake2)
1109                                         break;
1110                         }
1111                 }
1112                 ret += op_ret;
1113         }
1114
1115 out_unlock:
1116         double_unlock_hb(hb1, hb2);
1117 out_put_keys:
1118         put_futex_key(&key2);
1119 out_put_key1:
1120         put_futex_key(&key1);
1121 out:
1122         return ret;
1123 }
1124
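/*
 * Editor's note: the 'op' argument encodes an atomic read-modify-write
 * of *uaddr2 plus a comparison against its old value; the nr_wake2
 * waiters on uaddr2 are woken only if the comparison holds.  For
 * instance (sketch), glibc's old condvar code used
 *
 *	op = FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_GT, 1);
 *
 * meaning "store 0 to *uaddr2, then wake uaddr2 waiters if the old
 * value was greater than 1".
 */
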
1125 /**
1126  * requeue_futex() - Requeue a futex_q from one hb to another
1127  * @q:          the futex_q to requeue
1128  * @hb1:        the source hash_bucket
1129  * @hb2:        the target hash_bucket
1130  * @key2:       the new key for the requeued futex_q
1131  */
1132 static inline
1133 void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1134                    struct futex_hash_bucket *hb2, union futex_key *key2)
1135 {
1136
1137         /*
1138          * If key1 and key2 hash to the same bucket, no need to
1139          * requeue.
1140          */
1141         if (likely(&hb1->chain != &hb2->chain)) {
1142                 plist_del(&q->list, &hb1->chain);
1143                 plist_add(&q->list, &hb2->chain);
1144                 q->lock_ptr = &hb2->lock;
1145         }
1146         get_futex_key_refs(key2);
1147         q->key = *key2;
1148 }
1149
1150 /**
1151  * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1152  * @q:          the futex_q
1153  * @key:        the key of the requeue target futex
1154  * @hb:         the hash_bucket of the requeue target futex
1155  *
1156  * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1157  * target futex if it is uncontended or via a lock steal.  Set the futex_q key
1158  * to the requeue target futex so the waiter can detect the wakeup on the right
1159  * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1160  * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
1161  * to protect access to the pi_state to fixup the owner later.  Must be called
1162  * with both q->lock_ptr and hb->lock held.
1163  */
1164 static inline
1165 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1166                            struct futex_hash_bucket *hb)
1167 {
1168         get_futex_key_refs(key);
1169         q->key = *key;
1170
1171         __unqueue_futex(q);
1172
1173         WARN_ON(!q->rt_waiter);
1174         q->rt_waiter = NULL;
1175
1176         q->lock_ptr = &hb->lock;
1177
1178         wake_up_state(q->task, TASK_NORMAL);
1179 }
1180
1181 /**
1182  * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1183  * @pifutex:            the user address of the to futex
1184  * @hb1:                the from futex hash bucket, must be locked by the caller
1185  * @hb2:                the to futex hash bucket, must be locked by the caller
1186  * @key1:               the from futex key
1187  * @key2:               the to futex key
1188  * @ps:                 address to store the pi_state pointer
1189  * @set_waiters:        force setting the FUTEX_WAITERS bit (1) or not (0)
1190  *
1191  * Try to get the lock on behalf of the top waiter if we can do it atomically.
1192  * Wake the top waiter if we succeed.  If the caller specified set_waiters,
1193  * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1194  * hb1 and hb2 must be held by the caller.
1195  *
1196  * Return:
1197  *  0 - failed to acquire the lock atomically;
1198  *  1 - acquired the lock;
1199  * <0 - error
1200  */
1201 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1202                                  struct futex_hash_bucket *hb1,
1203                                  struct futex_hash_bucket *hb2,
1204                                  union futex_key *key1, union futex_key *key2,
1205                                  struct futex_pi_state **ps, int set_waiters)
1206 {
1207         struct futex_q *top_waiter = NULL;
1208         u32 curval;
1209         int ret;
1210
1211         if (get_futex_value_locked(&curval, pifutex))
1212                 return -EFAULT;
1213
1214         /*
1215          * Find the top_waiter and determine if there are additional waiters.
1216          * If the caller intends to requeue more than 1 waiter to pifutex,
1217          * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1218          * as we have means to handle the possible fault.  If not, don't set
1219  * the bit unnecessarily as it will force the subsequent unlock to enter
1220          * the kernel.
1221          */
1222         top_waiter = futex_top_waiter(hb1, key1);
1223
1224         /* There are no waiters, nothing for us to do. */
1225         if (!top_waiter)
1226                 return 0;
1227
1228         /* Ensure we requeue to the expected futex. */
1229         if (!match_futex(top_waiter->requeue_pi_key, key2))
1230                 return -EINVAL;
1231
1232         /*
1233          * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
1234          * the contended case or if set_waiters is 1.  The pi_state is returned
1235          * in ps in contended cases.
1236          */
1237         ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1238                                    set_waiters);
1239         if (ret == 1)
1240                 requeue_pi_wake_futex(top_waiter, key2, hb2);
1241
1242         return ret;
1243 }
1244
1245 /**
1246  * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1247  * @uaddr1:     source futex user address
1248  * @flags:      futex flags (FLAGS_SHARED, etc.)
1249  * @uaddr2:     target futex user address
1250  * @nr_wake:    number of waiters to wake (must be 1 for requeue_pi)
1251  * @nr_requeue: number of waiters to requeue (0-INT_MAX)
1252  * @cmpval:     @uaddr1 expected value (or %NULL)
1253  * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
1254  *              pi futex (pi to pi requeue is not supported)
1255  *
1256  * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1257  * uaddr2 atomically on behalf of the top waiter.
1258  *
1259  * Return:
1260  * >=0 - on success, the number of tasks requeued or woken;
1261  *  <0 - on error
1262  */
1263 static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1264                          u32 __user *uaddr2, int nr_wake, int nr_requeue,
1265                          u32 *cmpval, int requeue_pi)
1266 {
1267         union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1268         int drop_count = 0, task_count = 0, ret;
1269         struct futex_pi_state *pi_state = NULL;
1270         struct futex_hash_bucket *hb1, *hb2;
1271         struct plist_head *head1;
1272         struct futex_q *this, *next;
1273         u32 curval2;
1274
1275         if (requeue_pi) {
1276                 /*
1277                  * requeue_pi requires a pi_state, try to allocate it now
1278                  * without any locks in case it fails.
1279                  */
1280                 if (refill_pi_state_cache())
1281                         return -ENOMEM;
1282                 /*
1283                  * requeue_pi must wake as many tasks as it can, up to nr_wake
1284                  * + nr_requeue, since it acquires the rt_mutex prior to
1285                  * returning to userspace, so as to not leave the rt_mutex with
1286                  * waiters and no owner.  However, second and third wake-ups
1287                  * cannot be predicted as they involve race conditions with the
1288                  * first wake and a fault while looking up the pi_state.  Both
1289                  * pthread_cond_signal() and pthread_cond_broadcast() should
1290                  * use nr_wake=1.
1291                  */
1292                 if (nr_wake != 1)
1293                         return -EINVAL;
1294         }
1295
1296 retry:
1297         if (pi_state != NULL) {
1298                 /*
1299                  * We will have to lookup the pi_state again, so free this one
1300                  * to keep the accounting correct.
1301                  */
1302                 free_pi_state(pi_state);
1303                 pi_state = NULL;
1304         }
1305
1306         ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
1307         if (unlikely(ret != 0))
1308                 goto out;
1309         ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
1310                             requeue_pi ? VERIFY_WRITE : VERIFY_READ);
1311         if (unlikely(ret != 0))
1312                 goto out_put_key1;
1313
1314         hb1 = hash_futex(&key1);
1315         hb2 = hash_futex(&key2);
1316
1317 retry_private:
1318         double_lock_hb(hb1, hb2);
1319
1320         if (likely(cmpval != NULL)) {
1321                 u32 curval;
1322
1323                 ret = get_futex_value_locked(&curval, uaddr1);
1324
1325                 if (unlikely(ret)) {
1326                         double_unlock_hb(hb1, hb2);
1327
1328                         ret = get_user(curval, uaddr1);
1329                         if (ret)
1330                                 goto out_put_keys;
1331
1332                         if (!(flags & FLAGS_SHARED))
1333                                 goto retry_private;
1334
1335                         put_futex_key(&key2);
1336                         put_futex_key(&key1);
1337                         goto retry;
1338                 }
1339                 if (curval != *cmpval) {
1340                         ret = -EAGAIN;
1341                         goto out_unlock;
1342                 }
1343         }
1344
1345         if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
1346                 /*
1347                  * Attempt to acquire uaddr2 and wake the top waiter. If we
1348                  * intend to requeue waiters, force setting the FUTEX_WAITERS
1349                  * bit.  We force this here where we are able to easily handle
1350                  * faults rather in the requeue loop below.
1351                  */
1352                 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
1353                                                  &key2, &pi_state, nr_requeue);
1354
1355                 /*
1356                  * At this point the top_waiter has either taken uaddr2 or is
1357                  * waiting on it.  If the former, then the pi_state will not
1358                  * exist yet, look it up one more time to ensure we have a
1359                  * reference to it.
1360                  */
1361                 if (ret == 1) {
1362                         WARN_ON(pi_state);
1363                         drop_count++;
1364                         task_count++;
1365                         ret = get_futex_value_locked(&curval2, uaddr2);
1366                         if (!ret)
1367                                 ret = lookup_pi_state(curval2, hb2, &key2,
1368                                                       &pi_state);
1369                 }
1370
1371                 switch (ret) {
1372                 case 0:
1373                         break;
1374                 case -EFAULT:
1375                         double_unlock_hb(hb1, hb2);
1376                         put_futex_key(&key2);
1377                         put_futex_key(&key1);
1378                         ret = fault_in_user_writeable(uaddr2);
1379                         if (!ret)
1380                                 goto retry;
1381                         goto out;
1382                 case -EAGAIN:
1383                         /* The owner was exiting, try again. */
1384                         double_unlock_hb(hb1, hb2);
1385                         put_futex_key(&key2);
1386                         put_futex_key(&key1);
1387                         cond_resched();
1388                         goto retry;
1389                 default:
1390                         goto out_unlock;
1391                 }
1392         }
1393
1394         head1 = &hb1->chain;
1395         plist_for_each_entry_safe(this, next, head1, list) {
1396                 if (task_count - nr_wake >= nr_requeue)
1397                         break;
1398
1399                 if (!match_futex(&this->key, &key1))
1400                         continue;
1401
1402                 /*
1403                  * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1404                  * be paired with each other and no other futex ops.
1405                  *
1406                  * We should never be requeueing a futex_q with a pi_state,
1407                  * which is awaiting a futex_unlock_pi().
1408                  */
1409                 if ((requeue_pi && !this->rt_waiter) ||
1410                     (!requeue_pi && this->rt_waiter) ||
1411                     this->pi_state) {
1412                         ret = -EINVAL;
1413                         break;
1414                 }
1415
1416                 /*
1417                  * Wake nr_wake waiters.  For requeue_pi, if we acquired the
1418                  * lock, we already woke the top_waiter.  If not, it will be
1419                  * woken by futex_unlock_pi().
1420                  */
1421                 if (++task_count <= nr_wake && !requeue_pi) {
1422                         wake_futex(this);
1423                         continue;
1424                 }
1425
1426                 /* Ensure we requeue to the expected futex for requeue_pi. */
1427                 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
1428                         ret = -EINVAL;
1429                         break;
1430                 }
1431
1432                 /*
1433                  * Requeue nr_requeue waiters and possibly one more in the case
1434                  * of requeue_pi if we couldn't acquire the lock atomically.
1435                  */
1436                 if (requeue_pi) {
1437                         /* Prepare the waiter to take the rt_mutex. */
1438                         atomic_inc(&pi_state->refcount);
1439                         this->pi_state = pi_state;
1440                         ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1441                                                         this->rt_waiter,
1442                                                         this->task, 1);
1443                         if (ret == 1) {
1444                                 /* We got the lock. */
1445                                 requeue_pi_wake_futex(this, &key2, hb2);
1446                                 drop_count++;
1447                                 continue;
1448                         } else if (ret) {
1449                                 /* -EDEADLK */
1450                                 this->pi_state = NULL;
1451                                 free_pi_state(pi_state);
1452                                 goto out_unlock;
1453                         }
1454                 }
1455                 requeue_futex(this, hb1, hb2, &key2);
1456                 drop_count++;
1457         }
1458
1459 out_unlock:
1460         double_unlock_hb(hb1, hb2);
1461
1462         /*
1463          * drop_futex_key_refs() must be called outside the spinlocks. During
1464          * the requeue we moved futex_q's from the hash bucket at key1 to the
1465          * one at key2 and updated their key pointer.  We no longer need to
1466          * hold the references to key1.
1467          */
1468         while (--drop_count >= 0)
1469                 drop_futex_key_refs(&key1);
1470
1471 out_put_keys:
1472         put_futex_key(&key2);
1473 out_put_key1:
1474         put_futex_key(&key1);
1475 out:
1476         if (pi_state != NULL)
1477                 free_pi_state(pi_state);
1478         return ret ? ret : task_count;
1479 }
1480
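/*
 * Editor's sketch of a classic user (illustrative): a condition
 * variable broadcast wakes one waiter and requeues the rest onto the
 * mutex instead of waking a thundering herd.  With raw syscalls the
 * nr_requeue count travels in the timeout slot:
 *
 *	syscall(SYS_futex, &cond_futex, FUTEX_CMP_REQUEUE, 1, INT_MAX,
 *		&mutex_futex, cond_val);
 */
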
1481 /* The key must be already stored in q->key. */
1482 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1483         __acquires(&hb->lock)
1484 {
1485         struct futex_hash_bucket *hb;
1486
1487         hb = hash_futex(&q->key);
1488         q->lock_ptr = &hb->lock;
1489
1490         spin_lock(&hb->lock);
1491         return hb;
1492 }
1493
1494 static inline void
1495 queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
1496         __releases(&hb->lock)
1497 {
1498         spin_unlock(&hb->lock);
1499 }
1500
1501 /**
1502  * queue_me() - Enqueue the futex_q on the futex_hash_bucket
1503  * @q:  The futex_q to enqueue
1504  * @hb: The destination hash bucket
1505  *
1506  * The hb->lock must be held by the caller, and is released here. A call to
1507  * queue_me() is typically paired with exactly one call to unqueue_me().  The
1508  * exceptions involve the PI-related operations, which may use unqueue_me_pi()
1509  * or nothing if the unqueue is done as part of the wake process and the
1510  * unqueue state is implicit in the state of the woken task (see
1511  * futex_wait_requeue_pi() for an example).
1512  */
1513 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1514         __releases(&hb->lock)
1515 {
1516         int prio;
1517
1518         /*
1519          * The priority used to register this element is
1520          * - either the real thread-priority for the real-time threads
1521          * (i.e. threads with a priority lower than MAX_RT_PRIO)
1522          * - or MAX_RT_PRIO for non-RT threads.
1523          * Thus, all RT-threads are woken first in priority order, and
1524          * the others are woken last, in FIFO order.
1525          */
1526         prio = min(current->normal_prio, MAX_RT_PRIO);
1527
1528         plist_node_init(&q->list, prio);
1529         plist_add(&q->list, &hb->chain);
1530         q->task = current;
1531         spin_unlock(&hb->lock);
1532 }
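
/*
 * Worked example (a sketch, assuming the usual MAX_RT_PRIO of 100): a
 * SCHED_FIFO waiter whose normal_prio is 20 is inserted at plist
 * priority 20 and sorts ahead of every SCHED_OTHER waiter, all of
 * which queue at priority 100 and are therefore woken in FIFO order
 * of arrival.
 */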
1533
1534 /**
1535  * unqueue_me() - Remove the futex_q from its futex_hash_bucket
1536  * @q:  The futex_q to unqueue
1537  *
1538  * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
1539  * be paired with exactly one earlier call to queue_me().
1540  *
1541  * Return:
1542  *   1 - if the futex_q was still queued (and we removed it);
1543  *   0 - if the futex_q was already removed by the waking thread
1544  */
1545 static int unqueue_me(struct futex_q *q)
1546 {
1547         spinlock_t *lock_ptr;
1548         int ret = 0;
1549
1550         /* In the common case we don't take the spinlock, which is nice. */
1551 retry:
1552         lock_ptr = q->lock_ptr;
1553         barrier();
1554         if (lock_ptr != NULL) {
1555                 spin_lock(lock_ptr);
1556                 /*
1557                  * q->lock_ptr can change between reading it and
1558                  * spin_lock(), causing us to take the wrong lock.  This
1559                  * corrects the race condition.
1560                  *
1561                  * Reasoning goes like this: if we have the wrong lock,
1562                  * q->lock_ptr must have changed (maybe several times)
1563                  * between reading it and the spin_lock().  It can
1564                  * change again after the spin_lock() but only if it was
1565                  * already changed before the spin_lock().  It cannot,
1566                  * however, change back to the original value.  Therefore
1567                  * we can detect whether we acquired the correct lock.
1568                  */
1569                 if (unlikely(lock_ptr != q->lock_ptr)) {
1570                         spin_unlock(lock_ptr);
1571                         goto retry;
1572                 }
1573                 __unqueue_futex(q);
1574
1575                 BUG_ON(q->pi_state);
1576
1577                 spin_unlock(lock_ptr);
1578                 ret = 1;
1579         }
1580
1581         drop_futex_key_refs(&q->key);
1582         return ret;
1583 }
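
/*
 * Concrete instance of the race handled above: a concurrent
 * futex_requeue() can move q from hash bucket A to bucket B, changing
 * q->lock_ptr from &A->lock to &B->lock between our read and the
 * spin_lock(). The recheck then fails and we retry with the new
 * pointer. Moving q back into bucket A would require taking &A->lock,
 * which we now hold, so the pointer cannot revert underneath us.
 */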
1584
1585 /*
1586  * PI futexes cannot be requeued and must remove themselves from the
1587  * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
1588  * and dropped here.
1589  */
1590 static void unqueue_me_pi(struct futex_q *q)
1591         __releases(q->lock_ptr)
1592 {
1593         __unqueue_futex(q);
1594
1595         BUG_ON(!q->pi_state);
1596         free_pi_state(q->pi_state);
1597         q->pi_state = NULL;
1598
1599         spin_unlock(q->lock_ptr);
1600 }
1601
1602 /*
1603  * Fixup the pi_state owner with the new owner.
1604  *
1605  * Must be called with the hash bucket lock held and mm->sem held for
1606  * non-private futexes.
1607  */
1608 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1609                                 struct task_struct *newowner)
1610 {
1611         u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
1612         struct futex_pi_state *pi_state = q->pi_state;
1613         struct task_struct *oldowner = pi_state->owner;
1614         u32 uval, uninitialized_var(curval), newval;
1615         int ret;
1616
1617         /* Owner died? */
1618         if (!pi_state->owner)
1619                 newtid |= FUTEX_OWNER_DIED;
1620
1621         /*
1622          * We are here either because we stole the rtmutex from the
1623          * previous highest priority waiter or we are the highest priority
1624          * waiter but failed to get the rtmutex the first time.
1625          * We have to write the new owner's TID into the user space variable.
1626          * This must be atomic as we have to preserve the owner died bit here.
1627          *
1628          * Note: We write the user space value _before_ changing the pi_state
1629          * because we can fault here. Imagine swapped out pages or a fork
1630          * that marked all the anonymous memory readonly for cow.
1631          *
1632          * Modifying pi_state _before_ the user space value would
1633          * leave the pi_state in an inconsistent state when we fault
1634          * here, because we need to drop the hash bucket lock to
1635          * handle the fault. This might be observed in the PID check
1636          * in lookup_pi_state.
1637          */
1638 retry:
1639         if (get_futex_value_locked(&uval, uaddr))
1640                 goto handle_fault;
1641
1642         while (1) {
1643                 newval = (uval & FUTEX_OWNER_DIED) | newtid;
1644
1645                 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
1646                         goto handle_fault;
1647                 if (curval == uval)
1648                         break;
1649                 uval = curval;
1650         }
1651
1652         /*
1653          * We fixed up user space. Now we need to fix the pi_state
1654          * itself.
1655          */
1656         if (pi_state->owner != NULL) {
1657                 raw_spin_lock_irq(&pi_state->owner->pi_lock);
1658                 WARN_ON(list_empty(&pi_state->list));
1659                 list_del_init(&pi_state->list);
1660                 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
1661         }
1662
1663         pi_state->owner = newowner;
1664
1665         raw_spin_lock_irq(&newowner->pi_lock);
1666         WARN_ON(!list_empty(&pi_state->list));
1667         list_add(&pi_state->list, &newowner->pi_state_list);
1668         raw_spin_unlock_irq(&newowner->pi_lock);
1669         return 0;
1670
1671         /*
1672          * To handle the page fault we need to drop the hash bucket
1673          * lock here. That gives the other task (either the highest priority
1674          * waiter itself or the task which stole the rtmutex) the
1675          * chance to try the fixup of the pi_state. So once we are
1676          * back from handling the fault we need to check the pi_state
1677          * after reacquiring the hash bucket lock and before trying to
1678          * do another fixup. When the fixup has been done already we
1679          * simply return.
1680          */
1681 handle_fault:
1682         spin_unlock(q->lock_ptr);
1683
1684         ret = fault_in_user_writeable(uaddr);
1685
1686         spin_lock(q->lock_ptr);
1687
1688         /*
1689          * Check if someone else fixed it for us:
1690          */
1691         if (pi_state->owner != oldowner)
1692                 return 0;
1693
1694         if (ret)
1695                 return ret;
1696
1697         goto retry;
1698 }
1699
1700 static long futex_wait_restart(struct restart_block *restart);
1701
1702 /**
1703  * fixup_owner() - Post-lock pi_state and corner case management
1704  * @uaddr:      user address of the futex
1705  * @q:          futex_q (contains pi_state and access to the rt_mutex)
1706  * @locked:     if the attempt to take the rt_mutex succeeded (1) or not (0)
1707  *
1708  * After attempting to lock an rt_mutex, this function is called to cleanup
1709  * the pi_state owner as well as handle race conditions that may allow us to
1710  * acquire the lock. Must be called with the hb lock held.
1711  *
1712  * Return:
1713  *  1 - success, lock taken;
1714  *  0 - success, lock not taken;
1715  * <0 - on error (-EFAULT)
1716  */
1717 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
1718 {
1719         struct task_struct *owner;
1720         int ret = 0;
1721
1722         if (locked) {
1723                 /*
1724                  * Got the lock. We might not be the anticipated owner if we
1725                  * did a lock-steal - fix up the PI-state in that case:
1726                  */
1727                 if (q->pi_state->owner != current)
1728                         ret = fixup_pi_state_owner(uaddr, q, current);
1729                 goto out;
1730         }
1731
1732         /*
1733          * Catch the rare case where the lock was released while we were
1734          * on the way back, before we locked the hash bucket.
1735          */
1736         if (q->pi_state->owner == current) {
1737                 /*
1738                  * Try to get the rt_mutex now. This might fail as some other
1739                  * task acquired the rt_mutex after we removed ourselves from the
1740                  * rt_mutex waiters list.
1741                  */
1742                 if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
1743                         locked = 1;
1744                         goto out;
1745                 }
1746
1747                 /*
1748                  * pi_state is incorrect: some other task did a lock steal and
1749                  * we returned due to timeout or signal without taking the
1750                  * rt_mutex. Too late.
1751                  */
1752                 raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
1753                 owner = rt_mutex_owner(&q->pi_state->pi_mutex);
1754                 if (!owner)
1755                         owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
1756                 raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
1757                 ret = fixup_pi_state_owner(uaddr, q, owner);
1758                 goto out;
1759         }
1760
1761         /*
1762          * Paranoia check. If we did not take the lock, then we should not be
1763          * the owner of the rt_mutex.
1764          */
1765         if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
1766                 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
1767                                 "pi-state %p\n", ret,
1768                                 q->pi_state->pi_mutex.owner,
1769                                 q->pi_state->owner);
1770
1771 out:
1772         return ret ? ret : locked;
1773 }
1774
1775 /**
1776  * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
1777  * @hb:         the futex hash bucket, must be locked by the caller
1778  * @q:          the futex_q to queue up on
1779  * @timeout:    the prepared hrtimer_sleeper, or null for no timeout
1780  */
1781 static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
1782                                 struct hrtimer_sleeper *timeout)
1783 {
1784         /*
1785          * The task state is guaranteed to be set before another task can
1786          * wake it. set_current_state() is implemented using set_mb() and
1787          * queue_me() calls spin_unlock() upon completion, both serializing
1788          * access to the hash list and forcing another memory barrier.
1789          */
1790         set_current_state(TASK_INTERRUPTIBLE);
1791         queue_me(q, hb);
1792
1793         /* Arm the timer */
1794         if (timeout) {
1795                 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
1796                 if (!hrtimer_active(&timeout->timer))
1797                         timeout->task = NULL;
1798         }
1799
1800         /*
1801          * If we have been removed from the hash list, then another task
1802          * has tried to wake us, and we can skip the call to schedule().
1803          */
1804         if (likely(!plist_node_empty(&q->list))) {
1805                 /*
1806                  * If the timer has already expired, current will already be
1807                  * flagged for rescheduling. Only call schedule if there
1808                  * is no timeout, or if it has yet to expire.
1809                  */
1810                 if (!timeout || timeout->task)
1811                         schedule();
1812         }
1813         __set_current_state(TASK_RUNNING);
1814 }
1815
1816 /**
1817  * futex_wait_setup() - Prepare to wait on a futex
1818  * @uaddr:      the futex userspace address
1819  * @val:        the expected value
1820  * @flags:      futex flags (FLAGS_SHARED, etc.)
1821  * @q:          the associated futex_q
1822  * @hb:         storage for hash_bucket pointer to be returned to caller
1823  *
1824  * Set up the futex_q and locate the hash_bucket.  Get the futex value and
1825  * compare it with the expected value.  Handle atomic faults internally.
1826  * Return with the hb lock held and a q.key reference on success, and unlocked
1827  * with no q.key reference on failure.
1828  *
1829  * Return:
1830  *  0 - uaddr contains val and hb has been locked;
1831  * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
1832  */
1833 static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
1834                            struct futex_q *q, struct futex_hash_bucket **hb)
1835 {
1836         u32 uval;
1837         int ret;
1838
1839         /*
1840          * Access the page AFTER the hash-bucket is locked.
1841          * Order is important:
1842          *
1843          *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
1844          *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
1845          *
1846          * The basic logical guarantee of a futex is that it blocks ONLY
1847          * if cond(var) is known to be true at the time of blocking, for
1848          * any cond.  If we locked the hash-bucket after testing *uaddr, that
1849          * would open a race condition where we could block indefinitely with
1850          * cond(var) false, which would violate the guarantee.
1851          *
1852          * On the other hand, we insert q and release the hash-bucket only
1853          * after testing *uaddr.  This guarantees that futex_wait() will NOT
1854          * absorb a wakeup if *uaddr does not match the desired value
1855          * while the syscall executes.
1856          */
1857 retry:
1858         ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
1859         if (unlikely(ret != 0))
1860                 return ret;
1861
1862 retry_private:
1863         *hb = queue_lock(q);
1864
1865         ret = get_futex_value_locked(&uval, uaddr);
1866
1867         if (ret) {
1868                 queue_unlock(q, *hb);
1869
1870                 ret = get_user(uval, uaddr);
1871                 if (ret)
1872                         goto out;
1873
1874                 if (!(flags & FLAGS_SHARED))
1875                         goto retry_private;
1876
1877                 put_futex_key(&q->key);
1878                 goto retry;
1879         }
1880
1881         if (uval != val) {
1882                 queue_unlock(q, *hb);
1883                 ret = -EWOULDBLOCK;
1884         }
1885
1886 out:
1887         if (ret)
1888                 put_futex_key(&q->key);
1889         return ret;
1890 }
1891
1892 static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
1893                       ktime_t *abs_time, u32 bitset)
1894 {
1895         struct hrtimer_sleeper timeout, *to = NULL;
1896         struct restart_block *restart;
1897         struct futex_hash_bucket *hb;
1898         struct futex_q q = futex_q_init;
1899         int ret;
1900
1901         if (!bitset)
1902                 return -EINVAL;
1903         q.bitset = bitset;
1904
1905         if (abs_time) {
1906                 to = &timeout;
1907
1908                 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
1909                                       CLOCK_REALTIME : CLOCK_MONOTONIC,
1910                                       HRTIMER_MODE_ABS);
1911                 hrtimer_init_sleeper(to, current);
1912                 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
1913                                              current->timer_slack_ns);
1914         }
1915
1916 retry:
1917         /*
1918          * Prepare to wait on uaddr. On success, holds hb lock and increments
1919          * q.key refs.
1920          */
1921         ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
1922         if (ret)
1923                 goto out;
1924
1925         /* queue_me and wait for wakeup, timeout, or a signal. */
1926         futex_wait_queue_me(hb, &q, to);
1927
1928         /* If we were woken (and unqueued), we succeeded, whatever. */
1929         ret = 0;
1930         /* unqueue_me() drops q.key ref */
1931         if (!unqueue_me(&q))
1932                 goto out;
1933         ret = -ETIMEDOUT;
1934         if (to && !to->task)
1935                 goto out;
1936
1937         /*
1938          * We expect signal_pending(current), but we might be the
1939          * victim of a spurious wakeup as well.
1940          */
1941         if (!signal_pending(current))
1942                 goto retry;
1943
1944         ret = -ERESTARTSYS;
1945         if (!abs_time)
1946                 goto out;
1947
1948         restart = &current_thread_info()->restart_block;
1949         restart->fn = futex_wait_restart;
1950         restart->futex.uaddr = uaddr;
1951         restart->futex.val = val;
1952         restart->futex.time = abs_time->tv64;
1953         restart->futex.bitset = bitset;
1954         restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
1955
1956         ret = -ERESTART_RESTARTBLOCK;
1957
1958 out:
1959         if (to) {
1960                 hrtimer_cancel(&to->timer);
1961                 destroy_hrtimer_on_stack(&to->timer);
1962         }
1963         return ret;
1964 }
1965
1966
1967 static long futex_wait_restart(struct restart_block *restart)
1968 {
1969         u32 __user *uaddr = restart->futex.uaddr;
1970         ktime_t t, *tp = NULL;
1971
1972         if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
1973                 t.tv64 = restart->futex.time;
1974                 tp = &t;
1975         }
1976         restart->fn = do_no_restart_syscall;
1977
1978         return (long)futex_wait(uaddr, restart->futex.flags,
1979                                 restart->futex.val, tp, restart->futex.bitset);
1980 }
1981
1982
1983 /*
1984  * Userspace tried a 0 -> TID atomic transition of the futex value
1985  * and failed. The kernel side here does the whole locking operation:
1986  * if there are waiters then it will block, it does PI, etc. (Due to
1987  * races the kernel might see a 0 value of the futex too.)
1988  */
1989 static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
1990                          ktime_t *time, int trylock)
1991 {
1992         struct hrtimer_sleeper timeout, *to = NULL;
1993         struct futex_hash_bucket *hb;
1994         struct futex_q q = futex_q_init;
1995         int res, ret;
1996
1997         if (refill_pi_state_cache())
1998                 return -ENOMEM;
1999
2000         if (time) {
2001                 to = &timeout;
2002                 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
2003                                       HRTIMER_MODE_ABS);
2004                 hrtimer_init_sleeper(to, current);
2005                 hrtimer_set_expires(&to->timer, *time);
2006         }
2007
2008 retry:
2009         ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
2010         if (unlikely(ret != 0))
2011                 goto out;
2012
2013 retry_private:
2014         hb = queue_lock(&q);
2015
2016         ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
2017         if (unlikely(ret)) {
2018                 switch (ret) {
2019                 case 1:
2020                         /* We got the lock. */
2021                         ret = 0;
2022                         goto out_unlock_put_key;
2023                 case -EFAULT:
2024                         goto uaddr_faulted;
2025                 case -EAGAIN:
2026                         /*
2027                          * Task is exiting and we just wait for the
2028                          * exit to complete.
2029                          */
2030                         queue_unlock(&q, hb);
2031                         put_futex_key(&q.key);
2032                         cond_resched();
2033                         goto retry;
2034                 default:
2035                         goto out_unlock_put_key;
2036                 }
2037         }
2038
2039         /*
2040          * Only actually queue now that the atomic ops are done:
2041          */
2042         queue_me(&q, hb);
2043
2044         WARN_ON(!q.pi_state);
2045         /*
2046          * Block on the PI mutex:
2047          */
2048         if (!trylock)
2049                 ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
2050         else {
2051                 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
2052                 /* Fixup the trylock return value: */
2053                 ret = ret ? 0 : -EWOULDBLOCK;
2054         }
2055
2056         spin_lock(q.lock_ptr);
2057         /*
2058          * Fixup the pi_state owner and possibly acquire the lock if we
2059          * haven't already.
2060          */
2061         res = fixup_owner(uaddr, &q, !ret);
2062         /*
2063          * If fixup_owner() returned an error, propagate that.  If it acquired
2064          * the lock, clear our -ETIMEDOUT or -EINTR.
2065          */
2066         if (res)
2067                 ret = (res < 0) ? res : 0;
2068
2069         /*
2070          * If fixup_owner() faulted and was unable to handle the fault, unlock
2071          * it and return the fault to userspace.
2072          */
2073         if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
2074                 rt_mutex_unlock(&q.pi_state->pi_mutex);
2075
2076         /* Unqueue and drop the lock */
2077         unqueue_me_pi(&q);
2078
2079         goto out_put_key;
2080
2081 out_unlock_put_key:
2082         queue_unlock(&q, hb);
2083
2084 out_put_key:
2085         put_futex_key(&q.key);
2086 out:
2087         if (to)
2088                 destroy_hrtimer_on_stack(&to->timer);
2089         return ret != -EINTR ? ret : -ERESTARTNOINTR;
2090
2091 uaddr_faulted:
2092         queue_unlock(&q, hb);
2093
2094         ret = fault_in_user_writeable(uaddr);
2095         if (ret)
2096                 goto out_put_key;
2097
2098         if (!(flags & FLAGS_SHARED))
2099                 goto retry_private;
2100
2101         put_futex_key(&q.key);
2102         goto retry;
2103 }
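
/*
 * For reference, a sketch of the userspace fast path that pairs with
 * futex_lock_pi(); "futex" is a hypothetical userspace futex word and
 * cmpxchg() a userspace atomic primitive, not kernel code:
 *
 *	if (cmpxchg(futex, 0, gettid()) != 0)
 *		syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *
 * The kernel is entered only when the uncontended 0 -> TID transition
 * fails.
 */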
2104
2105 /*
2106  * Userspace attempted a TID -> 0 atomic transition, and failed.
2107  * This is the in-kernel slowpath: we look up the PI state (if any),
2108  * and do the rt-mutex unlock.
2109  */
2110 static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
2111 {
2112         struct futex_hash_bucket *hb;
2113         struct futex_q *this, *next;
2114         struct plist_head *head;
2115         union futex_key key = FUTEX_KEY_INIT;
2116         u32 uval, vpid = task_pid_vnr(current);
2117         int ret;
2118
2119 retry:
2120         if (get_user(uval, uaddr))
2121                 return -EFAULT;
2122         /*
2123          * We release only a lock we actually own:
2124          */
2125         if ((uval & FUTEX_TID_MASK) != vpid)
2126                 return -EPERM;
2127
2128         ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
2129         if (unlikely(ret != 0))
2130                 goto out;
2131
2132         hb = hash_futex(&key);
2133         spin_lock(&hb->lock);
2134
2135         /*
2136          * To avoid races, try to do the TID -> 0 atomic transition
2137          * again. If it succeeds then we can return without waking
2138          * anyone else up:
2139          */
2140         if (!(uval & FUTEX_OWNER_DIED) &&
2141             cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
2142                 goto pi_faulted;
2143         /*
2144          * Rare case: we managed to release the lock atomically,
2145          * no need to wake anyone else up:
2146          */
2147         if (unlikely(uval == vpid))
2148                 goto out_unlock;
2149
2150         /*
2151          * Ok, other tasks may need to be woken up - check waiters
2152          * and do the wakeup if necessary:
2153          */
2154         head = &hb->chain;
2155
2156         plist_for_each_entry_safe(this, next, head, list) {
2157                 if (!match_futex(&this->key, &key))
2158                         continue;
2159                 ret = wake_futex_pi(uaddr, uval, this);
2160                 /*
2161                  * The atomic access to the futex value
2162                  * generated a pagefault, so retry the
2163                  * user-access and the wakeup:
2164                  */
2165                 if (ret == -EFAULT)
2166                         goto pi_faulted;
2167                 goto out_unlock;
2168         }
2169         /*
2170          * No waiters - kernel unlocks the futex:
2171          */
2172         if (!(uval & FUTEX_OWNER_DIED)) {
2173                 ret = unlock_futex_pi(uaddr, uval);
2174                 if (ret == -EFAULT)
2175                         goto pi_faulted;
2176         }
2177
2178 out_unlock:
2179         spin_unlock(&hb->lock);
2180         put_futex_key(&key);
2181
2182 out:
2183         return ret;
2184
2185 pi_faulted:
2186         spin_unlock(&hb->lock);
2187         put_futex_key(&key);
2188
2189         ret = fault_in_user_writeable(uaddr);
2190         if (!ret)
2191                 goto retry;
2192
2193         return ret;
2194 }
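
/*
 * The matching userspace unlock fast path (again a sketch): the owner
 * attempts the TID -> 0 transition itself and enters the kernel only
 * when the futex word carries FUTEX_WAITERS or FUTEX_OWNER_DIED:
 *
 *	if (cmpxchg(futex, gettid(), 0) != gettid())
 *		syscall(SYS_futex, futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 */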
2195
2196 /**
2197  * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2198  * @hb:         the hash_bucket futex_q was originally enqueued on
2199  * @q:          the futex_q woken while waiting to be requeued
2200  * @key2:       the futex_key of the requeue target futex
2201  * @timeout:    the timeout associated with the wait (NULL if none)
2202  *
2203  * Detect if the task was woken on the initial futex as opposed to the requeue
2204  * target futex.  If so, determine if it was a timeout or a signal that caused
2205  * the wakeup and return the appropriate error code to the caller.  Must be
2206  * called with the hb lock held.
2207  *
2208  * Return:
2209  *  0 - no early wakeup detected;
2210  * <0 - -ETIMEDOUT or -ERESTARTNOINTR
2211  */
2212 static inline
2213 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2214                                    struct futex_q *q, union futex_key *key2,
2215                                    struct hrtimer_sleeper *timeout)
2216 {
2217         int ret = 0;
2218
2219         /*
2220          * With the hb lock held, we avoid races while we process the wakeup.
2221          * We only need to hold hb (and not hb2) to ensure atomicity as the
2222          * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2223          * It can't be requeued from uaddr2 to something else since we don't
2224          * support a PI-aware source futex for requeue.
2225          */
2226         if (!match_futex(&q->key, key2)) {
2227                 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2228                 /*
2229                  * We were woken prior to requeue by a timeout or a signal.
2230                  * Unqueue the futex_q and determine which it was.
2231                  */
2232                 plist_del(&q->list, &hb->chain);
2233
2234                 /* Handle spurious wakeups gracefully */
2235                 ret = -EWOULDBLOCK;
2236                 if (timeout && !timeout->task)
2237                         ret = -ETIMEDOUT;
2238                 else if (signal_pending(current))
2239                         ret = -ERESTARTNOINTR;
2240         }
2241         return ret;
2242 }
2243
2244 /**
2245  * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2246  * @uaddr:      the futex we initially wait on (non-pi)
2247  * @flags:      futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
2248  *              the same type, no requeueing from private to shared, etc.
2249  * @val:        the expected value of uaddr
2250  * @abs_time:   absolute timeout
2251  * @bitset:     32 bit wakeup bitset set by userspace, defaults to all
2252  * @uaddr2:     the pi futex we will take prior to returning to user-space
2253  *
2254  * The caller will wait on uaddr and will be requeued by futex_requeue() to
2255  * uaddr2, which must be PI-aware and distinct from uaddr.  Normal wakeup will wake
2256  * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
2257  * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
2258  * without one, the pi logic would not know which task to boost/deboost, if
2259  * there was a need to.
2260  *
2261  * We call schedule in futex_wait_queue_me() when we enqueue and return there
2262  * via the following:
2263  * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2264  * 2) wakeup on uaddr2 after a requeue
2265  * 3) signal
2266  * 4) timeout
2267  *
2268  * If 3, cleanup and return -ERESTARTNOINTR.
2269  *
2270  * If 2, we may then block on trying to take the rt_mutex and return via:
2271  * 5) successful lock
2272  * 6) signal
2273  * 7) timeout
2274  * 8) other lock acquisition failure
2275  *
2276  * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
2277  *
2278  * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2279  *
2280  * Return:
2281  *  0 - On success;
2282  * <0 - On error
2283  */
2284 static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
2285                                  u32 val, ktime_t *abs_time, u32 bitset,
2286                                  u32 __user *uaddr2)
2287 {
2288         struct hrtimer_sleeper timeout, *to = NULL;
2289         struct rt_mutex_waiter rt_waiter;
2290         struct rt_mutex *pi_mutex = NULL;
2291         struct futex_hash_bucket *hb;
2292         union futex_key key2 = FUTEX_KEY_INIT;
2293         struct futex_q q = futex_q_init;
2294         int res, ret;
2295
2296         if (uaddr == uaddr2)
2297                 return -EINVAL;
2298
2299         if (!bitset)
2300                 return -EINVAL;
2301
2302         if (abs_time) {
2303                 to = &timeout;
2304                 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2305                                       CLOCK_REALTIME : CLOCK_MONOTONIC,
2306                                       HRTIMER_MODE_ABS);
2307                 hrtimer_init_sleeper(to, current);
2308                 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2309                                              current->timer_slack_ns);
2310         }
2311
2312         /*
2313          * The waiter is allocated on our stack, manipulated by the requeue
2314          * code while we sleep on uaddr.
2315          */
2316         debug_rt_mutex_init_waiter(&rt_waiter);
2317         rt_waiter.task = NULL;
2318
2319         ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
2320         if (unlikely(ret != 0))
2321                 goto out;
2322
2323         q.bitset = bitset;
2324         q.rt_waiter = &rt_waiter;
2325         q.requeue_pi_key = &key2;
2326
2327         /*
2328          * Prepare to wait on uaddr. On success, increments q.key (key1) ref
2329          * count.
2330          */
2331         ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2332         if (ret)
2333                 goto out_key2;
2334
2335         /* Queue the futex_q, drop the hb lock, wait for wakeup. */
2336         futex_wait_queue_me(hb, &q, to);
2337
2338         spin_lock(&hb->lock);
2339         ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2340         spin_unlock(&hb->lock);
2341         if (ret)
2342                 goto out_put_keys;
2343
2344         /*
2345          * In order for us to be here, we know our q.key == key2, and since
2346          * we took the hb->lock above, we also know that futex_requeue() has
2347          * completed and we no longer have to concern ourselves with a wakeup
2348          * race with the atomic proxy lock acquisition by the requeue code. The
2349          * futex_requeue dropped our key1 reference and incremented our key2
2350          * reference count.
2351          */
2352
2353         /* Check if the requeue code acquired the second futex for us. */
2354         if (!q.rt_waiter) {
2355                 /*
2356                  * Got the lock. We might not be the anticipated owner if we
2357                  * did a lock-steal - fix up the PI-state in that case.
2358                  */
2359                 if (q.pi_state && (q.pi_state->owner != current)) {
2360                         spin_lock(q.lock_ptr);
2361                         ret = fixup_pi_state_owner(uaddr2, &q, current);
2362                         spin_unlock(q.lock_ptr);
2363                 }
2364         } else {
2365                 /*
2366                  * We have been woken up by futex_unlock_pi(), a timeout, or a
2367                  * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
2368                  * the pi_state.
2369                  */
2370                 WARN_ON(!q.pi_state);
2371                 pi_mutex = &q.pi_state->pi_mutex;
2372                 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
2373                 debug_rt_mutex_free_waiter(&rt_waiter);
2374
2375                 spin_lock(q.lock_ptr);
2376                 /*
2377                  * Fixup the pi_state owner and possibly acquire the lock if we
2378                  * haven't already.
2379                  */
2380                 res = fixup_owner(uaddr2, &q, !ret);
2381                 /*
2382                  * If fixup_owner() returned an error, propagate that.  If it
2383                  * acquired the lock, clear -ETIMEDOUT or -EINTR.
2384                  */
2385                 if (res)
2386                         ret = (res < 0) ? res : 0;
2387
2388                 /* Unqueue and drop the lock. */
2389                 unqueue_me_pi(&q);
2390         }
2391
2392         /*
2393          * If fixup_pi_state_owner() faulted and was unable to handle the
2394          * fault, unlock the rt_mutex and return the fault to userspace.
2395          */
2396         if (ret == -EFAULT) {
2397                 if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
2398                         rt_mutex_unlock(pi_mutex);
2399         } else if (ret == -EINTR) {
2400                 /*
2401                  * We've already been requeued, but cannot restart by calling
2402                  * futex_lock_pi() directly. We could restart this syscall, but
2403                  * it would detect that the user space "val" changed and return
2404                  * -EWOULDBLOCK.  Save the overhead of the restart and return
2405                  * -EWOULDBLOCK directly.
2406                  */
2407                 ret = -EWOULDBLOCK;
2408         }
2409
2410 out_put_keys:
2411         put_futex_key(&q.key);
2412 out_key2:
2413         put_futex_key(&key2);
2414
2415 out:
2416         if (to) {
2417                 hrtimer_cancel(&to->timer);
2418                 destroy_hrtimer_on_stack(&to->timer);
2419         }
2420         return ret;
2421 }
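
/*
 * Userspace pairing sketch for the requeue-PI operations above, roughly
 * how a PI-aware condition variable can be built ("cond" and "mutex"
 * are hypothetical futex words):
 *
 *	waiter:	syscall(SYS_futex, &cond, FUTEX_WAIT_REQUEUE_PI, val,
 *			timeout, &mutex, 0);
 *	waker:	syscall(SYS_futex, &cond, FUTEX_CMP_REQUEUE_PI, 1,
 *			(void *)nr_requeue, &mutex, cond_val);
 *
 * The waker wakes exactly one task (nr_wake == 1) so the rt_mutex
 * backing "mutex" always has a well-defined owner to boost.
 */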
2422
2423 /*
2424  * Support for robust futexes: the kernel cleans up held futexes at
2425  * thread exit time.
2426  *
2427  * Implementation: user-space maintains a per-thread list of locks it
2428  * is holding. Upon do_exit(), the kernel carefully walks this list,
2429  * and marks all locks that are owned by this thread with the
2430  * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2431  * always manipulated with the lock held, so the list is private and
2432  * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2433  * field, to allow the kernel to clean up if the thread dies after
2434  * acquiring the lock, but just before it could have added itself to
2435  * the list. There can only be one such pending lock.
2436  */
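
/*
 * A minimal sketch of the userspace registration side (illustrative
 * only; real implementations live in the C library, and "struct
 * my_lock" is a hypothetical userspace lock type):
 *
 *	struct robust_list_head head = {
 *		.list		 = { .next = &head.list }, // empty list
 *		.futex_offset	 = offsetof(struct my_lock, futex),
 *		.list_op_pending = NULL,
 *	};
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * futex_offset tells exit_robust_list() where the futex word lives
 * relative to each list entry.
 */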
2437
2438 /**
2439  * sys_set_robust_list() - Set the robust-futex list head of a task
2440  * @head:       pointer to the list-head
2441  * @len:        length of the list-head, as userspace expects
2442  */
2443 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2444                 size_t, len)
2445 {
2446         if (!futex_cmpxchg_enabled)
2447                 return -ENOSYS;
2448         /*
2449          * The kernel knows only one size for now:
2450          */
2451         if (unlikely(len != sizeof(*head)))
2452                 return -EINVAL;
2453
2454         current->robust_list = head;
2455
2456         return 0;
2457 }
2458
2459 /**
2460  * sys_get_robust_list() - Get the robust-futex list head of a task
2461  * @pid:        pid of the process [zero for current task]
2462  * @head_ptr:   pointer to a list-head pointer, the kernel fills it in
2463  * @len_ptr:    pointer to a length field, the kernel fills in the header size
2464  */
2465 SYSCALL_DEFINE3(get_robust_list, int, pid,
2466                 struct robust_list_head __user * __user *, head_ptr,
2467                 size_t __user *, len_ptr)
2468 {
2469         struct robust_list_head __user *head;
2470         unsigned long ret;
2471         struct task_struct *p;
2472
2473         if (!futex_cmpxchg_enabled)
2474                 return -ENOSYS;
2475
2476         rcu_read_lock();
2477
2478         ret = -ESRCH;
2479         if (!pid)
2480                 p = current;
2481         else {
2482                 p = find_task_by_vpid(pid);
2483                 if (!p)
2484                         goto err_unlock;
2485         }
2486
2487         ret = -EPERM;
2488         if (!ptrace_may_access(p, PTRACE_MODE_READ))
2489                 goto err_unlock;
2490
2491         head = p->robust_list;
2492         rcu_read_unlock();
2493
2494         if (put_user(sizeof(*head), len_ptr))
2495                 return -EFAULT;
2496         return put_user(head, head_ptr);
2497
2498 err_unlock:
2499         rcu_read_unlock();
2500
2501         return ret;
2502 }
2503
2504 /*
2505  * Process a futex-list entry, check whether it's owned by the
2506  * dying task, and do notification if so:
2507  */
2508 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
2509 {
2510         u32 uval, uninitialized_var(nval), mval;
2511
2512 retry:
2513         if (get_user(uval, uaddr))
2514                 return -1;
2515
2516         if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
2517                 /*
2518                  * Ok, this dying thread is truly holding a futex
2519                  * of interest. Set the OWNER_DIED bit atomically
2520                  * via cmpxchg, and if the value had FUTEX_WAITERS
2521                  * set, wake up a waiter (if any). (We have to do a
2522                  * futex_wake() even if OWNER_DIED is already set -
2523                  * to handle the rare but possible case of recursive
2524                  * thread-death.) The rest of the cleanup is done in
2525                  * userspace.
2526                  */
2527                 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2528                 /*
2529                  * We are not holding a lock here, but we want to have
2530                  * the pagefault_disable/enable() protection because
2531                  * we want to handle the fault gracefully. If the
2532                  * access fails we try to fault in the futex with R/W
2533                  * verification via get_user_pages. get_user() above
2534                  * does not guarantee R/W access. If that fails we
2535                  * give up and leave the futex locked.
2536                  */
2537                 if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
2538                         if (fault_in_user_writeable(uaddr))
2539                                 return -1;
2540                         goto retry;
2541                 }
2542                 if (nval != uval)
2543                         goto retry;
2544
2545                 /*
2546                  * Wake robust non-PI futexes here. The wakeup of
2547                  * PI futexes happens in exit_pi_state():
2548                  */
2549                 if (!pi && (uval & FUTEX_WAITERS))
2550                         futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
2551         }
2552         return 0;
2553 }
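
/*
 * Example transition performed above: a futex word holding
 * (TID | FUTEX_WAITERS) for the dying thread is rewritten to
 * (FUTEX_WAITERS | FUTEX_OWNER_DIED), and one waiter is woken so it
 * can observe FUTEX_OWNER_DIED and run its recovery path.
 */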
2554
2555 /*
2556  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
2557  */
2558 static inline int fetch_robust_entry(struct robust_list __user **entry,
2559                                      struct robust_list __user * __user *head,
2560                                      unsigned int *pi)
2561 {
2562         unsigned long uentry;
2563
2564         if (get_user(uentry, (unsigned long __user *)head))
2565                 return -EFAULT;
2566
2567         *entry = (void __user *)(uentry & ~1UL);
2568         *pi = uentry & 1;
2569
2570         return 0;
2571 }
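
/*
 * Example: a fetched user value of 0x7f1234567001 decodes to the entry
 * pointer 0x7f1234567000 with *pi set to 1, i.e. the entry describes a
 * PI futex.
 */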
2572
2573 /*
2574  * Walk curr->robust_list (very carefully, it's a userspace list!)
2575  * and mark any locks found there dead, and notify any waiters.
2576  *
2577  * We silently return on any sign of list-walking problem.
2578  */
2579 void exit_robust_list(struct task_struct *curr)
2580 {
2581         struct robust_list_head __user *head = curr->robust_list;
2582         struct robust_list __user *entry, *next_entry, *pending;
2583         unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
2584         unsigned int uninitialized_var(next_pi);
2585         unsigned long futex_offset;
2586         int rc;
2587
2588         if (!futex_cmpxchg_enabled)
2589                 return;
2590
2591         /*
2592          * Fetch the list head (which was registered earlier, via
2593          * sys_set_robust_list()):
2594          */
2595         if (fetch_robust_entry(&entry, &head->list.next, &pi))
2596                 return;
2597         /*
2598          * Fetch the relative futex offset:
2599          */
2600         if (get_user(futex_offset, &head->futex_offset))
2601                 return;
2602         /*
2603          * Fetch any possibly pending lock-add first, and handle it
2604          * if it exists:
2605          */
2606         if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
2607                 return;
2608
2609         next_entry = NULL;      /* avoid warning with gcc */
2610         while (entry != &head->list) {
2611                 /*
2612                  * Fetch the next entry in the list before calling
2613                  * handle_futex_death:
2614                  */
2615                 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
2616                 /*
2617                  * A pending lock might already be on the list, so
2618                  * don't process it twice:
2619                  */
2620                 if (entry != pending)
2621                         if (handle_futex_death((void __user *)entry + futex_offset,
2622                                                 curr, pi))
2623                                 return;
2624                 if (rc)
2625                         return;
2626                 entry = next_entry;
2627                 pi = next_pi;
2628                 /*
2629                  * Avoid excessively long or circular lists:
2630                  */
2631                 if (!--limit)
2632                         break;
2633
2634                 cond_resched();
2635         }
2636
2637         if (pending)
2638                 handle_futex_death((void __user *)pending + futex_offset,
2639                                    curr, pip);
2640 }
2641
2642 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2643                 u32 __user *uaddr2, u32 val2, u32 val3)
2644 {
2645         int cmd = op & FUTEX_CMD_MASK;
2646         unsigned int flags = 0;
2647
2648         if (!(op & FUTEX_PRIVATE_FLAG))
2649                 flags |= FLAGS_SHARED;
2650
2651         if (op & FUTEX_CLOCK_REALTIME) {
2652                 flags |= FLAGS_CLOCKRT;
2653                 if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
2654                         return -ENOSYS;
2655         }
2656
2657         switch (cmd) {
2658         case FUTEX_LOCK_PI:
2659         case FUTEX_UNLOCK_PI:
2660         case FUTEX_TRYLOCK_PI:
2661         case FUTEX_WAIT_REQUEUE_PI:
2662         case FUTEX_CMP_REQUEUE_PI:
2663                 if (!futex_cmpxchg_enabled)
2664                         return -ENOSYS;
2665         }
2666
2667         switch (cmd) {
2668         case FUTEX_WAIT:
2669                 val3 = FUTEX_BITSET_MATCH_ANY; /* fall through */
2670         case FUTEX_WAIT_BITSET:
2671                 return futex_wait(uaddr, flags, val, timeout, val3);
2672         case FUTEX_WAKE:
2673                 val3 = FUTEX_BITSET_MATCH_ANY; /* fall through */
2674         case FUTEX_WAKE_BITSET:
2675                 return futex_wake(uaddr, flags, val, val3);
2676         case FUTEX_REQUEUE:
2677                 return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
2678         case FUTEX_CMP_REQUEUE:
2679                 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
2680         case FUTEX_WAKE_OP:
2681                 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
2682         case FUTEX_LOCK_PI:
2683                 return futex_lock_pi(uaddr, flags, val, timeout, 0);
2684         case FUTEX_UNLOCK_PI:
2685                 return futex_unlock_pi(uaddr, flags);
2686         case FUTEX_TRYLOCK_PI:
2687                 return futex_lock_pi(uaddr, flags, 0, timeout, 1);
2688         case FUTEX_WAIT_REQUEUE_PI:
2689                 val3 = FUTEX_BITSET_MATCH_ANY;
2690                 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
2691                                              uaddr2);
2692         case FUTEX_CMP_REQUEUE_PI:
2693                 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
2694         }
2695         return -ENOSYS;
2696 }
2697
2698
2699 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
2700                 struct timespec __user *, utime, u32 __user *, uaddr2,
2701                 u32, val3)
2702 {
2703         struct timespec ts;
2704         ktime_t t, *tp = NULL;
2705         u32 val2 = 0;
2706         int cmd = op & FUTEX_CMD_MASK;
2707
2708         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
2709                       cmd == FUTEX_WAIT_BITSET ||
2710                       cmd == FUTEX_WAIT_REQUEUE_PI)) {
2711                 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
2712                         return -EFAULT;
2713                 if (!timespec_valid(&ts))
2714                         return -EINVAL;
2715
2716                 t = timespec_to_ktime(ts);
2717                 if (cmd == FUTEX_WAIT)
2718                         t = ktime_add_safe(ktime_get(), t);
2719                 tp = &t;
2720         }
2721         /*
2722          * The requeue parameter is in 'utime' if cmd == FUTEX_*_REQUEUE_*;
2723          * the number of waiters to wake is in 'utime' if cmd == FUTEX_WAKE_OP.
2724          */
2725         if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
2726             cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
2727                 val2 = (u32) (unsigned long) utime;
2728
2729         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
2730 }
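
/*
 * For illustration, the classic wait/wake pairing as issued from
 * userspace (a sketch; there is no libc wrapper, so the raw syscall is
 * used, and "addr"/"expected" are hypothetical):
 *
 *	// waiter: block while *addr still contains the expected value
 *	syscall(SYS_futex, addr, FUTEX_WAIT_PRIVATE, expected, NULL, NULL, 0);
 *	// waker: after changing *addr, wake up to INT_MAX blocked waiters
 *	syscall(SYS_futex, addr, FUTEX_WAKE_PRIVATE, INT_MAX, NULL, NULL, 0);
 */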
2731
2732 static int __init futex_init(void)
2733 {
2734         u32 curval;
2735         int i;
2736
2737         /*
2738          * This cmpxchg is expected to fail, and that is the point: some
2739          * arch implementations do runtime detection of the
2740          * futex_atomic_cmpxchg_inatomic() functionality, and we want to
2741          * know whether it works before we call into any of the complex
2742          * code paths. We also want to prevent registration of robust
2743          * lists in that case. NULL is guaranteed to fault, so we get
2744          * -EFAULT on a functional implementation of
2745          * futex_atomic_cmpxchg_inatomic(), while the non-functional
2746          * ones return -ENOSYS.
2747         if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
2748                 futex_cmpxchg_enabled = 1;
2749
2750         for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2751                 plist_head_init(&futex_queues[i].chain);
2752                 spin_lock_init(&futex_queues[i].lock);
2753         }
2754
2755         return 0;
2756 }
2757 __initcall(futex_init);