x86: remove pointless uaccess_32.h complexity
author	Linus Torvalds <torvalds@linux-foundation.org>
Sun, 22 May 2016 21:19:37 +0000 (14:19 -0700)
committer	Alex Shi <alex.shi@linaro.org>
Sat, 27 Aug 2016 03:23:38 +0000 (11:23 +0800)
I'm looking at possibly merging the 32-bit and 64-bit versions of the
x86 uaccess.h implementation, but first this needs to be cleaned up.

For example, the 32-bit version of "__copy_to_user_inatomic()" is mostly
special cases for constant sizes, and those cases are never actually
relevant.  Every user except for one passes a non-constant size anyway,
and the one user that does pass a constant size is better off just
using __put_user() instead.

So get rid of the unnecessary complexity.
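
With the special cases gone, the 32-bit helper reduces to a plain
wrapper around __copy_to_user_ll().  Sketched from the resulting
uaccess_32.h (kerneldoc comment omitted):

    static __always_inline unsigned long __must_check
    __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
    {
            return __copy_to_user_ll(to, from, n);
    }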

[ The same cleanup should likely happen to __copy_from_user_inatomic()
  as well, but that one has a lot more users that I need to take a look
  at first ]
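
A minimal sketch of what the corresponding __copy_from_user_inatomic()
cleanup could look like (not part of this patch), assuming the 32-bit
version carries the same kind of constant-size switch and otherwise
falls through to __copy_from_user_ll_nozero():

    static __always_inline unsigned long
    __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
    {
            /* assumed fallback path; the constant-size cases would be dropped */
            return __copy_from_user_ll_nozero(to, from, n);
    }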

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit 5b09c3edecd37ec1a52fbd5ae97a19734edc7a77)
Signed-off-by: Alex Shi <alex.shi@linaro.org>
arch/x86/include/asm/uaccess_32.h
drivers/gpu/drm/i915/i915_gem_execbuffer.c

index 3fe0eac59462b39fc12ce28a6e22d6e347b8719a..537cc883ea29bd66ce0ce75d8bbb3eb56023df09 100644
@@ -33,46 +33,10 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
  * the specified block with access_ok() before calling this function.
  * The caller should also make sure he pins the user space address
  * so that we don't result in page fault and sleep.
- *
- * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
- * we return the initial request size (1, 2 or 4), as copy_*_user should do.
- * If a store crosses a page boundary and gets a fault, the x86 will not write
- * anything, so this is accurate.
  */
-
 static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
-       if (__builtin_constant_p(n)) {
-               unsigned long ret;
-
-               switch (n) {
-               case 1:
-                       __uaccess_begin();
-                       __put_user_size(*(u8 *)from, (u8 __user *)to,
-                                       1, ret, 1);
-                       __uaccess_end();
-                       return ret;
-               case 2:
-                       __uaccess_begin();
-                       __put_user_size(*(u16 *)from, (u16 __user *)to,
-                                       2, ret, 2);
-                       __uaccess_end();
-                       return ret;
-               case 4:
-                       __uaccess_begin();
-                       __put_user_size(*(u32 *)from, (u32 __user *)to,
-                                       4, ret, 4);
-                       __uaccess_end();
-                       return ret;
-               case 8:
-                       __uaccess_begin();
-                       __put_user_size(*(u64 *)from, (u64 __user *)to,
-                                       8, ret, 8);
-                       __uaccess_end();
-                       return ret;
-               }
-       }
        return __copy_to_user_ll(to, from, n);
 }
 
index 6ed7d63a0688384830894f4a455463bd7e9e060c..201947b4377c769d9393d718fc5b35c5000328ed 100644
@@ -513,9 +513,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
                                return ret;
 
                        if (r->presumed_offset != offset &&
-                           __copy_to_user_inatomic(&user_relocs->presumed_offset,
-                                                   &r->presumed_offset,
-                                                   sizeof(r->presumed_offset))) {
+                           __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
                                return -EFAULT;
                        }