Revert "x86/mm: Expand the exception table logic to allow new handling options"
arch/x86/include/asm/uaccess.h (firefly-linux-kernel-4.4.55)
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(-1UL)
#define USER_DS         MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)        ((a).seg == (b).seg)

#define user_addr_max() (current_thread_info()->addr_limit.seg)
#define __addr_ok(addr)         \
        ((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
        /*
         * If we have used "sizeof()" for the size,
         * we know it won't overflow the limit (but
         * it might overflow the 'addr', so it's
         * important to subtract the size from the
         * limit, not add it to the address).
         */
        if (__builtin_constant_p(size))
                return unlikely(addr > limit - size);

        /* Arbitrary sizes? Be careful about overflow */
        addr += size;
        if (unlikely(addr < size))
                return true;
        return unlikely(addr > limit);
}
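
/*
 * Worked example (illustrative sketch, not part of this header): why the
 * constant-size path checks "addr > limit - size" rather than the naive
 * "addr + size > limit".  With a 64-bit addr of 0xffffffffffffff00 and
 * size 0x200, addr + size wraps around to 0x100 and would pass a naive
 * comparison against any user-space limit; limit - size cannot wrap,
 * because a sizeof()-derived size never exceeds the limit.
 */
static inline bool example_naive_range_not_ok(unsigned long addr,
                                              unsigned long size,
                                              unsigned long limit)
{
        /* BROKEN on wraparound: 0xffffffffffffff00 + 0x200 == 0x100 */
        return addr + size > limit;
}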

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
        __chk_user_ptr(addr);                                           \
        __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) \
        likely(!__range_not_ok(addr, size, user_addr_max()))

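/*
 * Example (illustrative sketch, not part of this header): validate a
 * user buffer up front; the actual accesses can still fault and return
 * -EFAULT, e.g. when the range is valid but the pages are unmapped.
 */
static inline long example_check_user_buf(void __user *ubuf,
                                          unsigned long len)
{
        if (!access_ok(VERIFY_WRITE, ubuf, len))
                return -EFAULT;
        /* ... safe to hand ubuf to the __-prefixed accessors below ... */
        return 0;
}
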
/*
 * The exception table consists of pairs of addresses relative to the
 * exception table entry itself: the first is the address of an
 * instruction that is allowed to fault, and the second is the address
 * at which the program should continue.  No registers are modified,
 * so it is entirely up to the continuation code to figure out what to
 * do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or TLB entries.
 */

struct exception_table_entry {
        int insn, fixup;
};
/* This is not the generic standard exception_table_entry format */
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

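/*
 * Illustrative sketch (modeled on the kernel's extable helpers; the
 * example_* names are not part of this header): each entry stores
 * offsets relative to its own fields, so the absolute addresses are
 * recovered by adding the field's own address back in.  This keeps the
 * table position-independent and halves its size on 64-bit compared to
 * storing two absolute pointers.
 */
static inline unsigned long
example_ex_insn_addr(const struct exception_table_entry *x)
{
        return (unsigned long)&x->insn + x->insn;
}

static inline unsigned long
example_ex_fixup_addr(const struct exception_table_entry *x)
{
        return (unsigned long)&x->fixup + x->fixup;
}
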
extern int fixup_exception(struct pt_regs *regs);
extern int early_fixup_exception(unsigned long *ip);

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

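/*
 * Illustrative sketch (hypothetical helper, assumes BUILD_BUG_ON from
 * linux/bug.h; not part of this header): __inttype() picks unsigned
 * long for values that fit in one register and unsigned long long
 * otherwise (e.g. a u64 on 32-bit x86), so the temporary in get_user()
 * below is always a full-width integer.
 */
static inline void example_inttype_sanity(void)
{
        BUILD_BUG_ON(sizeof(__inttype(char)) < sizeof(unsigned long));
        BUILD_BUG_ON(sizeof(__inttype(long long)) < sizeof(unsigned long long));
}
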
/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)                                                \
({                                                                      \
        int __ret_gu;                                                   \
        register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);            \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
        asm volatile("call __get_user_%P3"                              \
                     : "=a" (__ret_gu), "=r" (__val_gu)                 \
                     : "0" (ptr), "i" (sizeof(*(ptr))));                \
        (x) = (__force __typeof__(*(ptr))) __val_gu;                    \
        __builtin_expect(__ret_gu, 0);                                  \
})

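/*
 * Example (illustrative sketch, not part of this header): fetch one
 * value from user space with the checked accessor; no prior
 * access_ok() call is needed here.
 */
static inline int example_get_flag(int __user *uptr, int *kval)
{
        return get_user(*kval, uptr);   /* 0 on success, -EFAULT on error */
}
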
#define __put_user_x(size, x, ptr, __ret_pu)                    \
        asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
                     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)                        \
        asm volatile(ASM_STAC "\n"                                      \
                     "1:        movl %%eax,0(%2)\n"                     \
                     "2:        movl %%edx,4(%2)\n"                     \
                     "3: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "4:        movl %3,%0\n"                           \
                     "  jmp 3b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 4b)                               \
                     _ASM_EXTABLE(2b, 4b)                               \
                     : "=r" (err)                                       \
                     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)                                  \
        asm volatile(ASM_STAC "\n"                                      \
                     "1:        movl %%eax,0(%1)\n"                     \
                     "2:        movl %%edx,4(%1)\n"                     \
                     "3: " ASM_CLAC "\n"                                \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     _ASM_EXTABLE_EX(2b, 3b)                            \
                     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)                         \
        asm volatile("call __put_user_8" : "=a" (__ret_pu)      \
                     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
        __put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)  \
        __put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)                                        \
({                                                              \
        int __ret_pu;                                           \
        __typeof__(*(ptr)) __pu_val;                            \
        __chk_user_ptr(ptr);                                    \
        might_fault();                                          \
        __pu_val = x;                                           \
        switch (sizeof(*(ptr))) {                               \
        case 1:                                                 \
                __put_user_x(1, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 2:                                                 \
                __put_user_x(2, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 4:                                                 \
                __put_user_x(4, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 8:                                                 \
                __put_user_x8(__pu_val, ptr, __ret_pu);         \
                break;                                          \
        default:                                                \
                __put_user_x(X, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        }                                                       \
        __builtin_expect(__ret_pu, 0);                          \
})

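/*
 * Example (illustrative sketch, not part of this header): the checked
 * store counterpart; the size dispatch above picks __put_user_4 for an
 * int automatically.
 */
static inline int example_put_flag(int __user *uptr, int kval)
{
        return put_user(kval, uptr);    /* 0 on success, -EFAULT on error */
}
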
#define __put_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,  \
                                   errret);                             \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

#define __put_user_size_ex(x, ptr, size)                                \
do {                                                                    \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm_ex(x, ptr, "b", "b", "iq");              \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm_ex(x, ptr, "w", "w", "ir");              \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm_ex(x, ptr, "l", "k", "ir");              \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);      \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)      (x) = __get_user_bad()
#define __get_user_asm_ex_u64(x, ptr)                   (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
         __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
         __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_u64(x, ptr, retval, errret);             \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile(ASM_STAC "\n"                                      \
                     "1:        mov"itype" %2,%"rtype"1\n"              \
                     "2: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
                     "  xor"itype" %"rtype"1,%"rtype"1\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err), ltype(x)                             \
                     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_size_ex(x, ptr, size)                                \
do {                                                                    \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm_ex(x, ptr, "b", "b", "=q");              \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm_ex(x, ptr, "w", "w", "=r");              \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm_ex(x, ptr, "l", "k", "=r");              \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_ex_u64(x, ptr);                          \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)                 \
        asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:xor"itype" %"rtype"0,%"rtype"0\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE_EX(1b, 3b)                            \
                     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
        int __pu_err;                                           \
        __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
        __builtin_expect(__pu_err, 0);                          \
})

#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        int __gu_err;                                                   \
        unsigned long __gu_val;                                         \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __builtin_expect(__gu_err, 0);                                  \
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile(ASM_STAC "\n"                                      \
                     "1:        mov"itype" %"rtype"1,%2\n"              \
                     "2: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err)                                        \
                     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)                 \
        asm volatile("1:        mov"itype" %"rtype"0,%1\n"              \
                     "2:\n"                                             \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try     do {                                            \
        current_thread_info()->uaccess_err = 0;                         \
        stac();                                                         \
        barrier();

#define uaccess_catch(err)                                              \
        clac();                                                         \
        (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);    \
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)                                              \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)                                              \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

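/*
 * Example (illustrative sketch; the struct and function are
 * hypothetical): the intended pattern for the unchecked variants - one
 * access_ok() covering the whole object, then several cheap
 * __get_user() calls into it.
 */
struct example_pair { int lo; int hi; };

static inline int example_read_pair(struct example_pair __user *up,
                                    int *lo, int *hi)
{
        if (!access_ok(VERIFY_READ, up, sizeof(*up)))
                return -EFAULT;
        if (__get_user(*lo, &up->lo))
                return -EFAULT;
        return __get_user(*hi, &up->hi);
}
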
/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *      get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try            uaccess_try
#define get_user_catch(err)     uaccess_catch(err)

#define get_user_ex(x, ptr)     do {                                    \
        unsigned long __gue_val;                                        \
        __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));       \
        (x) = (__force __typeof__(*(ptr)))__gue_val;                    \
} while (0)

#define put_user_try            uaccess_try
#define put_user_catch(err)     uaccess_catch(err)

#define put_user_ex(x, ptr)                                             \
        __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

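/*
 * Example (illustrative sketch, modeled on the signal-frame code that
 * uses this pattern; not part of this header): batch several _ex
 * accesses and collect a single error out of the catch.  The caller
 * must have validated the range with access_ok() beforehand, since the
 * _ex variants do no checking of their own.
 */
static inline int example_restore_pair(const int __user *uptr,
                                       int *a, int *b)
{
        int err = 0;

        get_user_try {
                get_user_ex(*a, &uptr[0]);
                get_user_ex(*b, &uptr[1]);
        } get_user_catch(err);

        return err;
}
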
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
        __compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)       \
({                                                                      \
        int __ret = 0;                                                  \
        __typeof__(ptr) __uval = (uval);                                \
        __typeof__(*(ptr)) __old = (old);                               \
        __typeof__(*(ptr)) __new = (new);                               \
        switch (size) {                                                 \
        case 1:                                                         \
        {                                                               \
                asm volatile("\t" ASM_STAC "\n"                         \
                        "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"          \
                        "2:\t" ASM_CLAC "\n"                            \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "q" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 2:                                                         \
        {                                                               \
                asm volatile("\t" ASM_STAC "\n"                         \
                        "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"          \
                        "2:\t" ASM_CLAC "\n"                            \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 4:                                                         \
        {                                                               \
                asm volatile("\t" ASM_STAC "\n"                         \
                        "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"          \
                        "2:\t" ASM_CLAC "\n"                            \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 8:                                                         \
        {                                                               \
                if (!IS_ENABLED(CONFIG_X86_64))                         \
                        __cmpxchg_wrong_size();                         \
                                                                        \
                asm volatile("\t" ASM_STAC "\n"                         \
                        "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"          \
                        "2:\t" ASM_CLAC "\n"                            \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        default:                                                        \
                __cmpxchg_wrong_size();                                 \
        }                                                               \
        *__uval = __old;                                                \
        __ret;                                                          \
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)               \
({                                                                      \
        access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?                \
                __user_atomic_cmpxchg_inatomic((uval), (ptr),           \
                                (old), (new), sizeof(*(ptr))) :         \
                -EFAULT;                                                \
})

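/*
 * Example (illustrative sketch, futex-style; not part of this header):
 * atomically replace *uaddr with newval if it still holds oldval;
 * *curval receives the value actually observed.  Returns 0 or -EFAULT.
 */
static inline int example_cmpxchg_user(u32 __user *uaddr, u32 *curval,
                                       u32 oldval, u32 newval)
{
        return user_atomic_cmpxchg_inatomic(curval, uaddr, oldval, newval);
}
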
/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
        int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

unsigned long __must_check _copy_from_user(void *to, const void __user *from,
                                           unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
                                         unsigned n);

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
# define copy_user_diag __compiletime_error
#else
# define copy_user_diag __compiletime_warning
#endif

extern void copy_user_diag("copy_from_user() buffer size is too small")
copy_from_user_overflow(void);
extern void copy_user_diag("copy_to_user() buffer size is too small")
copy_to_user_overflow(void) __asm__("copy_from_user_overflow");

#undef copy_user_diag

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS

extern void
__compiletime_warning("copy_from_user() buffer size is not provably correct")
__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()

extern void
__compiletime_warning("copy_to_user() buffer size is not provably correct")
__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()

#else

static inline void
__copy_from_user_overflow(int size, unsigned long count)
{
        WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

#define __copy_to_user_overflow __copy_from_user_overflow

#endif

static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        int sz = __compiletime_object_size(to);

        might_fault();

        /*
         * While we would like to have the compiler do the checking for us
         * even in the non-constant size case, any false positives there are
         * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
         * without - the [hopefully] dangerous looking nature of the warning
         * would make people go look at the respective call sites over and
         * over again just to find that there's no problem).
         *
         * And there are cases where it's just not realistic for the compiler
         * to prove the count to be in range. For example when multiple call
         * sites of a helper function - perhaps in different source files -
         * all doing proper range checking, yet the helper function not doing
         * so again.
         *
         * Therefore limit the compile time checking to the constant size
         * case, and do only runtime checking for non-constant sizes.
         */

        if (likely(sz < 0 || sz >= n))
                n = _copy_from_user(to, from, n);
        else if (__builtin_constant_p(n))
                copy_from_user_overflow();
        else
                __copy_from_user_overflow(sz, n);

        return n;
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        int sz = __compiletime_object_size(from);

        might_fault();

        /* See the comment in copy_from_user() above. */
        if (likely(sz < 0 || sz >= n))
                n = _copy_to_user(to, from, n);
        else if (__builtin_constant_p(n))
                copy_to_user_overflow();
        else
                __copy_to_user_overflow(sz, n);

        return n;
}

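/*
 * Example (illustrative sketch; the struct and function are
 * hypothetical): a fixed-size round trip.  The constant sizeof() keeps
 * n checkable against __compiletime_object_size() at build time.
 */
struct example_args { unsigned int cmd; unsigned int flags; };

static inline long example_roundtrip(struct example_args __user *uarg)
{
        struct example_args a;

        if (copy_from_user(&a, uarg, sizeof(a)))
                return -EFAULT;
        a.flags |= 1;
        if (copy_to_user(uarg, &a, sizeof(a)))
                return -EFAULT;
        return 0;
}
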
#undef __copy_from_user_overflow
#undef __copy_to_user_overflow

#endif /* _ASM_X86_UACCESS_H */