/*
 * User address space access functions.
 * The non-inlined parts of asm-i386/uaccess.h are here.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/mmx.h>
#include <asm/asm.h>

#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif

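/*
 * Example of the check below: with movsl_mask.mask == 7 (an
 * illustrative value; the real mask is set during CPU setup), a copy
 * of 64 or more bytes between buffers whose low three address bits
 * differ, say 0x1000 -> 0x2003, makes __movsl_is_ok() return 0 and
 * steers the caller away from the movsl-based path.
 */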
static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
		return 0;
#endif
	return 1;
}
#define movsl_is_ok(a1, a2, n) \
	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))

/*
 * Zero Userspace
 */

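/*
 * __do_clear_user() stores size/4 zero dwords with "rep; stosl" and
 * the size&3 tail bytes with "rep; stosb".  If stosl faults, the
 * fixup at label 3 recomputes the bytes still left to clear as
 * remaining_dwords * 4 + tail via "lea 0(%2,%0,4),%0"; that count
 * ends up in @size for the caller to return.
 */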
#define __do_clear_user(addr,size)					\
do {									\
	int __d0;							\
	might_fault();							\
	__asm__ __volatile__(						\
		"0:	rep; stosl\n"					\
		"	movl %2,%0\n"					\
		"1:	rep; stosb\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"3:	lea 0(%2,%0,4),%0\n"				\
		"	jmp 2b\n"					\
		".previous\n"						\
		_ASM_EXTABLE(0b,3b)					\
		_ASM_EXTABLE(1b,2b)					\
		: "=&c"(size), "=&D" (__d0)				\
		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0));	\
} while (0)

/**
 * clear_user: - Zero a block of memory in user space.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);
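
/*
 * Typical use, as an illustrative sketch only (ubuf and len are
 * hypothetical names for a user pointer and a byte count):
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */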

/**
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
__clear_user(void __user *to, unsigned long n)
{
	__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(__clear_user);

#ifdef CONFIG_X86_INTEL_USERCOPY
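/*
 * Unrolled copy for larger buffers on Intel CPUs: each loop
 * iteration moves 64 bytes through %eax/%edx.  The loads at labels
 * 1 and 2 ("movl 32(%4)" / "movl 64(%4)") appear to serve as
 * software prefetch; their results are discarded.  The tail is
 * finished with rep movsl/movsb, and every access has an exception
 * table entry so that a fault yields the uncopied byte count.
 */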
static unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		       "       .align 2,0x90\n"
		       "1:     movl 32(%4), %%eax\n"
		       "       cmpl $67, %0\n"
		       "       jbe 3f\n"
		       "2:     movl 64(%4), %%eax\n"
		       "       .align 2,0x90\n"
		       "3:     movl 0(%4), %%eax\n"
		       "4:     movl 4(%4), %%edx\n"
		       "5:     movl %%eax, 0(%3)\n"
		       "6:     movl %%edx, 4(%3)\n"
		       "7:     movl 8(%4), %%eax\n"
		       "8:     movl 12(%4),%%edx\n"
		       "9:     movl %%eax, 8(%3)\n"
		       "10:    movl %%edx, 12(%3)\n"
		       "11:    movl 16(%4), %%eax\n"
		       "12:    movl 20(%4), %%edx\n"
		       "13:    movl %%eax, 16(%3)\n"
		       "14:    movl %%edx, 20(%3)\n"
		       "15:    movl 24(%4), %%eax\n"
		       "16:    movl 28(%4), %%edx\n"
		       "17:    movl %%eax, 24(%3)\n"
		       "18:    movl %%edx, 28(%3)\n"
		       "19:    movl 32(%4), %%eax\n"
		       "20:    movl 36(%4), %%edx\n"
		       "21:    movl %%eax, 32(%3)\n"
		       "22:    movl %%edx, 36(%3)\n"
		       "23:    movl 40(%4), %%eax\n"
		       "24:    movl 44(%4), %%edx\n"
		       "25:    movl %%eax, 40(%3)\n"
		       "26:    movl %%edx, 44(%3)\n"
		       "27:    movl 48(%4), %%eax\n"
		       "28:    movl 52(%4), %%edx\n"
		       "29:    movl %%eax, 48(%3)\n"
		       "30:    movl %%edx, 52(%3)\n"
		       "31:    movl 56(%4), %%eax\n"
		       "32:    movl 60(%4), %%edx\n"
		       "33:    movl %%eax, 56(%3)\n"
		       "34:    movl %%edx, 60(%3)\n"
		       "       addl $-64, %0\n"
		       "       addl $64, %4\n"
		       "       addl $64, %3\n"
		       "       cmpl $63, %0\n"
		       "       ja  1b\n"
		       "35:    movl  %0, %%eax\n"
		       "       shrl  $2, %0\n"
		       "       andl  $3, %%eax\n"
		       "       cld\n"
		       "99:    rep; movsl\n"
		       "36:    movl %%eax, %0\n"
		       "37:    rep; movsb\n"
		       "100:\n"
		       ".section .fixup,\"ax\"\n"
		       "101:   lea 0(%%eax,%0,4),%0\n"
		       "       jmp 100b\n"
		       ".previous\n"
		       _ASM_EXTABLE(1b,100b)
		       _ASM_EXTABLE(2b,100b)
		       _ASM_EXTABLE(3b,100b)
		       _ASM_EXTABLE(4b,100b)
		       _ASM_EXTABLE(5b,100b)
		       _ASM_EXTABLE(6b,100b)
		       _ASM_EXTABLE(7b,100b)
		       _ASM_EXTABLE(8b,100b)
		       _ASM_EXTABLE(9b,100b)
		       _ASM_EXTABLE(10b,100b)
		       _ASM_EXTABLE(11b,100b)
		       _ASM_EXTABLE(12b,100b)
		       _ASM_EXTABLE(13b,100b)
		       _ASM_EXTABLE(14b,100b)
		       _ASM_EXTABLE(15b,100b)
		       _ASM_EXTABLE(16b,100b)
		       _ASM_EXTABLE(17b,100b)
		       _ASM_EXTABLE(18b,100b)
		       _ASM_EXTABLE(19b,100b)
		       _ASM_EXTABLE(20b,100b)
		       _ASM_EXTABLE(21b,100b)
		       _ASM_EXTABLE(22b,100b)
		       _ASM_EXTABLE(23b,100b)
		       _ASM_EXTABLE(24b,100b)
		       _ASM_EXTABLE(25b,100b)
		       _ASM_EXTABLE(26b,100b)
		       _ASM_EXTABLE(27b,100b)
		       _ASM_EXTABLE(28b,100b)
		       _ASM_EXTABLE(29b,100b)
		       _ASM_EXTABLE(30b,100b)
		       _ASM_EXTABLE(31b,100b)
		       _ASM_EXTABLE(32b,100b)
		       _ASM_EXTABLE(33b,100b)
		       _ASM_EXTABLE(34b,100b)
		       _ASM_EXTABLE(35b,100b)
		       _ASM_EXTABLE(36b,100b)
		       _ASM_EXTABLE(37b,100b)
		       _ASM_EXTABLE(99b,101b)
		       : "=&c"(size), "=&D" (d0), "=&S" (d1)
		       :  "1"(to), "2"(from), "0"(size)
		       : "eax", "edx", "memory");
	return size;
}

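/*
 * Same unrolled copy as __copy_user_intel(), but for the
 * user-to-kernel direction: on a faulting load, the fixup at label
 * 16 zero-fills the rest of the kernel destination ("rep; stosb"
 * with %eax cleared) before returning the uncopied byte count.
 */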
static unsigned long
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		       "        .align 2,0x90\n"
		       "0:      movl 32(%4), %%eax\n"
		       "        cmpl $67, %0\n"
		       "        jbe 2f\n"
		       "1:      movl 64(%4), %%eax\n"
		       "        .align 2,0x90\n"
		       "2:      movl 0(%4), %%eax\n"
		       "21:     movl 4(%4), %%edx\n"
		       "        movl %%eax, 0(%3)\n"
		       "        movl %%edx, 4(%3)\n"
		       "3:      movl 8(%4), %%eax\n"
		       "31:     movl 12(%4),%%edx\n"
		       "        movl %%eax, 8(%3)\n"
		       "        movl %%edx, 12(%3)\n"
		       "4:      movl 16(%4), %%eax\n"
		       "41:     movl 20(%4), %%edx\n"
		       "        movl %%eax, 16(%3)\n"
		       "        movl %%edx, 20(%3)\n"
		       "10:     movl 24(%4), %%eax\n"
		       "51:     movl 28(%4), %%edx\n"
		       "        movl %%eax, 24(%3)\n"
		       "        movl %%edx, 28(%3)\n"
		       "11:     movl 32(%4), %%eax\n"
		       "61:     movl 36(%4), %%edx\n"
		       "        movl %%eax, 32(%3)\n"
		       "        movl %%edx, 36(%3)\n"
		       "12:     movl 40(%4), %%eax\n"
		       "71:     movl 44(%4), %%edx\n"
		       "        movl %%eax, 40(%3)\n"
		       "        movl %%edx, 44(%3)\n"
		       "13:     movl 48(%4), %%eax\n"
		       "81:     movl 52(%4), %%edx\n"
		       "        movl %%eax, 48(%3)\n"
		       "        movl %%edx, 52(%3)\n"
		       "14:     movl 56(%4), %%eax\n"
		       "91:     movl 60(%4), %%edx\n"
		       "        movl %%eax, 56(%3)\n"
		       "        movl %%edx, 60(%3)\n"
		       "        addl $-64, %0\n"
		       "        addl $64, %4\n"
		       "        addl $64, %3\n"
		       "        cmpl $63, %0\n"
		       "        ja  0b\n"
		       "5:      movl  %0, %%eax\n"
		       "        shrl  $2, %0\n"
		       "        andl $3, %%eax\n"
		       "        cld\n"
		       "6:      rep; movsl\n"
		       "        movl %%eax,%0\n"
		       "7:      rep; movsb\n"
		       "8:\n"
		       ".section .fixup,\"ax\"\n"
		       "9:      lea 0(%%eax,%0,4),%0\n"
		       "16:     pushl %0\n"
		       "        pushl %%eax\n"
		       "        xorl %%eax,%%eax\n"
		       "        rep; stosb\n"
		       "        popl %%eax\n"
		       "        popl %0\n"
		       "        jmp 8b\n"
		       ".previous\n"
		       _ASM_EXTABLE(0b,16b)
		       _ASM_EXTABLE(1b,16b)
		       _ASM_EXTABLE(2b,16b)
		       _ASM_EXTABLE(21b,16b)
		       _ASM_EXTABLE(3b,16b)
		       _ASM_EXTABLE(31b,16b)
		       _ASM_EXTABLE(4b,16b)
		       _ASM_EXTABLE(41b,16b)
		       _ASM_EXTABLE(10b,16b)
		       _ASM_EXTABLE(51b,16b)
		       _ASM_EXTABLE(11b,16b)
		       _ASM_EXTABLE(61b,16b)
		       _ASM_EXTABLE(12b,16b)
		       _ASM_EXTABLE(71b,16b)
		       _ASM_EXTABLE(13b,16b)
		       _ASM_EXTABLE(81b,16b)
		       _ASM_EXTABLE(14b,16b)
		       _ASM_EXTABLE(91b,16b)
		       _ASM_EXTABLE(6b,9b)
		       _ASM_EXTABLE(7b,16b)
		       : "=&c"(size), "=&D" (d0), "=&S" (d1)
		       :  "1"(to), "2"(from), "0"(size)
		       : "eax", "edx", "memory");
	return size;
}

/*
 * Non-temporal-hint (cache-aware) version of __copy_user_zeroing_intel.
 * hyoshiok@miraclelinux.com
 */

static unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;

	__asm__ __volatile__(
	       "        .align 2,0x90\n"
	       "0:      movl 32(%4), %%eax\n"
	       "        cmpl $67, %0\n"
	       "        jbe 2f\n"
	       "1:      movl 64(%4), %%eax\n"
	       "        .align 2,0x90\n"
	       "2:      movl 0(%4), %%eax\n"
	       "21:     movl 4(%4), %%edx\n"
	       "        movnti %%eax, 0(%3)\n"
	       "        movnti %%edx, 4(%3)\n"
	       "3:      movl 8(%4), %%eax\n"
	       "31:     movl 12(%4),%%edx\n"
	       "        movnti %%eax, 8(%3)\n"
	       "        movnti %%edx, 12(%3)\n"
	       "4:      movl 16(%4), %%eax\n"
	       "41:     movl 20(%4), %%edx\n"
	       "        movnti %%eax, 16(%3)\n"
	       "        movnti %%edx, 20(%3)\n"
	       "10:     movl 24(%4), %%eax\n"
	       "51:     movl 28(%4), %%edx\n"
	       "        movnti %%eax, 24(%3)\n"
	       "        movnti %%edx, 28(%3)\n"
	       "11:     movl 32(%4), %%eax\n"
	       "61:     movl 36(%4), %%edx\n"
	       "        movnti %%eax, 32(%3)\n"
	       "        movnti %%edx, 36(%3)\n"
	       "12:     movl 40(%4), %%eax\n"
	       "71:     movl 44(%4), %%edx\n"
	       "        movnti %%eax, 40(%3)\n"
	       "        movnti %%edx, 44(%3)\n"
	       "13:     movl 48(%4), %%eax\n"
	       "81:     movl 52(%4), %%edx\n"
	       "        movnti %%eax, 48(%3)\n"
	       "        movnti %%edx, 52(%3)\n"
	       "14:     movl 56(%4), %%eax\n"
	       "91:     movl 60(%4), %%edx\n"
	       "        movnti %%eax, 56(%3)\n"
	       "        movnti %%edx, 60(%3)\n"
	       "        addl $-64, %0\n"
	       "        addl $64, %4\n"
	       "        addl $64, %3\n"
	       "        cmpl $63, %0\n"
	       "        ja  0b\n"
	       "        sfence\n"
	       "5:      movl  %0, %%eax\n"
	       "        shrl  $2, %0\n"
	       "        andl $3, %%eax\n"
	       "        cld\n"
	       "6:      rep; movsl\n"
	       "        movl %%eax,%0\n"
	       "7:      rep; movsb\n"
	       "8:\n"
	       ".section .fixup,\"ax\"\n"
	       "9:      lea 0(%%eax,%0,4),%0\n"
	       "16:     pushl %0\n"
	       "        pushl %%eax\n"
	       "        xorl %%eax,%%eax\n"
	       "        rep; stosb\n"
	       "        popl %%eax\n"
	       "        popl %0\n"
	       "        jmp 8b\n"
	       ".previous\n"
	       _ASM_EXTABLE(0b,16b)
	       _ASM_EXTABLE(1b,16b)
	       _ASM_EXTABLE(2b,16b)
	       _ASM_EXTABLE(21b,16b)
	       _ASM_EXTABLE(3b,16b)
	       _ASM_EXTABLE(31b,16b)
	       _ASM_EXTABLE(4b,16b)
	       _ASM_EXTABLE(41b,16b)
	       _ASM_EXTABLE(10b,16b)
	       _ASM_EXTABLE(51b,16b)
	       _ASM_EXTABLE(11b,16b)
	       _ASM_EXTABLE(61b,16b)
	       _ASM_EXTABLE(12b,16b)
	       _ASM_EXTABLE(71b,16b)
	       _ASM_EXTABLE(13b,16b)
	       _ASM_EXTABLE(81b,16b)
	       _ASM_EXTABLE(14b,16b)
	       _ASM_EXTABLE(91b,16b)
	       _ASM_EXTABLE(6b,9b)
	       _ASM_EXTABLE(7b,16b)
	       : "=&c"(size), "=&D" (d0), "=&S" (d1)
	       :  "1"(to), "2"(from), "0"(size)
	       : "eax", "edx", "memory");
	return size;
}

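/*
 * Like __copy_user_zeroing_intel_nocache(), but without the
 * zero-fill fixup: a fault simply returns the remaining byte count.
 */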
static unsigned long __copy_user_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;

	__asm__ __volatile__(
	       "        .align 2,0x90\n"
	       "0:      movl 32(%4), %%eax\n"
	       "        cmpl $67, %0\n"
	       "        jbe 2f\n"
	       "1:      movl 64(%4), %%eax\n"
	       "        .align 2,0x90\n"
	       "2:      movl 0(%4), %%eax\n"
	       "21:     movl 4(%4), %%edx\n"
	       "        movnti %%eax, 0(%3)\n"
	       "        movnti %%edx, 4(%3)\n"
	       "3:      movl 8(%4), %%eax\n"
	       "31:     movl 12(%4),%%edx\n"
	       "        movnti %%eax, 8(%3)\n"
	       "        movnti %%edx, 12(%3)\n"
	       "4:      movl 16(%4), %%eax\n"
	       "41:     movl 20(%4), %%edx\n"
	       "        movnti %%eax, 16(%3)\n"
	       "        movnti %%edx, 20(%3)\n"
	       "10:     movl 24(%4), %%eax\n"
	       "51:     movl 28(%4), %%edx\n"
	       "        movnti %%eax, 24(%3)\n"
	       "        movnti %%edx, 28(%3)\n"
	       "11:     movl 32(%4), %%eax\n"
	       "61:     movl 36(%4), %%edx\n"
	       "        movnti %%eax, 32(%3)\n"
	       "        movnti %%edx, 36(%3)\n"
	       "12:     movl 40(%4), %%eax\n"
	       "71:     movl 44(%4), %%edx\n"
	       "        movnti %%eax, 40(%3)\n"
	       "        movnti %%edx, 44(%3)\n"
	       "13:     movl 48(%4), %%eax\n"
	       "81:     movl 52(%4), %%edx\n"
	       "        movnti %%eax, 48(%3)\n"
	       "        movnti %%edx, 52(%3)\n"
	       "14:     movl 56(%4), %%eax\n"
	       "91:     movl 60(%4), %%edx\n"
	       "        movnti %%eax, 56(%3)\n"
	       "        movnti %%edx, 60(%3)\n"
	       "        addl $-64, %0\n"
	       "        addl $64, %4\n"
	       "        addl $64, %3\n"
	       "        cmpl $63, %0\n"
	       "        ja  0b\n"
	       "        sfence\n"
	       "5:      movl  %0, %%eax\n"
	       "        shrl  $2, %0\n"
	       "        andl $3, %%eax\n"
	       "        cld\n"
	       "6:      rep; movsl\n"
	       "        movl %%eax,%0\n"
	       "7:      rep; movsb\n"
	       "8:\n"
	       ".section .fixup,\"ax\"\n"
	       "9:      lea 0(%%eax,%0,4),%0\n"
	       "16:     jmp 8b\n"
	       ".previous\n"
	       _ASM_EXTABLE(0b,16b)
	       _ASM_EXTABLE(1b,16b)
	       _ASM_EXTABLE(2b,16b)
	       _ASM_EXTABLE(21b,16b)
	       _ASM_EXTABLE(3b,16b)
	       _ASM_EXTABLE(31b,16b)
	       _ASM_EXTABLE(4b,16b)
	       _ASM_EXTABLE(41b,16b)
	       _ASM_EXTABLE(10b,16b)
	       _ASM_EXTABLE(51b,16b)
	       _ASM_EXTABLE(11b,16b)
	       _ASM_EXTABLE(61b,16b)
	       _ASM_EXTABLE(12b,16b)
	       _ASM_EXTABLE(71b,16b)
	       _ASM_EXTABLE(13b,16b)
	       _ASM_EXTABLE(81b,16b)
	       _ASM_EXTABLE(14b,16b)
	       _ASM_EXTABLE(91b,16b)
	       _ASM_EXTABLE(6b,9b)
	       _ASM_EXTABLE(7b,16b)
	       : "=&c"(size), "=&D" (d0), "=&S" (d1)
	       :  "1"(to), "2"(from), "0"(size)
	       : "eax", "edx", "memory");
	return size;
}

#else

/*
 * Leave these declared but undefined.  There should be no references
 * to them.
 */
unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
					unsigned long size);
unsigned long __copy_user_intel(void __user *to, const void *from,
					unsigned long size);
unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */

/* Generic arbitrarily-sized copy.  */
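/*
 * For copies longer than 7 bytes the prologue below first moves
 * (-to) & 7 single bytes so the destination becomes 8-byte aligned
 * (e.g. a destination ending in ...5 gets 3 leading movsb's), then
 * bulk-copies with rep movsl and finishes the size&3 tail bytes with
 * rep movsb.
 */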
#define __copy_user(to, from, size)					\
do {									\
	int __d0, __d1, __d2;						\
	__asm__ __volatile__(						\
		"	cmp  $7,%0\n"					\
		"	jbe  1f\n"					\
		"	movl %1,%0\n"					\
		"	negl %0\n"					\
		"	andl $7,%0\n"					\
		"	subl %0,%3\n"					\
		"4:	rep; movsb\n"					\
		"	movl %3,%0\n"					\
		"	shrl $2,%0\n"					\
		"	andl $3,%3\n"					\
		"	.align 2,0x90\n"				\
		"0:	rep; movsl\n"					\
		"	movl %3,%0\n"					\
		"1:	rep; movsb\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"5:	addl %3,%0\n"					\
		"	jmp 2b\n"					\
		"3:	lea 0(%3,%0,4),%0\n"				\
		"	jmp 2b\n"					\
		".previous\n"						\
		_ASM_EXTABLE(4b,5b)					\
		_ASM_EXTABLE(0b,3b)					\
		_ASM_EXTABLE(1b,2b)					\
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
		: "3"(size), "0"(size), "1"(to), "2"(from)		\
		: "memory");						\
} while (0)

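/*
 * As __copy_user(), plus the zeroing fixup used on the
 * user-to-kernel path: on a fault, the rest of the kernel buffer is
 * cleared with "rep; stosb" before the uncopied count is returned.
 */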
#define __copy_user_zeroing(to, from, size)				\
do {									\
	int __d0, __d1, __d2;						\
	__asm__ __volatile__(						\
		"	cmp  $7,%0\n"					\
		"	jbe  1f\n"					\
		"	movl %1,%0\n"					\
		"	negl %0\n"					\
		"	andl $7,%0\n"					\
		"	subl %0,%3\n"					\
		"4:	rep; movsb\n"					\
		"	movl %3,%0\n"					\
		"	shrl $2,%0\n"					\
		"	andl $3,%3\n"					\
		"	.align 2,0x90\n"				\
		"0:	rep; movsl\n"					\
		"	movl %3,%0\n"					\
		"1:	rep; movsb\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"5:	addl %3,%0\n"					\
		"	jmp 6f\n"					\
		"3:	lea 0(%3,%0,4),%0\n"				\
		"6:	pushl %0\n"					\
		"	pushl %%eax\n"					\
		"	xorl %%eax,%%eax\n"				\
		"	rep; stosb\n"					\
		"	popl %%eax\n"					\
		"	popl %0\n"					\
		"	jmp 2b\n"					\
		".previous\n"						\
		_ASM_EXTABLE(4b,5b)					\
		_ASM_EXTABLE(0b,3b)					\
		_ASM_EXTABLE(1b,6b)					\
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
		: "3"(size), "0"(size), "1"(to), "2"(from)		\
		: "memory");						\
} while (0)

unsigned long __copy_to_user_ll(void __user *to, const void *from,
				unsigned long n)
{
#ifndef CONFIG_X86_WP_WORKS_OK
	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
			((unsigned long)to) < TASK_SIZE) {
		/*
		 * When we are in an atomic section (see
		 * mm/filemap.c:file_read_actor), return the full
		 * length to take the slow path.
		 */
		if (in_atomic())
			return n;

		/*
		 * The CPU does not honor the WP bit when writing from
		 * supervisor mode, and due to preemption or SMP, the
		 * page tables can change at any time.  Do it manually.
		 * Manfred <manfred@colorfullife.com>
		 */
		while (n) {
			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
			unsigned long len = PAGE_SIZE - offset;
			int retval;
			struct page *pg;
			void *maddr;

			if (len > n)
				len = n;

survive:
			down_read(&current->mm->mmap_sem);
			retval = get_user_pages(current, current->mm,
					(unsigned long)to, 1, 1, 0, &pg, NULL);

			if (retval == -ENOMEM && is_global_init(current)) {
				up_read(&current->mm->mmap_sem);
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto survive;
			}

			if (retval != 1) {
				up_read(&current->mm->mmap_sem);
				break;
			}

			maddr = kmap_atomic(pg);
			memcpy(maddr + offset, from, len);
			kunmap_atomic(maddr);
			set_page_dirty_lock(pg);
			put_page(pg);
			up_read(&current->mm->mmap_sem);

			from += len;
			to += len;
			n -= len;
		}
		return n;
	}
#endif
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel(to, from, n);
	return n;
}
EXPORT_SYMBOL(__copy_to_user_ll);

unsigned long __copy_from_user_ll(void *to, const void __user *from,
					unsigned long n)
{
	if (movsl_is_ok(to, from, n))
		__copy_user_zeroing(to, from, n);
	else
		n = __copy_user_zeroing_intel(to, from, n);
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll);

unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
					 unsigned long n)
{
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel((void __user *)to,
				      (const void *)from, n);
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nozero);

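/*
 * The _nocache variants below use the movnti-based copies only when
 * the CPU has SSE2 and the copy exceeds 64 bytes; for short copies
 * they fall back to the cached path, where non-temporal stores would
 * not pay off.
 */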
unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
					unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_zeroing_intel_nocache(to, from, n);
	else
		__copy_user_zeroing(to, from, n);
#else
	__copy_user_zeroing(to, from, n);
#endif
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache);

unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
					unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_intel_nocache(to, from, n);
	else
		__copy_user(to, from, n);
#else
	__copy_user(to, from, n);
#endif
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}
EXPORT_SYMBOL(copy_to_user);
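
/*
 * Illustrative sketch only (buf, kbuf and count are hypothetical
 * names): a read() handler copying a kernel buffer out might do
 *
 *	if (copy_to_user(buf, kbuf, count))
 *		return -EFAULT;
 */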

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}
EXPORT_SYMBOL(_copy_from_user);
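
/*
 * Illustrative sketch only (buf, kbuf and count are hypothetical
 * names): a write() handler might do
 *
 *	if (copy_from_user(kbuf, buf, count))
 *		return -EFAULT;
 *
 * On a partial fault the destination is still zero-padded out to @n
 * bytes, so kbuf never ends up holding uninitialized data.
 */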

void copy_from_user_overflow(void)
{
	WARN(1, "Buffer overflow detected!\n");
}
EXPORT_SYMBOL(copy_from_user_overflow);