/*
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *    Copyright IBM Corp. 2006, 2012
 *    Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

#ifndef CONFIG_64BIT
#define AHI     "ahi"   /* add halfword immediate, 31-bit mode */
#define SLR     "slr"   /* subtract logical, 31-bit mode */
#else
#define AHI     "aghi"  /* 64-bit forms of the same instructions */
#define SLR     "slgr"
#endif

static size_t strnlen_kernel(size_t count, const char __user *src)
{
        register unsigned long reg0 asm("0") = 0UL;
        unsigned long tmp1, tmp2;

        asm volatile(
                "   la    %2,0(%1)\n"
                "   la    %3,0(%0,%1)\n"
                "  "SLR"  %0,%0\n"
                "0: srst  %3,%2\n"
                "   jo    0b\n"
                "   la    %0,1(%3)\n"   /* result includes the terminating \0 */
                "  "SLR"  %0,%1\n"
                "1:\n"
                EX_TABLE(0b,1b)
                : "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2)
                : "d" (reg0) : "cc", "memory");
        return count;
}

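/*
 * For reference, a rough C sketch of what the SRST loop above computes
 * (illustration only; the asm version is what is used, and it also
 * survives faults via its EX_TABLE entry): scan at most count bytes and
 * report the string length including the terminator, or count + 1 when
 * no terminator is found within count bytes.
 */
#if 0
static size_t strnlen_kernel_sketch(size_t count, const char *src)
{
        size_t len = strnlen(src, count);       /* bytes before the '\0' */

        return len < count ? len + 1 : count + 1;
}
#endif
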
static size_t copy_in_kernel(size_t count, void __user *to,
                             const void __user *from)
{
        unsigned long tmp1;

        asm volatile(
                "  "AHI"  %0,-1\n"
                "   jo    5f\n"
                "   bras  %3,3f\n"
                "0:"AHI"  %0,257\n"
                "1: mvc   0(1,%1),0(%2)\n"
                "   la    %1,1(%1)\n"
                "   la    %2,1(%2)\n"
                "  "AHI"  %0,-1\n"
                "   jnz   1b\n"
                "   j     5f\n"
                "2: mvc   0(256,%1),0(%2)\n"
                "   la    %1,256(%1)\n"
                "   la    %2,256(%2)\n"
                "3:"AHI"  %0,-256\n"
                "   jnm   2b\n"
                "4: ex    %0,1b-0b(%3)\n"
                "5:"SLR"  %0,%0\n"
                "6:\n"
                EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
                : "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1)
                : : "cc", "memory");
        return count;
}

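/*
 * Structure of the loop above, spelled out: full 256-byte blocks are
 * copied by the MVC at label 2, and the remainder is handled by
 * EXecuting the one-byte MVC at label 1 with the residual length, the
 * usual s390 idiom for variable-length moves. On a fault the EX_TABLE
 * entries either return the remaining byte count (label 6) or re-enter
 * the byte-wise loop at label 0 to locate the exact failing byte.
 */
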
/*
 * Returns the kernel address for a user virtual address. If the returned
 * address is >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred
 * and the address contains the (negative) exception code.
 */
#ifdef CONFIG_64BIT
static unsigned long follow_table(struct mm_struct *mm,
                                  unsigned long address, int write)
{
        unsigned long *table = (unsigned long *)__pa(mm->pgd);

        switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
        case _ASCE_TYPE_REGION1:
                table = table + ((address >> 53) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INV))
                        return -0x39UL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_REGION2:
                table = table + ((address >> 42) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INV))
                        return -0x3aUL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_REGION3:
                table = table + ((address >> 31) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INV))
                        return -0x3bUL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_SEGMENT:
                table = table + ((address >> 20) & 0x7ff);
                if (unlikely(*table & _SEGMENT_ENTRY_INV))
                        return -0x10UL;
                if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
                        if (write && (*table & _SEGMENT_ENTRY_RO))
                                return -0x04UL;
                        return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
                                (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
                }
                table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
        }
        table = table + ((address >> 12) & 0xff);
        if (unlikely(*table & _PAGE_INVALID))
                return -0x11UL;
        if (write && (*table & _PAGE_RO))
                return -0x04UL;
        return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}

#else /* CONFIG_64BIT */

static unsigned long follow_table(struct mm_struct *mm,
                                  unsigned long address, int write)
{
        unsigned long *table = (unsigned long *)__pa(mm->pgd);

        table = table + ((address >> 20) & 0x7ff);
        if (unlikely(*table & _SEGMENT_ENTRY_INV))
                return -0x10UL;
        table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
        table = table + ((address >> 12) & 0xff);
        if (unlikely(*table & _PAGE_INVALID))
                return -0x11UL;
        if (write && (*table & _PAGE_RO))
                return -0x04UL;
        return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}

#endif /* CONFIG_64BIT */

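/*
 * The negative values returned by follow_table() are s390 program
 * interruption codes: 0x04 protection, 0x10 segment translation, 0x11
 * page translation, 0x39/0x3a/0x3b region first/second/third
 * translation. A minimal caller sketch (illustration only), showing how
 * they reach __handle_fault():
 */
#if 0
static int follow_table_usage_sketch(struct mm_struct *mm, unsigned long uaddr)
{
        unsigned long kaddr = follow_table(mm, uaddr, 0);

        if (IS_ERR_VALUE(kaddr))        /* kaddr is in [-4095, -1] */
                return __handle_fault(uaddr, -kaddr, 0);
        /* kaddr now aliases the user page through the kernel mapping */
        return 0;
}
#endif
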
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
                                             size_t n, int write_user)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset, done, size, kaddr;
        void *from, *to;

        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                kaddr = follow_table(mm, uaddr, write_user);
                if (IS_ERR_VALUE(kaddr))
                        goto fault;

                offset = uaddr & ~PAGE_MASK;
                size = min(n - done, PAGE_SIZE - offset);
                if (write_user) {
                        to = (void *) kaddr;
                        from = kptr + done;
                } else {
                        from = (void *) kaddr;
                        to = kptr + done;
                }
                memcpy(to, from, size);
                done += size;
                uaddr += size;
        } while (done < n);
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(uaddr, -kaddr, write_user))
                return n - done;
        goto retry;
}

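/*
 * Why the plain memcpy() above is safe: mm->page_table_lock is held for
 * the whole walk-and-copy sequence, so the page tables cannot change
 * underneath it (this backend assumes all relevant updates take that
 * lock). A concurrent unmap becomes visible only once the lock is
 * dropped, at which point the next follow_table() call reports a fault
 * and the retry path takes over.
 */
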
/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
                                                     int write)
{
        struct mm_struct *mm = current->mm;
        unsigned long kaddr;
        int rc;

retry:
        kaddr = follow_table(mm, uaddr, write);
        if (IS_ERR_VALUE(kaddr))
                goto fault;

        return kaddr;
fault:
        spin_unlock(&mm->page_table_lock);
        rc = __handle_fault(uaddr, -kaddr, write);
        spin_lock(&mm->page_table_lock);
        if (!rc)
                goto retry;
        return 0;
}

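/*
 * Note that the fault path above drops and reacquires page_table_lock
 * around __handle_fault(), so a caller must treat any translation
 * obtained before the call as stale afterwards. The futex helpers below
 * pin the backing page with get_page() before releasing the lock for
 * exactly that reason.
 */
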
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
        size_t rc;

        if (segment_eq(get_fs(), KERNEL_DS))
                return copy_in_kernel(n, (void __user *) to, from);
        rc = __user_copy_pt((unsigned long) from, to, n, 0);
        if (unlikely(rc))
                memset(to + n - rc, 0, rc);
        return rc;
}

size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
        if (segment_eq(get_fs(), KERNEL_DS))
                return copy_in_kernel(n, to, (void __user *) from);
        return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}

static size_t clear_user_pt(size_t n, void __user *to)
{
        void *zpage = (void *) empty_zero_page;
        long done, size, ret;

        done = 0;
        do {
                if (n - done > PAGE_SIZE)
                        size = PAGE_SIZE;
                else
                        size = n - done;
                if (segment_eq(get_fs(), KERNEL_DS))
                        ret = copy_in_kernel(size, to, (void __user *) zpage);
                else
                        ret = __user_copy_pt((unsigned long) to, zpage, size, 1);
                done += size;
                to += size;
                if (ret)
                        return ret + n - done;
        } while (done < n);
        return 0;
}

static size_t strnlen_user_pt(size_t count, const char __user *src)
{
        unsigned long uaddr = (unsigned long) src;
        struct mm_struct *mm = current->mm;
        unsigned long offset, done, len, kaddr;
        size_t len_str;

        if (unlikely(!count))
                return 0;
        if (segment_eq(get_fs(), KERNEL_DS))
                return strnlen_kernel(count, src);
        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                kaddr = follow_table(mm, uaddr, 0);
                if (IS_ERR_VALUE(kaddr))
                        goto fault;

                offset = uaddr & ~PAGE_MASK;
                len = min(count - done, PAGE_SIZE - offset);
                len_str = strnlen((char *) kaddr, len);
                done += len_str;
                uaddr += len_str;
        } while ((len_str == len) && (done < count));
        spin_unlock(&mm->page_table_lock);
        return done + 1;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(uaddr, -kaddr, 0))
                return 0;
        goto retry;
}

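/*
 * Return convention of strnlen_user_pt() above, spelled out: 0 on an
 * unresolvable fault, the string length including the terminating '\0'
 * when one is found, and count + 1 when no terminator occurs within
 * count bytes (done can only reach count while len_str == len).
 */
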
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
                                   char *dst)
{
        size_t done, len, offset, len_str;

        if (unlikely(!count))
                return 0;
        done = 0;
        do {
                offset = (size_t)src & ~PAGE_MASK;
                len = min(count - done, PAGE_SIZE - offset);
                if (segment_eq(get_fs(), KERNEL_DS)) {
                        if (copy_in_kernel(len, (void __user *) dst, src))
                                return -EFAULT;
                } else {
                        if (__user_copy_pt((unsigned long) src, dst, len, 0))
                                return -EFAULT;
                }
                len_str = strnlen(dst, len);
                done += len_str;
                src += len_str;
                dst += len_str;
        } while ((len_str == len) && (done < count));
        return done;
}

static size_t copy_in_user_pt(size_t n, void __user *to,
                              const void __user *from)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset_max, uaddr, done, size, error_code;
        unsigned long uaddr_from = (unsigned long) from;
        unsigned long uaddr_to = (unsigned long) to;
        unsigned long kaddr_to, kaddr_from;
        int write_user;

        if (segment_eq(get_fs(), KERNEL_DS))
                return copy_in_kernel(n, to, from);
        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                write_user = 0;
                uaddr = uaddr_from;
                kaddr_from = follow_table(mm, uaddr_from, 0);
                error_code = kaddr_from;
                if (IS_ERR_VALUE(error_code))
                        goto fault;

                write_user = 1;
                uaddr = uaddr_to;
                kaddr_to = follow_table(mm, uaddr_to, 1);
                error_code = kaddr_to;
                if (IS_ERR_VALUE(error_code))
                        goto fault;

                offset_max = max(uaddr_from & ~PAGE_MASK,
                                 uaddr_to & ~PAGE_MASK);
                size = min(n - done, PAGE_SIZE - offset_max);

                memcpy((void *) kaddr_to, (void *) kaddr_from, size);
                done += size;
                uaddr_from += size;
                uaddr_to += size;
        } while (done < n);
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(uaddr, -error_code, write_user))
                return n - done;
        goto retry;
}

#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)     \
        asm volatile("0: l   %1,0(%6)\n"                                \
                     "1: " insn                                         \
                     "2: cs  %1,%2,0(%6)\n"                             \
                     "3: jl  1b\n"                                      \
                     "   lhi %0,0\n"                                    \
                     "4:\n"                                             \
                     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)    \
                     : "=d" (ret), "=&d" (oldval), "=&d" (newval),      \
                       "=m" (*uaddr)                                    \
                     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),         \
                       "m" (*uaddr) : "cc" );

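/*
 * A rough C analogue of the load / compute / compare-and-swap loop the
 * macro above emits (sketch only; the real version needs the EX_TABLE
 * fixups to turn a fault into -EFAULT):
 */
#if 0
static int futex_op_sketch(u32 *uaddr, u32 oparg, u32 *old)
{
        u32 oldval, newval;

        do {
                oldval = READ_ONCE(*uaddr);     /* "l"  */
                newval = oldval + oparg;        /* e.g. the FUTEX_OP_ADD insn */
        } while (cmpxchg(uaddr, oldval, newval) != oldval);     /* "cs" */
        *old = oldval;
        return 0;
}
#endif
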
static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
        int oldval = 0, newval, ret;

        switch (op) {
        case FUTEX_OP_SET:
                __futex_atomic_op("lr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_ADD:
                __futex_atomic_op("lr %2,%1\nar %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_OR:
                __futex_atomic_op("lr %2,%1\nor %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_ANDN:
                __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_XOR:
                __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        default:
                ret = -ENOSYS;
        }
        if (ret == 0)
                *old = oldval;
        return ret;
}

int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
        int ret;

        if (segment_eq(get_fs(), KERNEL_DS))
                return __futex_atomic_op_pt(op, uaddr, oparg, old);
        spin_lock(&current->mm->page_table_lock);
        uaddr = (u32 __force __user *)
                __dat_user_addr((__force unsigned long) uaddr, 1);
        if (!uaddr) {
                spin_unlock(&current->mm->page_table_lock);
                return -EFAULT;
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
        ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
        put_page(virt_to_page(uaddr));
        return ret;
}

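/*
 * The pattern above, and again in futex_atomic_cmpxchg_pt() below:
 * translate the user address under page_table_lock, pin the backing
 * page with get_page() so it cannot be freed, drop the lock, perform
 * the atomic operation on the kernel alias, then unpin with put_page().
 */
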
static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
                                     u32 oldval, u32 newval)
{
        int ret;

        asm volatile("0: cs   %1,%4,0(%5)\n"
                     "1: la   %0,0\n"
                     "2:\n"
                     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
                     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
                     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
                     : "cc", "memory" );
        *uval = oldval;
        return ret;
}

int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
                            u32 oldval, u32 newval)
{
        int ret;

        if (segment_eq(get_fs(), KERNEL_DS))
                return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
        spin_lock(&current->mm->page_table_lock);
        uaddr = (u32 __force __user *)
                __dat_user_addr((__force unsigned long) uaddr, 1);
        if (!uaddr) {
                spin_unlock(&current->mm->page_table_lock);
                return -EFAULT;
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
        ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
        put_page(virt_to_page(uaddr));
        return ret;
}

struct uaccess_ops uaccess_pt = {
        .copy_from_user         = copy_from_user_pt,
        .copy_from_user_small   = copy_from_user_pt,
        .copy_to_user           = copy_to_user_pt,
        .copy_to_user_small     = copy_to_user_pt,
        .copy_in_user           = copy_in_user_pt,
        .clear_user             = clear_user_pt,
        .strnlen_user           = strnlen_user_pt,
        .strncpy_from_user      = strncpy_from_user_pt,
        .futex_atomic_op        = futex_atomic_op_pt,
        .futex_atomic_cmpxchg   = futex_atomic_cmpxchg_pt,
};
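
/*
 * This ops table is one of the s390 uaccess backends of this kernel
 * generation; which backend is installed is decided at boot, outside
 * this file. The "pt" variant implements every primitive by walking
 * the page tables in software, as the file header states.
 */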