/*
 * User access functions based on page table walks for enhanced
 * system layout without hardware support.
 *
 * Copyright IBM Corp. 2006, 2012
 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/futex.h>

static size_t strnlen_kernel(size_t count, const char __user *src)
	register unsigned long reg0 asm("0") = 0UL;
	unsigned long tmp1, tmp2;
		"   la    %0,1(%3)\n"	/* strnlen_kernel result includes \0 */
		: "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");

static size_t copy_in_kernel(size_t count, void __user *to,
			     const void __user *from)
		"1: mvc   0(1,%1),0(%2)\n"
		"2: mvc   0(256,%1),0(%2)\n"
		"4: ex    %0,1b-0b(%3)\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1)
/*
 * Returns kernel address for user virtual address. If the returned address is
 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the
 * address contains the (negative) exception code.
 */
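/*
 * Address layout implied by the shifts below (64-bit mode): region-first
 * index in bits 53-63, region-second index in bits 42-52, region-third index
 * in bits 31-41, segment index in bits 20-30, page index in bits 12-19 and
 * the byte offset in bits 0-11.  The switch starts at the topmost table level
 * the address space actually uses and intentionally falls through to the
 * lower levels; a large (1 MB) segment short-circuits the page table step.
 */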
static unsigned long follow_table(struct mm_struct *mm,
				  unsigned long address, int write)
	unsigned long *table = (unsigned long *)__pa(mm->pgd);

	switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table = table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV))
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	case _ASCE_TYPE_REGION2:
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV))
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	case _ASCE_TYPE_REGION3:
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV))
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	case _ASCE_TYPE_SEGMENT:
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INV))
		if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
			if (write && (*table & _SEGMENT_ENTRY_RO))
			return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
				(address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	table = table + ((address >> 12) & 0xff);
	if (unlikely(*table & _PAGE_INVALID))
	if (write && (*table & _PAGE_RO))
	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
#else /* CONFIG_64BIT */

static unsigned long follow_table(struct mm_struct *mm,
				  unsigned long address, int write)
	unsigned long *table = (unsigned long *)__pa(mm->pgd);

	table = table + ((address >> 20) & 0x7ff);
	if (unlikely(*table & _SEGMENT_ENTRY_INV))
	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	table = table + ((address >> 12) & 0xff);
	if (unlikely(*table & _PAGE_INVALID))
	if (write && (*table & _PAGE_RO))
	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);

#endif /* CONFIG_64BIT */
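/*
 * Copy between kernel memory and a user address by doing the address
 * translation in software: take mm->page_table_lock, translate the user
 * address with follow_table(), copy at most up to the next page boundary,
 * and repeat.  When the walk fails the lock is dropped, __handle_fault()
 * resolves the fault and the chunk is retried.  write_user selects the copy
 * direction and whether write access is required.
 */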
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
					     size_t n, int write_user)
	struct mm_struct *mm = current->mm;
	unsigned long offset, done, size, kaddr;

	spin_lock(&mm->page_table_lock);
		kaddr = follow_table(mm, uaddr, write_user);
		if (IS_ERR_VALUE(kaddr))
		offset = uaddr & ~PAGE_MASK;
		size = min(n - done, PAGE_SIZE - offset);
			from = (void *) kaddr;
		memcpy(to, from, size);
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -kaddr, write_user))
/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
	struct mm_struct *mm = current->mm;

	kaddr = follow_table(mm, uaddr, write);
	if (IS_ERR_VALUE(kaddr))
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(uaddr, -kaddr, write);
	spin_lock(&mm->page_table_lock);
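/*
 * copy_from_user() backend: with KERNEL_DS both pointers are kernel
 * addresses and a plain kernel-to-kernel move is enough; otherwise the page
 * table walk variant above is used.  As usual for copy_from_user(), bytes
 * that could not be copied are zero-filled in the destination and the number
 * of uncopied bytes is returned.
 */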
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
	if (segment_eq(get_fs(), KERNEL_DS))
		return copy_in_kernel(n, (void __user *) to, from);
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
		memset(to + n - rc, 0, rc);

size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
	if (segment_eq(get_fs(), KERNEL_DS))
		return copy_in_kernel(n, to, (void __user *) from);
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
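/*
 * Clear user memory by copying from empty_zero_page in chunks of at most
 * one page, so a single copy never reads past the zero page.
 */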
static size_t clear_user_pt(size_t n, void __user *to)
	void *zpage = (void *) empty_zero_page;
	long done, size, ret;

		if (n - done > PAGE_SIZE)
		if (segment_eq(get_fs(), KERNEL_DS))
			ret = copy_in_kernel(size, to, (void __user *) zpage);
			ret = __user_copy_pt((unsigned long) to, zpage, size, 1);
			return ret + n - done;
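/*
 * strnlen_user() backend: scan the string page by page under
 * mm->page_table_lock, running strnlen() on the kernel mapping of each page,
 * until a '\0' is found or count is reached; faults are resolved with
 * __handle_fault() and the scan is retried.
 */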
static size_t strnlen_user_pt(size_t count, const char __user *src)
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, done, len, kaddr;

	if (unlikely(!count))
	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen_kernel(count, src);

	spin_lock(&mm->page_table_lock);
		kaddr = follow_table(mm, uaddr, 0);
		if (IS_ERR_VALUE(kaddr))
		offset = uaddr & ~PAGE_MASK;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen((char *) kaddr, len);
	} while ((len_str == len) && (done < count));
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -kaddr, 0))
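/*
 * strncpy_from_user() backend: copy the string in page-sized pieces, either
 * with copy_in_kernel() for KERNEL_DS or via the page table walk, and stop
 * as soon as strnlen() finds the terminating '\0' in the piece just copied
 * or count bytes have been transferred.
 */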
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
	size_t done, len, offset, len_str;

	if (unlikely(!count))
		offset = (size_t)src & ~PAGE_MASK;
		len = min(count - done, PAGE_SIZE - offset);
		if (segment_eq(get_fs(), KERNEL_DS)) {
			if (copy_in_kernel(len, (void __user *) dst, src))
			if (__user_copy_pt((unsigned long) src, dst, len, 0))
		len_str = strnlen(dst, len);
	} while ((len_str == len) && (done < count));
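/*
 * Copy from user space to user space: translate both the source and the
 * destination address, then copy per iteration at most up to the nearer of
 * the two page boundaries (PAGE_SIZE minus the larger of the two page
 * offsets), so a single memcpy() never crosses a page on either side.
 */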
static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
	struct mm_struct *mm = current->mm;
	unsigned long offset_max, uaddr, done, size, error_code;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	unsigned long kaddr_to, kaddr_from;

	if (segment_eq(get_fs(), KERNEL_DS))
		return copy_in_kernel(n, to, from);

	spin_lock(&mm->page_table_lock);
		kaddr_from = follow_table(mm, uaddr_from, 0);
		error_code = kaddr_from;
		if (IS_ERR_VALUE(error_code))
		kaddr_to = follow_table(mm, uaddr_to, 1);
		error_code = (unsigned long) kaddr_to;
		if (IS_ERR_VALUE(error_code))
		offset_max = max(uaddr_from & ~PAGE_MASK,
				 uaddr_to & ~PAGE_MASK);
		size = min(n - done, PAGE_SIZE - offset_max);
		memcpy((void *) kaddr_to, (void *) kaddr_from, size);
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -error_code, write_user))
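/*
 * Futex helper: load the old value, let "insn" compute the new value from it
 * and oparg, and try to store it back with compare-and-swap, retrying until
 * the swap succeeds.  The EX_TABLE entries turn a faulting access into a
 * -EFAULT return instead of an oops.
 */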
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l     %1,0(%6)\n"				\
		     "2: cs    %1,%2,0(%6)\n"				\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc" );

static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
	int oldval = 0, newval, ret;

		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
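/*
 * For a real user address space the futex word is translated with
 * __dat_user_addr() under mm->page_table_lock, the backing page is pinned
 * with get_page() before the lock is dropped so it cannot go away, the
 * operation is done on the kernel mapping, and the page is released again
 * with put_page().
 */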
int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_op_pt(op, uaddr, oparg, old);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr, 1);
		spin_unlock(&current->mm->page_table_lock);
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
	put_page(virt_to_page(uaddr));
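/*
 * Atomic compare-and-exchange on a futex word: a single "cs" on the
 * (translated) address, with exception table entries turning a fault into
 * -EFAULT; the value actually found in memory is returned through *uval.
 */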
static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
				     u32 oldval, u32 newval)
	asm volatile("0: cs   %1,%4,0(%5)\n"
		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)

int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
			    u32 oldval, u32 newval)
	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr, 1);
		spin_unlock(&current->mm->page_table_lock);
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	put_page(virt_to_page(uaddr));
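/*
 * Ops table that wires the page-table-walk implementations into the common
 * s390 uaccess dispatch.  How the active implementation gets selected is
 * outside this file; as a rough sketch (the exact mechanism here is an
 * assumption), early setup code picks one struct uaccess_ops per machine,
 * e.g.
 *
 *	if (MACHINE_HAS_MVCOS)
 *		uaccess = uaccess_mvcos;
 *	else
 *		uaccess = uaccess_pt;
 *
 * and copy_from_user() & friends then call through the chosen table, e.g.
 * uaccess.copy_from_user(n, from, to).
 */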
struct uaccess_ops uaccess_pt = {
	.copy_from_user = copy_from_user_pt,
	.copy_from_user_small = copy_from_user_pt,
	.copy_to_user = copy_to_user_pt,
	.copy_to_user_small = copy_to_user_pt,
	.copy_in_user = copy_in_user_pt,
	.clear_user = clear_user_pt,
	.strnlen_user = strnlen_user_pt,
	.strncpy_from_user = strncpy_from_user_pt,
	.futex_atomic_op = futex_atomic_op_pt,
	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
};