/*
 * Based on arch/arm/mm/fault.c
 *
 * Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1995-2004 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <linux/preempt.h>

#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/exception.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
static const char *fault_name(unsigned int esr);

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, esr))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
{
	return 0;
}
#endif
/*
 * Dump out the page tables associated with 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	pr_alert("pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			break;

		pud = pud_offset(pgd, addr);
		printk(", *pud=%016llx", pud_val(*pud));
		if (pud_none(*pud) || pud_bad(*pud))
			break;

		pmd = pmd_offset(pud, addr);
		printk(", *pmd=%016llx", pmd_val(*pmd));
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			break;

		pte = pte_offset_map(pmd, addr);
		printk(", *pte=%016llx", pte_val(*pte));
		pte_unmap(pte);
	} while(0);

	printk("\n");
}
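
/*
 * Illustrative output from the walk above (all values here are made up):
 *
 *   pgd = ffffffc000204000
 *   [0000007fb1234000] *pgd=00000000f6de7003, *pud=00000000f6de7003,
 *   *pmd=00000000f6dc4003, *pte=006000008f1be74b
 *
 * The walk stops at the first none/bad entry, so a truncated line shows
 * how far the translation tables are populated for 'addr'.
 */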
#ifdef CONFIG_ARM64_HW_AFDBM
/*
 * This function sets the access flags (dirty, accessed), as well as write
 * permission, and only to a more permissive setting.
 *
 * It needs to cope with hardware update of the accessed/dirty state by other
 * agents in the system and can safely skip the __sync_icache_dcache() call as,
 * like set_pte_at(), the PTE is never changed from no-exec to exec here.
 *
 * Returns whether or not the PTE actually changed.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	pteval_t old_pteval;
	unsigned int tmp;

	if (pte_same(*ptep, entry))
		return 0;

	/* only preserve the access flags and write permission */
	pte_val(entry) &= PTE_AF | PTE_WRITE | PTE_DIRTY;

	/*
	 * PTE_RDONLY is cleared by default in the asm below, so set it back
	 * if necessary (read-only or clean PTE).
	 */
	if (!pte_write(entry) || !pte_sw_dirty(entry))
		pte_val(entry) |= PTE_RDONLY;

	/*
	 * Setting the flags must be done atomically to avoid racing with the
	 * hardware update of the access/dirty state.
	 */
	asm volatile("//	ptep_set_access_flags\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	and	%0, %0, %3		// clear PTE_RDONLY\n"
	"	orr	%0, %0, %4		// set flags\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
	: "L" (~PTE_RDONLY), "r" (pte_val(entry)));

	flush_tlb_fix_spurious_fault(vma, address);
	return 1;
}
#endif
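
/*
 * The LL/SC loop above is equivalent in spirit to a compare-and-swap
 * retry loop; a rough C sketch (assuming a pteval_t-sized cmpxchg) would
 * be:
 *
 *	pteval_t old, new;
 *	do {
 *		old = READ_ONCE(pte_val(*ptep));
 *		new = (old & ~PTE_RDONLY) | pte_val(entry);
 *	} while (cmpxchg(&pte_val(*ptep), old, new) != old);
 *
 * The asm form keeps the update within a single exclusive-monitor
 * sequence, so no torn intermediate value is visible to the hardware
 * access/dirty updater.
 */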
/*
 * The kernel tried to access some page that wasn't present.
 */
static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
			      unsigned int esr, struct pt_regs *regs)
{
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, esr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}
/*
 * Something tried to access memory that isn't in our memory map. User mode
 * accesses just cause a SIGSEGV.
 */
static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
			    unsigned int esr, unsigned int sig, int code,
			    struct pt_regs *regs)
{
	struct siginfo si;

	if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
		pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
			tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
			addr, esr);
		show_pte(tsk->mm, addr);
		show_regs(regs);
	}

	tsk->thread.fault_address = addr;
	tsk->thread.fault_code = esr;
	si.si_signo = sig;
	si.si_errno = 0;
	si.si_code = code;
	si.si_addr = (void __user *)addr;
	force_sig_info(sig, &si, tsk);
}
static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (user_mode(regs))
		__do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
	else
		__do_kernel_fault(mm, addr, esr, regs);
}
#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000

#define ESR_LNX_EXEC		(1 << 24)
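
/*
 * ESR_LNX_EXEC is a Linux-internal flag, not an architectural ESR bit:
 * the el0_ia entry path ORs a reserved ISS bit into the ESR so that
 * do_page_fault() can tell instruction aborts apart from data aborts.
 */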
static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
			   unsigned int mm_flags, unsigned long vm_flags,
			   struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	int fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > addr))
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this memory access, so we can handle
	 * it.
	 */
good_area:
	/*
	 * Check that the permissions on the VMA allow for the fault which
	 * occurred. If we encountered a write or exec fault, we must have
	 * appropriate permissions, otherwise we allow any permission.
	 */
	if (!(vma->vm_flags & vm_flags)) {
		fault = VM_FAULT_BADACCESS;
		goto out;
	}

	return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);

check_stack:
	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}
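
/*
 * Note the return contract: __do_page_fault() returns either the core
 * VM_FAULT_* code from handle_mm_fault(), or one of the local
 * VM_FAULT_BADMAP/VM_FAULT_BADACCESS values, which occupy bits above the
 * generic VM_FAULT_* space and are translated into SEGV_MAPERR and
 * SEGV_ACCERR respectively by do_page_fault() below.
 */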
static inline int permission_fault(unsigned int esr)
{
	unsigned int ec       = (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT;
	unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;

	return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
}
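
/*
 * permission_fault() decodes two ESR fields: the exception class in
 * ESR[31:26] (ESR_ELx_EC_DABT_CUR == data abort taken without a change
 * in exception level, i.e. from the kernel) and the fault status type
 * (ESR_ELx_FSC_PERM == permission fault). A kernel-mode permission fault
 * on a user address is how a PAN violation presents itself, which is why
 * do_page_fault() dies on it below.
 */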
static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
				   struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, sig, code;
	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (notify_page_fault(regs, esr))
		return 0;

	tsk = current;
	mm  = tsk->mm;
	/* Enable interrupts if they were enabled in the parent context. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt or have no user context, we must not take
	 * the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		mm_flags |= FAULT_FLAG_USER;

	if (esr & ESR_LNX_EXEC) {
		vm_flags = VM_EXEC;
	} else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
		vm_flags = VM_WRITE;
		mm_flags |= FAULT_FLAG_WRITE;
	}
	if (permission_fault(esr) && (addr < USER_DS)) {
		if (get_fs() == KERNEL_DS)
			die("Accessing user space memory with fs=KERNEL_DS", regs, esr);

		if (!search_exception_tables(regs->pc))
			die("Accessing user space memory outside uaccess.h routines", regs, esr);
	}
	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in which
		 * case, we'll have missed the might_sleep() from down_read().
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) && !search_exception_tables(regs->pc))
			goto no_context;
#endif
	}
	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;
	/*
	 * Major/minor page fault accounting is only done on the initial
	 * attempt. If we go through a retry, it is extremely likely that the
	 * page will be found in page cache at that point.
	 */

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
				      addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
				      addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
			 * starvation.
			 */
			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			mm_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
			      VM_FAULT_BADACCESS))))
		return 0;

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed).
		 */
		pagefault_out_of_memory();
		return 0;
	}

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to successfully fix up
		 * this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that isn't in our memory
		 * map.
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(tsk, addr, esr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, esr, regs);
	return 0;
}
/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain a valid
 * entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are probably
 * faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant entry, we
 * copy it to this task. If not, we send the process a signal, fix up the
 * exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an interrupt
 * or a critical region, and should only copy the information from the master
 * page table, nothing more.
 */
static int __kprobes do_translation_fault(unsigned long addr,
					  unsigned int esr,
					  struct pt_regs *regs)
{
	if (addr < TASK_SIZE)
		return do_page_fault(addr, esr, regs);

	do_bad_area(addr, esr, regs);
	return 0;
}
/*
 * This abort handler always returns "fault".
 */
static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
	return 1;
}
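
/*
 * A nonzero return tells do_mem_abort()/do_debug_exception() that the
 * fault was not handled, so the caller raises the signal recorded in the
 * corresponding fault_info entry.
 */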
static struct fault_info {
	int	(*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
} fault_info[] = {
	{ do_bad,		SIGBUS,  0,		"ttbr address size fault"	},
	{ do_bad,		SIGBUS,  0,		"level 1 address size fault"	},
	{ do_bad,		SIGBUS,  0,		"level 2 address size fault"	},
	{ do_bad,		SIGBUS,  0,		"level 3 address size fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 0 translation fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 1 translation fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 2 translation fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"level 3 translation fault"	},
	{ do_bad,		SIGBUS,  0,		"unknown 8"			},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 access flag fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 access flag fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 access flag fault"	},
	{ do_bad,		SIGBUS,  0,		"unknown 12"			},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 permission fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 permission fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 permission fault"	},
	{ do_bad,		SIGBUS,  0,		"synchronous external abort"	},
	{ do_bad,		SIGBUS,  0,		"unknown 17"			},
	{ do_bad,		SIGBUS,  0,		"unknown 18"			},
	{ do_bad,		SIGBUS,  0,		"unknown 19"			},
	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error"	},
	{ do_bad,		SIGBUS,  0,		"unknown 25"			},
	{ do_bad,		SIGBUS,  0,		"unknown 26"			},
	{ do_bad,		SIGBUS,  0,		"unknown 27"			},
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"unknown 32"			},
	{ do_bad,		SIGBUS,  BUS_ADRALN,	"alignment fault"		},
	{ do_bad,		SIGBUS,  0,		"unknown 34"			},
	{ do_bad,		SIGBUS,  0,		"unknown 35"			},
	{ do_bad,		SIGBUS,  0,		"unknown 36"			},
	{ do_bad,		SIGBUS,  0,		"unknown 37"			},
	{ do_bad,		SIGBUS,  0,		"unknown 38"			},
	{ do_bad,		SIGBUS,  0,		"unknown 39"			},
	{ do_bad,		SIGBUS,  0,		"unknown 40"			},
	{ do_bad,		SIGBUS,  0,		"unknown 41"			},
	{ do_bad,		SIGBUS,  0,		"unknown 42"			},
	{ do_bad,		SIGBUS,  0,		"unknown 43"			},
	{ do_bad,		SIGBUS,  0,		"unknown 44"			},
	{ do_bad,		SIGBUS,  0,		"unknown 45"			},
	{ do_bad,		SIGBUS,  0,		"unknown 46"			},
	{ do_bad,		SIGBUS,  0,		"unknown 47"			},
	{ do_bad,		SIGBUS,  0,		"TLB conflict abort"		},
	{ do_bad,		SIGBUS,  0,		"unknown 49"			},
	{ do_bad,		SIGBUS,  0,		"unknown 50"			},
	{ do_bad,		SIGBUS,  0,		"unknown 51"			},
	{ do_bad,		SIGBUS,  0,		"implementation fault (lockdown abort)" },
	{ do_bad,		SIGBUS,  0,		"implementation fault (unsupported exclusive)" },
	{ do_bad,		SIGBUS,  0,		"unknown 54"			},
	{ do_bad,		SIGBUS,  0,		"unknown 55"			},
	{ do_bad,		SIGBUS,  0,		"unknown 56"			},
	{ do_bad,		SIGBUS,  0,		"unknown 57"			},
	{ do_bad,		SIGBUS,  0,		"unknown 58"			},
	{ do_bad,		SIGBUS,  0,		"unknown 59"			},
	{ do_bad,		SIGBUS,  0,		"unknown 60"			},
	{ do_bad,		SIGBUS,  0,		"section domain fault"		},
	{ do_bad,		SIGBUS,  0,		"page domain fault"		},
	{ do_bad,		SIGBUS,  0,		"unknown 63"			},
};
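
/*
 * The table above is indexed by the low six bits of the ESR (the
 * DFSC/IFSC fault status code), which is why both fault_name() and
 * do_mem_abort() below use 'esr & 63'.
 */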
static const char *fault_name(unsigned int esr)
{
	const struct fault_info *inf = fault_info + (esr & 63);
	return inf->name;
}
/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
					 struct pt_regs *regs)
{
	const struct fault_info *inf = fault_info + (esr & 63);
	struct siginfo info;

	if (!inf->fn(addr, esr, regs))
		return;

	pr_alert("Unhandled fault: %s (0x%08x) at 0x%016lx\n",
		 inf->name, esr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm64_notify_die("", regs, &info, esr);
}
/*
 * Handle SP and PC alignment exceptions.
 */
asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
					   unsigned int esr,
					   struct pt_regs *regs)
{
	struct siginfo info;
	struct task_struct *tsk = current;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
		pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
				    tsk->comm, task_pid_nr(tsk),
				    esr_get_class_string(esr), (void *)regs->pc,
				    (void *)regs->sp);

	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code  = BUS_ADRALN;
	info.si_addr  = (void __user *)addr;
	arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr);
}
int __init early_brk64(unsigned long addr, unsigned int esr,
		       struct pt_regs *regs);

/*
 * __refdata because early_brk64 is __init, but the reference to it is
 * clobbered at arch_initcall time.
 * See traps.c and debug-monitors.c:debug_traps_init().
 */
static struct fault_info __refdata debug_fault_info[] = {
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware breakpoint"	},
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware single-step"	},
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware watchpoint"	},
	{ do_bad,	SIGBUS,		0,		"unknown 3"		},
	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch32 BKPT"		},
	{ do_bad,	SIGTRAP,	0,		"aarch32 vector catch"	},
	{ early_brk64,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK"		},
	{ do_bad,	SIGBUS,		0,		"unknown 7"		},
};
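
/*
 * These eight slots are indexed by DBG_ESR_EVT(esr), the debug event
 * type encoded in the ESR ISS field. The do_bad placeholders are
 * replaced at boot via hook_debug_fault_code() below.
 */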
void __init hook_debug_fault_code(int nr,
				  int (*fn)(unsigned long, unsigned int, struct pt_regs *),
				  int sig, int code, const char *name)
{
	BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));

	debug_fault_info[nr].fn		= fn;
	debug_fault_info[nr].sig	= sig;
	debug_fault_info[nr].code	= code;
	debug_fault_info[nr].name	= name;
}
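
/*
 * Typical registration, as done from debug-monitors.c (illustrative;
 * see debug_traps_init() for the real call sites):
 *
 *	hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler,
 *			      SIGTRAP, TRAP_HWBKPT, "single-step handler");
 */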
asmlinkage int __exception do_debug_exception(unsigned long addr,
					      unsigned int esr,
					      struct pt_regs *regs)
{
	const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
	struct siginfo info;

	if (!inf->fn(addr, esr, regs))
		return 1;

	pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
		 inf->name, esr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm64_notify_die("", regs, &info, 0);

	return 0;
}
NOKPROBE_SYMBOL(do_debug_exception);
#ifdef CONFIG_ARM64_PAN
int cpu_enable_pan(void *__unused)
{
	/*
	 * We modify PSTATE. This won't work from irq context as the PSTATE
	 * is discarded once we return from the exception.
	 */
	WARN_ON_ONCE(in_interrupt());

	config_sctlr_el1(SCTLR_EL1_SPAN, 0);
	asm(SET_PSTATE_PAN(1));
	return 0;
}
#endif /* CONFIG_ARM64_PAN */
#ifdef CONFIG_ARM64_UAO
/*
 * Kernel threads have fs=KERNEL_DS by default, and don't need to call
 * set_fs(); devtmpfs in particular relies on this behaviour.
 * We need to enable the feature at runtime (instead of adding it to
 * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
 */
int cpu_enable_uao(void *__unused)
{
	asm(SET_PSTATE_UAO(1));
	return 0;
}
#endif /* CONFIG_ARM64_UAO */