/*
 * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
 *
 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/percpu.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>

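/* When non-zero, otherwise-unhandled user faults get a rate-limited
 * log line via show_signal_msg() below.
 */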
int show_unhandled_signals = 1;

static inline __kprobes int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 0))
			ret = 1;
		preempt_enable();
	}
	return ret;
}

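/* Print as much state as we can for an unrecoverable fault, then oops
 * if it happened in kernel mode.
 */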
static void __kprobes unhandled_fault(unsigned long address,
				      struct task_struct *tsk,
				      struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT "Unable to handle kernel NULL "
		       "pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %016lx\n", (unsigned long)address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
	       (tsk->mm ?
		CTX_HWBITS(tsk->mm->context) :
		CTX_HWBITS(tsk->active_mm->context)));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
		(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
{
	printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
	       regs->tpc);
	printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
	printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
	printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
	dump_stack();
	unhandled_fault(regs->tpc, current, regs);
}

/*
 * We now make sure that mmap_sem is held in all paths that call
 * this. Additionally, to prevent kswapd from ripping ptes from
 * under us, raise interrupts around the time that we look at the
 * pte, kswapd will have to wait to get his smp ipi response from
 * us. vmtruncate likewise. This saves us having to get pte lock.
 */
static unsigned int get_user_insn(unsigned long tpc)
{
	pgd_t *pgdp = pgd_offset(current->mm, tpc);
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	unsigned long pa;
	u32 insn = 0;

	if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
		goto out;
	pudp = pud_offset(pgdp, tpc);
	if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
		goto out;

	/* This disables preemption for us as well. */
	local_irq_disable();

	pmdp = pmd_offset(pudp, tpc);
	if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
		goto out_irq_enable;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmdp)) {
		if (pmd_trans_splitting(*pmdp))
			goto out_irq_enable;

		pa  = pmd_pfn(*pmdp) << PAGE_SHIFT;
		pa += tpc & ~HPAGE_MASK;

		/* Use phys bypass so we don't pollute dtlb/dcache. */
		__asm__ __volatile__("lduwa [%1] %2, %0"
				     : "=r" (insn)
				     : "r" (pa), "i" (ASI_PHYS_USE_EC));
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, tpc);
		pte = *ptep;
		if (pte_present(pte)) {
			pa  = (pte_pfn(pte) << PAGE_SHIFT);
			pa += (tpc & ~PAGE_MASK);

			/* Use phys bypass so we don't pollute dtlb/dcache. */
			__asm__ __volatile__("lduwa [%1] %2, %0"
					     : "=r" (insn)
					     : "r" (pa), "i" (ASI_PHYS_USE_EC));
		}
		pte_unmap(ptep);
	}
out_irq_enable:
	local_irq_enable();
out:
	return insn;
}

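/* Rate-limited "segfault at ..." diagnostic, printed only for signals
 * the task does not catch.
 */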
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->tpc);

	printk(KERN_CONT "\n");
}

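/* Cook up a siginfo and deliver the signal.  For I-TLB faults the PC is
 * the faulting address; for D-TLB faults we decode the instruction, when
 * we managed to read it, to get a byte-precise effective address.
 */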
static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			     unsigned long fault_addr, unsigned int insn,
			     int fault_code)
{
	unsigned long addr;
	siginfo_t info;

	info.si_code = code;
	info.si_signo = sig;
	info.si_errno = 0;
	if (fault_code & FAULT_CODE_ITLB) {
		addr = regs->tpc;
	} else {
		/* If we were able to probe the faulting instruction, use it
		 * to compute a precise fault address.  Otherwise use the fault
		 * time provided address which may only have page granularity.
		 */
		if (insn)
			addr = compute_effective_address(regs, insn, 0);
		else
			addr = fault_addr;
	}
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, code, addr, current);

	force_sig_info(sig, &info, current);
}

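/* Emulators for floating-point/quad and no-fault loads; both are
 * implemented elsewhere in the sparc64 tree (see
 * arch/sparc/kernel/unaligned_64.c).
 */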
extern int handle_ldf_stq(u32, struct pt_regs *);
extern int handle_ld_nf(u32, struct pt_regs *);

static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
{
	if (!insn) {
		if (!regs->tpc || (regs->tpc & 0x3))
			return 0;
		if (regs->tstate & TSTATE_PRIV) {
			insn = *(unsigned int *) regs->tpc;
		} else {
			insn = get_user_insn(regs->tpc);
		}
	}
	return insn;
}

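/* Kernel-context fault: emulate no-fault ASI accesses, try an
 * exception-table fixup, post SIGSEGV for user accesses, and oops on
 * anything we cannot explain.
 */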
static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
				      int fault_code, unsigned int insn,
				      unsigned long address)
{
	unsigned char asi = ASI_P;

	if ((!insn) && (regs->tstate & TSTATE_PRIV))
		goto cannot_handle;

	/* If the user insn could not be read (thus insn is zero), that
	 * is fine.  We will just gun down the process with a signal
	 * in that case.
	 */

	if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
	    (insn & 0xc0800000) == 0xc0800000) {
		if (insn & 0x2000)
			asi = (regs->tstate >> 24);
		else
			asi = (insn >> 5);
		if ((asi & 0xf2) == 0x82) {
			if (insn & 0x1000000) {
				handle_ldf_stq(insn, regs);
			} else {
				/* This was a non-faulting load. Just clear the
				 * destination register(s) and continue with the
				 * next instruction. -jj
				 */
				handle_ld_nf(insn, regs);
			}
			return;
		}
	}

	/* Is this in ex_table? */
	if (regs->tstate & TSTATE_PRIV) {
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
	} else {
		/* The si_code was set to make clear whether
		 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
		 */
		do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
		return;
	}

cannot_handle:
	unhandled_fault (address, current, regs);
}

static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
{
	static int times;

	if (times++ < 10)
		printk(KERN_ERR "FAULT[%s:%d]: 32-bit process reports "
		       "64-bit TPC [%lx]\n",
		       current->comm, current->pid,
		       regs->tpc);
	show_regs(regs);
}

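/* Common entry for I-TLB and D-TLB misses; the trap handlers stash the
 * fault type and faulting address in thread_info before calling us.
 */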
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned int insn = 0;
	int si_code, fault_code, fault;
	unsigned long address, mm_rss;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	fault_code = get_thread_fault_code();

	if (notify_page_fault(regs))
		return;

	si_code = SEGV_MAPERR;
	address = current_thread_info()->fault_address;

	if ((fault_code & FAULT_CODE_ITLB) &&
	    (fault_code & FAULT_CODE_DTLB))
		BUG();

	if (test_thread_flag(TIF_32BIT)) {
		if (!(regs->tstate & TSTATE_PRIV)) {
			if (unlikely((regs->tpc >> 32) != 0)) {
				bogus_32bit_fault_tpc(regs);
				goto intr_or_no_mm;
			}
		}
		if (unlikely((address >> 32) != 0))
			goto intr_or_no_mm;
	}

	if (regs->tstate & TSTATE_PRIV) {
		unsigned long tpc = regs->tpc;

		/* Sanity check the PC. */
		if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
		    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
			/* Valid, no problems... */
		} else {
			bad_kernel_pc(regs, address);
			return;
		}
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto intr_or_no_mm;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((regs->tstate & TSTATE_PRIV) &&
		    !search_exception_tables(regs->tpc)) {
			insn = get_fault_insn(regs, insn);
			goto handle_kernel_fault;
		}

retry:
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;

	/* Pure DTLB misses do not tell us whether the fault causing
	 * load/store/atomic was a write or not, it only says that there
	 * was no match.  So in such a case we (carefully) read the
	 * instruction to try and figure this out.  It's an optimization
	 * so it's ok if we can't do this.
	 *
	 * Special hack, window spill/fill knows the exact fault type.
	 */
	if (((fault_code &
	      (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
	    (vma->vm_flags & VM_WRITE) != 0) {
		insn = get_fault_insn(regs, 0);
		if (!insn)
			goto continue_fault;
		/* All loads, stores and atomics have bits 30 and 31 both set
		 * in the instruction.  Bit 21 is set in all stores, but we
		 * have to avoid prefetches which also have bit 21 set.
		 */
		if ((insn & 0xc0200000) == 0xc0200000 &&
		    (insn & 0x01780000) != 0x01680000) {
			/* Don't bother updating thread struct value,
			 * because update_mmu_cache only cares which tlb
			 * the access came from.
			 */
			fault_code |= FAULT_CODE_WRITE;
		}
	}

continue_fault:
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (!(fault_code & FAULT_CODE_WRITE)) {
		/* Non-faulting loads shouldn't expand stack. */
		insn = get_fault_insn(regs, insn);
		if ((insn & 0xc0800000) == 0xc0800000) {
			unsigned char asi;

			if (insn & 0x2000)
				asi = (regs->tstate >> 24);
			else
				asi = (insn >> 5);
			if ((asi & 0xf2) == 0x82)
				goto bad_area;
		}
	}
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;

	/* If we took an ITLB miss on a non-executable page, catch
	 * that here.
	 */
	if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
		BUG_ON(address != regs->tpc);
		BUG_ON(regs->tstate & TSTATE_PRIV);
		goto bad_area;
	}

	if (fault_code & FAULT_CODE_WRITE) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;

		/* Spitfire has an icache which does not snoop
		 * processor stores.  Later processors do...
		 */
		if (tlb_type == spitfire &&
		    (vma->vm_flags & VM_EXEC) != 0 &&
		    vma->vm_file != NULL)
			set_thread_fault_code(fault_code |
					      FAULT_CODE_BLKCOMMIT);
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	flags |= ((fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}
	up_read(&mm->mmap_sem);

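	/* Fault serviced; check whether the TSB (the software TLB-miss
	 * lookup table) should grow now that the resident set may have.
	 */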
	mm_rss = get_mm_rss(mm);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
#endif
	if (unlikely(mm_rss >
		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
		tsb_grow(mm, MM_TSB_BASE, mm_rss);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	mm_rss = mm->context.huge_pte_count;
	if (unlikely(mm_rss >
		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
		if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
			tsb_grow(mm, MM_TSB_HUGE, mm_rss);
		else
			hugetlb_setup(regs);
	}
#endif
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);

handle_kernel_fault:
	do_kernel_fault(regs, si_code, fault_code, insn, address);
	return;

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);
	if (!(regs->tstate & TSTATE_PRIV)) {
		pagefault_out_of_memory();
		return;
	}
	goto handle_kernel_fault;

intr_or_no_mm:
	insn = get_fault_insn(regs, 0);
	goto handle_kernel_fault;

do_sigbus:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);

	/* Kernel mode? Handle exceptions or die */
	if (regs->tstate & TSTATE_PRIV)
		goto handle_kernel_fault;
}