/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/memreg.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
extern int prom_node_root;

int show_unhandled_signals = 1;

/* At boot time we determine these two values necessary for setting
 * up the segment maps and page table entries (pte's).
 */
int num_segmaps, num_contexts;
int invalid_segment;

/* various Virtual Address Cache parameters we find at boot time... */
int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;
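/* Roughly: the sun4c MMU translates through a small pool of hardware
 * segment maps shared among a few hardware contexts, and its cache is
 * virtually addressed, so all of the sizes above must be probed from
 * the PROM at boot rather than hard-coded.  invalid_segment is the
 * segmap value meaning "no translation loaded"; do_sun4c_fault() below
 * checks for it before poking a pte into the hardware.
 */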
/* Return how much physical memory we have. */
unsigned long probe_memory(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++)
		total += sp_banks[i].num_bytes;

	return total;
}
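/* sp_banks[] is the table of physical memory banks discovered from the
 * PROM at boot; it is terminated by an entry whose num_bytes is zero,
 * which is what the loop above relies on.
 */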
extern void sun4c_complete_all_stores(void);

/* Whee, a level 15 NMI interrupt memory error. Let's have fun... */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
				unsigned long svaddr, unsigned long aerr,
				unsigned long avaddr)
{
	sun4c_complete_all_stores();
	printk("FAULT: NMI received\n");
	printk("SREGS: Synchronous Error %08lx\n", serr);
	printk("       Synchronous Vaddr %08lx\n", svaddr);
	printk("      Asynchronous Error %08lx\n", aerr);
	printk("      Asynchronous Vaddr %08lx\n", avaddr);
	if (sun4c_memerr_reg)
		printk("     Memory Parity Error %08lx\n", *sun4c_memerr_reg);
	printk("REGISTER DUMP:\n");
	show_regs(regs);
	prom_halt();
}
static void unhandled_fault(unsigned long, struct task_struct *,
		struct pt_regs *) __attribute__ ((noreturn));

static void unhandled_fault(unsigned long address, struct task_struct *tsk,
		struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %08lx\n", address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
	       (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
		(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}
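/* Decode notes for the magic numbers above: SPARC loads and stores are
 * format-3 instructions with the op3 field in bits 24:19.  Bit 21 of
 * the instruction (bit 2 of op3) is set for stores and clear for loads,
 * which is what the (insn >> 21) & 1 tests check.  op3 == 0x0f is SWAP:
 * its store bit is set, but it also performs a load, so case 2 still
 * lets the fixup handle it instead of bumping out.
 */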
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}
static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	siginfo_t info;

	info.si_signo = sig;
	info.si_code = code;
	info.si_errno = 0;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, info.si_code,
				addr, current);

	force_sig_info(sig, &info, current);
}
extern unsigned long safe_compute_effective_address(struct pt_regs *,
						    unsigned int);

static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS) {
		insn = *(unsigned int *) regs->pc;
	} else {
		/* A user pc must be fetched with __get_user(); it may
		 * itself be unmapped.
		 */
		__get_user(insn, (unsigned int *) regs->pc);
	}

	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}
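/* For data faults, the si_addr handed to userspace is not the raw fault
 * address but the effective address recomputed from the faulting
 * instruction (rs1 + rs2, or rs1 + simm13), which is what
 * safe_compute_effective_address() does.
 */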
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int fault, code;

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.  (The sync itself
	 * is done at vmalloc_fault: below.)
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (!ARCH_SUN4C && address >= TASK_SIZE)
		goto vmalloc_fault;

	code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
	down_read(&mm->mmap_sem);

	/*
	 * The kernel referencing a bad kernel pointer can lock up
	 * a sun4c machine completely, so we must attempt recovery.
	 */
	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR) {
		current->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
			      regs, address);
	} else {
		current->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
			      regs, address);
	}
	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);
bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		if (fixup > 10) {	/* Values below are reserved for other things */
			extern const unsigned __memset_start[];
			extern const unsigned __memset_end[];
			extern const unsigned __csum_partial_copy_start[];
			extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
			       regs->pc, fixup, g2);
#endif
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);
	do_exit(SIGKILL);
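/* The sparc32 exception table is a range table: search_extables_range()
 * returns the fixup pc for a fault inside a registered range and computes
 * a corrected %g2 along the way (the copy routines keep loop state there,
 * as far as the fixups are concerned).  Return values of 10 or below are
 * reserved markers, the same ones lookup_fault() switches on above, hence
 * the "fixup > 10" test.  Faults inside the __memset and
 * __csum_partial_copy windows additionally pass the fault address and pc
 * in %i4/%i5 for the fixup code to inspect.
 */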
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		*pmd = *pmd_k;
		return;
	}
}
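/* Only the top levels need copying in vmalloc_fault: once a task's pgd
 * (and pmd) entries match init_mm's, they point at the same shared
 * kernel pte pages, so later changes to the kernel mappings become
 * visible to every task without further synchronization.
 */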
asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	extern void sun4c_update_mmu_cache(struct vm_area_struct *,
					   unsigned long, pte_t *);
	extern pte_t *sun4c_pte_offset_kernel(pmd_t *, unsigned long);
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	pgd_t *pgdp;
	pte_t *ptep;

	if (text_fault) {
		address = regs->pc;
	} else if (!write &&
		   !(regs->psr & PSR_PS)) {
		unsigned int insn, __user *ip;

		ip = (unsigned int __user *)regs->pc;
		if (!get_user(insn, ip)) {
			if ((insn & 0xc1680000) == 0xc0680000)
				write = 1;
		}
	}

	if (!mm) {
		/* We are oopsing. */
		do_sparc_fault(regs, text_fault, write, address);
		BUG();	/* P3 Oops already, you bitch */
	}
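	/* The mask test on the faulting instruction above matches op3
	 * patterns 0x0d/0x0f/0x1d/0x1f, i.e. the atomic LDSTUB/SWAP[A]
	 * instructions.  Those report as read faults but need write
	 * permission, hence the promotion of 'write' before the lookup.
	 */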
	pgdp = pgd_offset(mm, address);
	ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);

	if (pgd_val(*pgdp)) {
	    if (write) {
		if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
				   == (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
			unsigned long flags;

			*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
				      _SUN4C_PAGE_MODIFIED |
				      _SUN4C_PAGE_VALID |
				      _SUN4C_PAGE_DIRTY);

			local_irq_save(flags);
			if (sun4c_get_segmap(address) != invalid_segment) {
				sun4c_put_pte(address, pte_val(*ptep));
				local_irq_restore(flags);
				return;
			}
			local_irq_restore(flags);
		}
	    } else {
		if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
				   == (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
			unsigned long flags;

			*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
				      _SUN4C_PAGE_VALID);

			local_irq_save(flags);
			if (sun4c_get_segmap(address) != invalid_segment) {
				sun4c_put_pte(address, pte_val(*ptep));
				local_irq_restore(flags);
				return;
			}
			local_irq_restore(flags);
		}
	    }
	}
	/* This conditional is 'interesting'. */
	if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
	    && (pte_val(*ptep) & _SUN4C_PAGE_VALID))
		/* Note: It is safe to not grab the MMAP semaphore here because
		 *       we know that update_mmu_cache() will not sleep for
		 *       any reason (at least not in the current implementation)
		 *       and therefore there is no danger of another thread getting
		 *       on the CPU and doing a shrink_mmap() on this vma.
		 */
		sun4c_update_mmu_cache(find_vma(current->mm, address), address,
				       ptep);
	else
		do_sparc_fault(regs, text_fault, write, address);
}
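/* Spelled out, the 'interesting' conditional reads: the pte exists, it
 * is valid, and it already grants the access being attempted (the only
 * disqualifier is a write fault on a non-writable pte).  In that case
 * the fault was merely a missing hardware translation, so reloading the
 * sun4c MMU cache suffices; anything else goes through the generic
 * handler.
 */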
/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}
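/* The three window fault handlers below fault in the stack area that a
 * trapped register window must be spilled to or filled from.  A window
 * is 16 registers; presumably they are moved with doubleword accesses,
 * so the last one lands at sp + 0x38.  Touching both sp and sp + 0x38
 * therefore covers the case where the 64-byte save area straddles a
 * page boundary.
 */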
void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);
}

void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);
}