/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                /*
                 * We need to disable preemption here because if we didn't,
                 * another process could get scheduled after the regs->msr
                 * test but before we have finished saving the FP registers
                 * to the thread_struct.  That process could take over the
                 * FPU, and then when we get scheduled again we would store
                 * bogus values for the remaining FP registers.
                 */
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
                        /*
                         * This should only ever be called for current or
                         * for a stopped child process.  Since we save away
                         * the FP register state on context switch on SMP,
                         * there is something wrong if a stopped child appears
                         * to still have its FP state in the CPU registers.
                         */
                        BUG_ON(tsk != current);
#endif
                        giveup_fpu(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
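
/*
 * Illustrative sketch (not code from this file): a typical caller such
 * as the ptrace FP-register path flushes a stopped child's FP state
 * into the thread_struct before reading it, roughly:
 *
 *      flush_fp_to_thread(child);
 *      memcpy(buf, child->thread.fpr, sizeof(child->thread.fpr));
 *
 * "child" and "buf" are placeholders for exposition only.
 */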

void enable_kernel_fp(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
                giveup_fpu(current);
        else
                giveup_fpu(NULL);       /* just enables FP for kernel */
#else
        giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
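
/*
 * Usage sketch (illustrative, not part of this file): kernel code that
 * wants to use the FPU must keep preemption disabled around the FP
 * section, since enable_kernel_fp() only enables MSR_FP on the current
 * CPU, e.g.:
 *
 *      preempt_disable();
 *      enable_kernel_fp();
 *      ... FP instructions ...
 *      preempt_enable();
 *
 * The same pattern applies to enable_kernel_altivec() below.
 */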

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
                giveup_altivec(current);
        else
                giveup_altivec_notask();
#else
        giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_altivec(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
                giveup_vsx(current);
        else
                giveup_vsx(NULL);       /* just enable vsx for kernel - force */
#else
        giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

void giveup_vsx(struct task_struct *tsk)
{
        giveup_fpu(tsk);
        giveup_altivec(tsk);
        __giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_vsx(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE

void enable_kernel_spe(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
                giveup_spe(current);
        else
                giveup_spe(NULL);       /* just enable SPE for kernel - force */
#else
        giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
                        giveup_spe(tsk);
                }
                preempt_enable();
        }
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
        preempt_disable();
        if (last_task_used_math == current)
                last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (last_task_used_vsx == current)
                last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        if (last_task_used_spe == current)
                last_task_used_spe = NULL;
#endif
        preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
                  unsigned long error_code, int signal_code, int breakpt)
{
        siginfo_t info;

        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = breakpt;        /* breakpoint or watchpoint id */
        info.si_code = signal_code;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_dabr(struct pt_regs *regs, unsigned long address,
                    unsigned long error_code)
{
        siginfo_t info;

        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        if (debugger_dabr_match(regs))
                return;

        /* Clear the DABR */
        set_dabr(0);

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_HWBKPT;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#endif  /* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(unsigned long, current_dabr);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        thread->iac3 = thread->iac4 = 0;
#endif
        thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        thread->dvc1 = thread->dvc2 = 0;
#endif
        thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
        /*
         * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
         */
        thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |   \
                        DBCR1_IAC3US | DBCR1_IAC4US;
        /*
         * Force Data Address Compare User/Supervisor bits to be User-only
         * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
         */
        thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
        thread->dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct thread_struct *thread)
{
        mtspr(SPRN_IAC1, thread->iac1);
        mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        mtspr(SPRN_IAC3, thread->iac3);
        mtspr(SPRN_IAC4, thread->iac4);
#endif
        mtspr(SPRN_DAC1, thread->dac1);
        mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        mtspr(SPRN_DVC1, thread->dvc1);
        mtspr(SPRN_DVC2, thread->dvc2);
#endif
        mtspr(SPRN_DBCR0, thread->dbcr0);
        mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
        mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}
/*
 * If either the old or the new thread is using the debug
 * registers, set the debug registers from the values
 * stored in the new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
        if ((current->thread.dbcr0 & DBCR0_IDM)
                || (new_thread->dbcr0 & DBCR0_IDM))
                        prime_debug_regs(new_thread);
}
#else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        if (thread->dabr) {
                thread->dabr = 0;
                set_dabr(0);
        }
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif  /* CONFIG_PPC_ADV_DEBUG_REGS */

int set_dabr(unsigned long dabr)
{
        __get_cpu_var(current_dabr) = dabr;

        if (ppc_md.set_dabr)
                return ppc_md.set_dabr(dabr);

        /* XXX should we have a CPU_FTR_HAS_DABR ? */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
        isync();
#endif
#elif defined(CONFIG_PPC_BOOK3S)
        mtspr(SPRN_DABR, dabr);
#endif

        return 0;
}
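
/*
 * Illustrative sketch (an assumption, not code from this file): the
 * ptrace PTRACE_SET_DEBUGREG path arms a write watchpoint on an
 * effective address roughly like this:
 *
 *      set_dabr(addr | DABR_TRANSLATION | DABR_DATA_WRITE);
 *
 * where the low-order DABR bits select translation mode and the
 * read/write match conditions.
 */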

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

struct task_struct *__switch_to(struct task_struct *prev,
        struct task_struct *new)
{
        struct thread_struct *new_thread, *old_thread;
        unsigned long flags;
        struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
        struct ppc64_tlb_batch *batch;
#endif

#ifdef CONFIG_SMP
        /* avoid complexity of lazy save/restore of fpu
         * by just saving it every time we switch out if
         * this task used the fpu during the last quantum.
         *
         * If it tries to use the fpu again, it'll trap and
         * reload its fp regs.  So we don't have to do a restore
         * every switch, just a save.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
                giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
        /*
         * If the previous thread used altivec in the last quantum
         * (thus changing altivec regs) then save them.
         * We used to check the VRSAVE register but not all apps
         * set it, so we don't rely on it now (and in fact we need
         * to save & restore VSCR even if VRSAVE == 0).  -- paulus
         *
         * On SMP we always save/restore altivec regs just to avoid the
         * complexity of changing processors.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
                giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
                /* VMX and FPU registers are already saved here */
                __giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /*
         * If the previous thread used spe in the last quantum
         * (thus changing spe regs) then save them.
         *
         * On SMP we always save/restore spe regs just to avoid the
         * complexity of changing processors.
         */
        if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
                giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
        /* Avoid the trap.  On SMP this never happens since
         * we don't set last_task_used_altivec -- Cort
         */
        if (new->thread.regs && last_task_used_altivec == new)
                new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (new->thread.regs && last_task_used_vsx == new)
                new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /* Avoid the trap.  On SMP this never happens since
         * we don't set last_task_used_spe
         */
        if (new->thread.regs && last_task_used_spe == new)
                new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces, which
 * take care of scheduling the DABR.
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
        if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
                set_dabr(new->thread.dabr);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

        new_thread = &new->thread;
        old_thread = &current->thread;

#ifdef CONFIG_PPC64
        /*
         * Collect processor utilization data per process
         */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                unsigned long start_tb, current_tb;
                start_tb = old_thread->start_tb;
                cu->current_tb = current_tb = mfspr(SPRN_PURR);
                old_thread->accum_tb += (current_tb - start_tb);
                new_thread->start_tb = current_tb;
        }
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
        batch = &__get_cpu_var(ppc64_tlb_batch);
        if (batch->active) {
                current_thread_info()->local_flags |= _TLF_LAZY_MMU;
                if (batch->index)
                        __flush_tlb_pending(batch);
                batch->active = 0;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

        local_irq_save(flags);

        account_system_vtime(current);
        account_process_vtime(current);

        /*
         * We can't take a PMU exception inside _switch() since there is a
         * window where the kernel stack SLB and the kernel stack are out
         * of sync. Hard disable here.
         */
        hard_irq_disable();
        last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
        if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
                current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
                batch = &__get_cpu_var(ppc64_tlb_batch);
                batch->active = 1;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

        local_irq_restore(flags);

        return last;
}

static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
        int i;
        unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
                        sizeof(int));

        printk("Instruction dump:");

        for (i = 0; i < instructions_to_print; i++) {
                int instr;

                if (!(i % 8))
                        printk("\n");

#if !defined(CONFIG_BOOKE)
                /* If executing with the IMMU off, adjust pc rather
                 * than print XXXXXXXX.
                 */
                if (!(regs->msr & MSR_IR))
                        pc = (unsigned long)phys_to_virt(pc);
#endif

                /* We use __get_user here *only* to avoid an OOPS on a
                 * bad address because the pc *should* only be a
                 * kernel address.
                 */
                if (!__kernel_text_address(pc) ||
                     __get_user(instr, (unsigned int __user *)pc)) {
                        printk(KERN_CONT "XXXXXXXX ");
                } else {
                        if (regs->nip == pc)
                                printk(KERN_CONT "<%08x> ", instr);
                        else
                                printk(KERN_CONT "%08x ", instr);
                }

                pc += sizeof(int);
        }

        printk("\n");
}

static struct regbit {
        unsigned long bit;
        const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
        {MSR_SF,        "SF"},
        {MSR_HV,        "HV"},
#endif
        {MSR_VEC,       "VEC"},
        {MSR_VSX,       "VSX"},
#ifdef CONFIG_BOOKE
        {MSR_CE,        "CE"},
#endif
        {MSR_EE,        "EE"},
        {MSR_PR,        "PR"},
        {MSR_FP,        "FP"},
        {MSR_ME,        "ME"},
#ifdef CONFIG_BOOKE
        {MSR_DE,        "DE"},
#else
        {MSR_SE,        "SE"},
        {MSR_BE,        "BE"},
#endif
        {MSR_IR,        "IR"},
        {MSR_DR,        "DR"},
        {MSR_PMM,       "PMM"},
#ifndef CONFIG_BOOKE
        {MSR_RI,        "RI"},
        {MSR_LE,        "LE"},
#endif
        {0,             NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
        const char *sep = "";

        printk("<");
        for (; bits->bit; ++bits)
                if (val & bits->bit) {
                        printk("%s%s", sep, bits->name);
                        sep = ",";
                }
        printk(">");
}
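
/*
 * Worked example (illustrative): on 32-bit, a typical kernel MSR value
 * of 0x9032 has EE, ME, IR, DR and RI set, so
 *
 *      printbits(0x9032, msr_bits);
 *
 * prints "<EE,ME,IR,DR,RI>".
 */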

#ifdef CONFIG_PPC64
#define REG             "%016lx"
#define REGS_PER_LINE   4
#define LAST_VOLATILE   13
#else
#define REG             "%08lx"
#define REGS_PER_LINE   8
#define LAST_VOLATILE   12
#endif

void show_regs(struct pt_regs * regs)
{
        int i, trap;

        printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
               regs->nip, regs->link, regs->ctr);
        printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
               regs, regs->trap, print_tainted(), init_utsname()->release);
        printk("MSR: "REG" ", regs->msr);
        printbits(regs->msr, msr_bits);
        printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
#ifdef CONFIG_PPC64
        printk("SOFTE: %ld\n", regs->softe);
#endif
        trap = TRAP(regs);
        if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
                printk("CFAR: "REG"\n", regs->orig_gpr3);
        if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
                printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
                printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif
        printk("TASK = %p[%d] '%s' THREAD: %p",
               current, task_pid_nr(current), current->comm, task_thread_info(current));

#ifdef CONFIG_SMP
        printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

        for (i = 0;  i < 32;  i++) {
                if ((i % REGS_PER_LINE) == 0)
                        printk("\nGPR%02d: ", i);
                printk(REG " ", regs->gpr[i]);
                if (i == LAST_VOLATILE && !FULL_REGS(regs))
                        break;
        }
        printk("\n");
#ifdef CONFIG_KALLSYMS
        /*
         * Look up the NIP late so we have the best chance of getting the
         * above info out without failing
         */
        printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
        printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
        show_stack(current, (unsigned long *) regs->gpr[1]);
        if (!user_mode(regs))
                show_instructions(regs);
}

void exit_thread(void)
{
        discard_lazy_cpu_state();
}

void flush_thread(void)
{
        discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
        set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * this gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        flush_fp_to_thread(src);
        flush_altivec_to_thread(src);
        flush_vsx_to_thread(src);
        flush_spe_to_thread(src);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(src);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

        *dst = *src;
        return 0;
}

/*
 * Copy a thread.
 */
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

int copy_thread(unsigned long clone_flags, unsigned long usp,
                unsigned long unused, struct task_struct *p,
                struct pt_regs *regs)
{
        struct pt_regs *childregs, *kregs;
        extern void ret_from_fork(void);
        unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

        CHECK_FULL_REGS(regs);
        /* Copy registers */
        sp -= sizeof(struct pt_regs);
        childregs = (struct pt_regs *) sp;
        *childregs = *regs;
        if ((childregs->msr & MSR_PR) == 0) {
                /* for kernel thread, set `current' and stackptr in new task */
                childregs->gpr[1] = sp + sizeof(struct pt_regs);
#ifdef CONFIG_PPC32
                childregs->gpr[2] = (unsigned long) p;
#else
                clear_tsk_thread_flag(p, TIF_32BIT);
#endif
                p->thread.regs = NULL;  /* no user register state */
        } else {
                childregs->gpr[1] = usp;
                p->thread.regs = childregs;
                if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
                        if (!is_32bit_task())
                                childregs->gpr[13] = childregs->gpr[6];
                        else
#endif
                                childregs->gpr[2] = childregs->gpr[6];
                }
        }
        childregs->gpr[3] = 0;  /* Result from fork() */
        sp -= STACK_FRAME_OVERHEAD;

        /*
         * The way this works is that at some point in the future
         * some task will call _switch to switch to the new task.
         * That will pop off the stack frame created below and start
         * the new task running at ret_from_fork.  The new task will
         * do some house keeping and then return from the fork or clone
         * system call, using the stack frame created above.
         */
        sp -= sizeof(struct pt_regs);
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
        p->thread.ksp = sp;
        p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
                                _ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC_STD_MMU_64
        if (mmu_has_feature(MMU_FTR_SLB)) {
                unsigned long sp_vsid;
                unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

                if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                        sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
                                << SLB_VSID_SHIFT_1T;
                else
                        sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT;
                sp_vsid |= SLB_VSID_KERNEL | llp;
                p->thread.ksp_vsid = sp_vsid;
        }
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
        if (cpu_has_feature(CPU_FTR_DSCR)) {
                if (current->thread.dscr_inherit) {
                        p->thread.dscr_inherit = 1;
                        p->thread.dscr = current->thread.dscr;
                } else if (0 != dscr_default) {
                        p->thread.dscr_inherit = 1;
                        p->thread.dscr = dscr_default;
                } else {
                        p->thread.dscr_inherit = 0;
                        p->thread.dscr = 0;
                }
        }
#endif

        /*
         * The PPC64 ABI makes use of a TOC to contain function
         * pointers.  The function symbol (ret_from_fork here) is actually
         * a pointer to its function descriptor; the first entry of the
         * descriptor is the address of the actual function.
         */
#ifdef CONFIG_PPC64
        kregs->nip = *((unsigned long *)ret_from_fork);
#else
        kregs->nip = (unsigned long)ret_from_fork;
#endif

        return 0;
}
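
/*
 * Sketch of the child's kernel stack as copy_thread() leaves it (layout
 * inferred from the code above, shown only as a reading aid):
 *
 *      task_stack_page(p) + THREAD_SIZE        (top of stack)
 *        struct pt_regs        <- childregs (user or kthread regs)
 *        STACK_FRAME_OVERHEAD
 *        struct pt_regs        <- kregs (nip = ret_from_fork)
 *        STACK_FRAME_OVERHEAD
 *                              <- p->thread.ksp
 */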

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
        unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
#endif

        /*
         * If we exec out of a kernel thread then thread.regs will not be
         * set.  Do it now.
         */
        if (!current->thread.regs) {
                struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
                current->thread.regs = regs - 1;
        }

        memset(regs->gpr, 0, sizeof(regs->gpr));
        regs->ctr = 0;
        regs->link = 0;
        regs->xer = 0;
        regs->ccr = 0;
        regs->gpr[1] = sp;

        /*
         * We have just cleared all the nonvolatile GPRs, so make
         * FULL_REGS(regs) return true.  This is necessary to allow
         * ptrace to examine the thread immediately after exec.
         */
        regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
        regs->mq = 0;
        regs->nip = start;
        regs->msr = MSR_USER;
#else
        if (!is_32bit_task()) {
                unsigned long entry, toc;

                /* start is a relocated pointer to the function descriptor for
                 * the elf _start routine.  The first entry in the function
                 * descriptor is the entry address of _start and the second
                 * entry is the TOC value we need to use.
                 */
                __get_user(entry, (unsigned long __user *)start);
                __get_user(toc, (unsigned long __user *)start+1);

                /* Check whether the e_entry function descriptor entries
                 * need to be relocated before we can use them.
                 */
                if (load_addr != 0) {
                        entry += load_addr;
                        toc   += load_addr;
                }
                regs->nip = entry;
                regs->gpr[2] = toc;
                regs->msr = MSR_USER64;
        } else {
                regs->nip = start;
                regs->gpr[2] = 0;
                regs->msr = MSR_USER32;
        }
#endif

        discard_lazy_cpu_state();
#ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
#endif
        memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
        current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
        memset(current->thread.vr, 0, sizeof(current->thread.vr));
        memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
        current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
        memset(current->thread.evr, 0, sizeof(current->thread.evr));
        current->thread.acc = 0;
        current->thread.spefscr = 0;
        current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
                | PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        /* This is a bit hairy.  If we are an SPE enabled processor
         * (have embedded fp) we store the IEEE exception enable flags in
         * fpexc_mode.  fpexc_mode is also used for setting FP exception
         * mode (async, precise, disabled) for 'Classic' FP. */
        if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE)) {
                        tsk->thread.fpexc_mode = val &
                                (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
                        return 0;
                } else {
                        return -EINVAL;
                }
#else
                return -EINVAL;
#endif
        }

        /* On a CONFIG_SPE implementation this does not hurt us.  The bits
         * that __pack_fe01 uses do not overlap with the bits used for
         * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
         * on CONFIG_SPE implementations are reserved, so writing to
         * them does not change anything */
        if (val > PR_FP_EXC_PRECISE)
                return -EINVAL;
        tsk->thread.fpexc_mode = __pack_fe01(val);
        if (regs != NULL && (regs->msr & MSR_FP) != 0)
                regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
                        | tsk->thread.fpexc_mode;
        return 0;
}
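
/*
 * Userspace-facing sketch (illustrative): this is reached via
 * prctl(PR_SET_FPEXC, ...), e.g. a process asking for precise FP
 * exception reporting:
 *
 *      #include <sys/prctl.h>
 *      prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 */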

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
        unsigned int val;

        if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE))
                        val = tsk->thread.fpexc_mode;
                else
                        return -EINVAL;
#else
                return -EINVAL;
#endif
        else
                val = __unpack_fe01(tsk->thread.fpexc_mode);
        return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
            (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (val == PR_ENDIAN_BIG)
                regs->msr &= ~MSR_LE;
        else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
                regs->msr |= MSR_LE;
        else
                return -EINVAL;

        return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
        struct pt_regs *regs = tsk->thread.regs;
        unsigned int val;

        if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
            !cpu_has_feature(CPU_FTR_REAL_LE))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (regs->msr & MSR_LE) {
                if (cpu_has_feature(CPU_FTR_REAL_LE))
                        val = PR_ENDIAN_LITTLE;
                else
                        val = PR_ENDIAN_PPC_LITTLE;
        } else
                val = PR_ENDIAN_BIG;

        return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
        tsk->thread.align_ctl = val;
        return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
        return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

#define TRUNC_PTR(x)    ((typeof(x))(((unsigned long)(x)) & 0xffffffff))

int sys_clone(unsigned long clone_flags, unsigned long usp,
              int __user *parent_tidp, void __user *child_threadptr,
              int __user *child_tidp, int p6,
              struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        if (usp == 0)
                usp = regs->gpr[1];     /* stack pointer for child */
#ifdef CONFIG_PPC64
        if (is_32bit_task()) {
                parent_tidp = TRUNC_PTR(parent_tidp);
                child_tidp = TRUNC_PTR(child_tidp);
        }
#endif
        return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}

int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
             unsigned long p4, unsigned long p5, unsigned long p6,
             struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
              unsigned long p4, unsigned long p5, unsigned long p6,
              struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
                        regs, 0, NULL, NULL);
}

int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
               unsigned long a3, unsigned long a4, unsigned long a5,
               struct pt_regs *regs)
{
        int error;
        char *filename;

        filename = getname((const char __user *) a0);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        flush_fp_to_thread(current);
        flush_altivec_to_thread(current);
        flush_spe_to_thread(current);
        error = do_execve(filename,
                          (const char __user *const __user *) a1,
                          (const char __user *const __user *) a2, regs);
        putname(filename);
out:
        return error;
}

static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
                                  unsigned long nbytes)
{
        unsigned long stack_page;
        unsigned long cpu = task_cpu(p);

        /*
         * Avoid crashing if the stack has overflowed and corrupted
         * task_cpu(p), which is in the thread_info struct.
         */
        if (cpu < NR_CPUS && cpu_possible(cpu)) {
                stack_page = (unsigned long) hardirq_ctx[cpu];
                if (sp >= stack_page + sizeof(struct thread_struct)
                    && sp <= stack_page + THREAD_SIZE - nbytes)
                        return 1;

                stack_page = (unsigned long) softirq_ctx[cpu];
                if (sp >= stack_page + sizeof(struct thread_struct)
                    && sp <= stack_page + THREAD_SIZE - nbytes)
                        return 1;
        }
        return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
                       unsigned long nbytes)
{
        unsigned long stack_page = (unsigned long)task_stack_page(p);

        if (sp >= stack_page + sizeof(struct thread_struct)
            && sp <= stack_page + THREAD_SIZE - nbytes)
                return 1;

        return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long ip, sp;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        sp = p->thread.ksp;
        if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                return 0;

        do {
                sp = *(unsigned long *)sp;
                if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                        return 0;
                if (count > 0) {
                        ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
                        if (!in_sched_functions(ip))
                                return ip;
                }
        } while (count++ < 16);
        return 0;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
        unsigned long sp, ip, lr, newsp;
        int count = 0;
        int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        int curr_frame = current->curr_ret_stack;
        extern void return_to_handler(void);
        unsigned long rth = (unsigned long)return_to_handler;
        unsigned long mrth = -1;
#ifdef CONFIG_PPC64
        extern void mod_return_to_handler(void);
        rth = *(unsigned long *)rth;
        mrth = (unsigned long)mod_return_to_handler;
        mrth = *(unsigned long *)mrth;
#endif
#endif

        sp = (unsigned long) stack;
        if (tsk == NULL)
                tsk = current;
        if (sp == 0) {
                if (tsk == current)
                        asm("mr %0,1" : "=r" (sp));
                else
                        sp = tsk->thread.ksp;
        }

        lr = 0;
        printk("Call Trace:\n");
        do {
                if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
                        return;

                stack = (unsigned long *) sp;
                newsp = stack[0];
                ip = stack[STACK_FRAME_LR_SAVE];
                if (!firstframe || ip != lr) {
                        printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
                        if ((ip == rth || ip == mrth) && curr_frame >= 0) {
                                printk(" (%pS)",
                                       (void *)current->ret_stack[curr_frame].ret);
                                curr_frame--;
                        }
#endif
                        if (firstframe)
                                printk(" (unreliable)");
                        printk("\n");
                }
                firstframe = 0;

                /*
                 * See if this is an exception frame.
                 * We look for the "regshere" marker in the current frame.
                 */
                if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
                    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
                        struct pt_regs *regs = (struct pt_regs *)
                                (sp + STACK_FRAME_OVERHEAD);
                        lr = regs->link;
                        printk("--- Exception: %lx at %pS\n    LR = %pS\n",
                               regs->trap, (void *)regs->nip, (void *)lr);
                        firstframe = 1;
                }

                sp = newsp;
        } while (count++ < kstack_depth_to_print);
}

void dump_stack(void)
{
        show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void __ppc64_runlatch_on(void)
{
        struct thread_info *ti = current_thread_info();
        unsigned long ctrl;

        ctrl = mfspr(SPRN_CTRLF);
        ctrl |= CTRL_RUNLATCH;
        mtspr(SPRN_CTRLT, ctrl);

        ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void __ppc64_runlatch_off(void)
{
        struct thread_info *ti = current_thread_info();
        unsigned long ctrl;

        ti->local_flags &= ~_TLF_RUNLATCH;

        ctrl = mfspr(SPRN_CTRLF);
        ctrl &= ~CTRL_RUNLATCH;
        mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */

unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;
        return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = 0;

        /* 8MB for 32bit, 1GB for 64bit */
        if (is_32bit_task())
                rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
        else
                rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

        return rnd << PAGE_SHIFT;
}
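
/*
 * Worked example (assuming 4K pages, PAGE_SHIFT == 12): for a 32-bit
 * task, get_random_int() % (1 << (23 - 12)) yields 0..2047 pages, so
 * rnd << PAGE_SHIFT randomizes the brk within an 8MB window; the
 * 64-bit case uses (30 - PAGE_SHIFT) for a 1GB window.
 */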

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
        /*
         * If we are using 1TB segments and we are allowed to randomise
         * the heap, we can put it above 1TB so it is backed by a 1TB
         * segment. Otherwise the heap will be in the bottom 1TB
         * which always uses 256MB segments and this may result in a
         * performance penalty.
         */
        if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
                base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

        ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < mm->brk)
                return mm->brk;

        return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
        unsigned long ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < base)
                return base;

        return ret;
}