/*
 * arch/x86/kernel/process.c
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <trace/events/power.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/ds.h>

unsigned long idle_halt;
EXPORT_SYMBOL(idle_halt);
unsigned long idle_nomwait;
EXPORT_SYMBOL(idle_nomwait);

struct kmem_cache *task_xstate_cachep;

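/*
 * Called on fork to duplicate the architecture-specific parts of the
 * task_struct.  The extended FPU state cannot be shared with the parent,
 * so a fresh, 16-byte aligned buffer is allocated and copied.
 */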
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;
	if (src->thread.xstate) {
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		WARN_ON((unsigned long)dst->thread.xstate & 15);
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}
	return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}

	WARN(tsk->thread.ds_ctx, "leaking DS context\n");
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}

void arch_task_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;

	if (bp) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}
}

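/*
 * Called on exec: drop the hardware debug registers, the TLS entries and
 * the FPU/coprocessor state inherited from the old program image.
 */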
void flush_thread(void)
{
	struct task_struct *tsk = current;

	clear_tsk_thread_flag(tsk, TIF_DEBUG);

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}

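/*
 * Per-task RDTSC permission is implemented with the CR4.TSD (time stamp
 * disable) bit: with TSD set, RDTSC from user mode faults and the kernel
 * delivers SIGSEGV to the task.  The hard_*_TSC() helpers flip the bit on
 * the current CPU; TIF_NOTSC carries the setting across context switches.
 */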
static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

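/*
 * get_tsc_mode()/set_tsc_mode() back the PR_GET_TSC/PR_SET_TSC prctl()
 * operations.  Illustrative userspace usage (not part of this file):
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	/* further RDTSCs fault *\/
 *	prctl(PR_SET_TSC, PR_TSC_ENABLE);	/* allow RDTSC again *\/
 */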
int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

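/*
 * Handle the rarely used parts of a context switch: DS (debug store)
 * context, hardware debug registers, per-task RDTSC permission and the
 * I/O permission bitmap.  Called from __switch_to() when one of the
 * relevant TIF flags is set on either task.
 */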
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
	    test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
		ds_switch_to(prev_p, next_p);
	else if (next->debugctlmsr != prev->debugctlmsr)
		update_debugctlmsr(next->debugctlmsr);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		set_debugreg(next->debugreg0, 0);
		set_debugreg(next->debugreg1, 1);
		set_debugreg(next->debugreg2, 2);
		set_debugreg(next->debugreg3, 3);
		/* no 4 and 5 */
		set_debugreg(next->debugreg6, 6);
		set_debugreg(next->debugreg7, 7);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
}

int sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
		       NULL, NULL);
}


/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;
void disable_hlt(void)
{
	hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
	return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
	return 1;
}
#endif

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (hlt_use_halt()) {
		trace_power_start(POWER_CSTATE, 1);
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		if (!need_resched())
			safe_halt();	/* enables interrupts racelessly */
		else
			local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
	} else {
		local_irq_enable();
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();

	for (;;) {
		if (hlt_works(smp_processor_id()))
			halt();
	}
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Ensure that all CPUs discard the old value of pm_idle
 * and pick up the new one. Required when changing the pm_idle handler on
 * SMP systems.
 *
 * The caller must have changed pm_idle to the new value before the call.
 * The old pm_idle value will not be used by any CPU after this function
 * returns.
 */
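/*
 * Illustrative usage when installing a replacement handler (the routine
 * name below is hypothetical):
 *
 *	pm_idle = my_idle_routine;
 *	cpu_idle_wait();
 */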
void cpu_idle_wait(void)
{
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
	trace_power_start(POWER_CSTATE, (ax>>4)+1);
	if (!need_resched()) {
		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(ax, cx);
	}
}

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	if (!need_resched()) {
		trace_power_start(POWER_CSTATE, 1);
		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
	} else
		local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	trace_power_start(POWER_CSTATE, 0);
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_power_end(0);
}

/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
 * then depend on a clock divisor and current Pstate of the core. If
 * all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */
static int __cpuinitdata force_mwait;

#define MWAIT_INFO			0x05
#define MWAIT_ECX_EXTENDED_INFO		0x01
#define MWAIT_EDX_C1			0xf0

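/*
 * mwait_usable() queries CPUID leaf 5 (MONITOR/MWAIT) and reports whether
 * MWAIT can be used for the C1 state on this CPU.  With idle=mwait the
 * check is skipped and MWAIT is always reported usable.
 */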
static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (force_mwait)
		return 1;

	if (c->cpuid_level < MWAIT_INFO)
		return 0;

	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
	/* Check whether EDX has extended info about MWAIT */
	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
		return 1;

	/*
	 * edx enumerates MONITOR/MWAIT extensions. Check whether
	 * C1 supports MWAIT.
	 */
	return (edx & MWAIT_EDX_C1);
}

static cpumask_var_t c1e_mask;
static int c1e_detected;

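/*
 * Clear this CPU's bit in c1e_mask, typically when the CPU is taken down,
 * so the broadcast-mode setup in c1e_idle() runs again if it comes back.
 */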
void c1e_remove_cpu(int cpu)
{
	if (c1e_mask != NULL)
		cpumask_clear_cpu(cpu, c1e_mask);
}

/*
 * C1E aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void c1e_idle(void)
{
	if (need_resched())
		return;

	if (!c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			c1e_detected = 1;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			printk(KERN_INFO "System has AMD C1E enabled\n");
			set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
		}
	}

	if (c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, c1e_mask)) {
			cpumask_set_cpu(cpu, c1e_mask);
			/*
			 * Force broadcast so ACPI can not interfere.
			 */
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
					   &cpu);
			printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
			       cpu);
		}
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
		local_irq_enable();
	} else
		default_idle();
}

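/*
 * Pick the pm_idle routine: MWAIT when it is usable, the C1E aware
 * routine on AMD parts affected by erratum 400, and plain HLT via
 * default_idle() otherwise. Once pm_idle is set, later calls return
 * without changing it.
 */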
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (pm_idle == poll_idle && smp_num_siblings > 1) {
		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
			" performance may degrade.\n");
	}
#endif
	if (pm_idle)
		return;

	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
		/*
		 * One CPU supports mwait => all CPUs support mwait
		 */
		printk(KERN_INFO "using mwait in idle threads.\n");
		pm_idle = mwait_idle;
	} else if (cpu_has_amd_erratum(amd_erratum_400)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		printk(KERN_INFO "using C1E aware idle routine\n");
		pm_idle = c1e_idle;
	} else
		pm_idle = default_idle;
}

void __init init_c1e_mask(void)
{
	/* If we're using c1e_idle, we need to allocate c1e_mask. */
	if (pm_idle == c1e_idle)
		zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
}

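/*
 * Parse the "idle=" kernel boot parameter: "poll", "mwait", "halt" and
 * "nomwait" are recognized; anything else is rejected.
 */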
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
	} else if (!strcmp(str, "mwait"))
		force_mwait = 1;
	else if (!strcmp(str, "halt")) {
		/*
		 * With idle=halt, HLT is forced for CPU idle and the
		 * C2/C3 states won't be used again.
		 * boot_option_idle_override is left untouched so the
		 * CPU idle driver can still be loaded.
		 */
		pm_idle = default_idle;
		idle_halt = 1;
		return 0;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * With idle=nomwait, mwait is disabled for the CPU
		 * C2/C3 states. boot_option_idle_override is likewise
		 * left untouched.
		 */
		idle_nomwait = 1;
		return 0;
	} else
		return -1;

	boot_option_idle_override = 1;
	return 0;
}
early_param("idle", idle_setup);

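/*
 * Randomize the initial user stack pointer by up to 8 kB (unless address
 * space randomization is disabled) and align it down to 16 bytes.
 */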
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

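/*
 * Place the start of the heap at a random offset within 32 MB above the
 * current brk; fall back to the unrandomized brk on failure.
 */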
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}