/*
 * arch/x86/xen/smp.c - Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

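/*
 * Set of CPUs whose initial context has already been handed to Xen
 * with VCPUOP_initialise.  A vcpu may only be initialised once, so
 * cpu_initialize_context() tests and sets this map before building a
 * new vcpu_guest_context.
 */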
cpumask_var_t xen_cpu_initialized_map;

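/*
 * Per-cpu Linux irq numbers returned by bind_ipi_to_irqhandler() and
 * bind_virq_to_irqhandler() below; -1 means "not (yet) bound", which
 * the error path in xen_smp_intr_init() relies on.
 */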
static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
static DEFINE_PER_CPU(int, xen_callfunc_irq) = -1;
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq) = -1;
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule callback.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
        inc_irq_stat(irq_resched_count);
        scheduler_ipi();

        return IRQ_HANDLED;
}

static void __cpuinit cpu_bringup(void)
{
        int cpu;

        cpu_init();
        touch_softlockup_watchdog();
        preempt_disable();

        xen_enable_sysenter();
        xen_enable_syscall();

        cpu = smp_processor_id();
        smp_store_cpu_info(cpu);
        cpu_data(cpu).x86_max_cores = 1;
        set_cpu_sibling_map(cpu);

        xen_setup_cpu_clockevents();

        notify_cpu_starting(cpu);

        ipi_call_lock();
        set_cpu_online(cpu, true);
        ipi_call_unlock();

        this_cpu_write(cpu_state, CPU_ONLINE);

        wmb();

        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();

        wmb();                  /* make sure everything is out */
}

static void __cpuinit cpu_bringup_and_idle(void)
{
        cpu_bringup();
        cpu_idle();
}

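/*
 * Under Xen there is no APIC: each IPI "vector" is really a per-cpu
 * event channel.  bind_ipi_to_irqhandler() allocates the event channel
 * and hooks it up to a Linux irq, which we stash per cpu so that the
 * failure path below and xen_cpu_die() can unbind it again.
 */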
static int xen_smp_intr_init(unsigned int cpu)
{
        int rc;
        const char *resched_name, *callfunc_name, *debug_name;

        resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
                                    cpu,
                                    xen_reschedule_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    resched_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_resched_irq, cpu) = rc;

        callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
                                    cpu,
                                    xen_call_function_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfunc_irq, cpu) = rc;

        debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
        rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
                                     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
                                     debug_name, NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_debug_irq, cpu) = rc;

        callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
                                    cpu,
                                    xen_call_function_single_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfuncsingle_irq, cpu) = rc;

        return 0;

 fail:
        if (per_cpu(xen_resched_irq, cpu) >= 0) {
                unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
                per_cpu(xen_resched_irq, cpu) = -1;
        }
        if (per_cpu(xen_callfunc_irq, cpu) >= 0) {
                unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
                per_cpu(xen_callfunc_irq, cpu) = -1;
        }
        if (per_cpu(xen_debug_irq, cpu) >= 0) {
                unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
                per_cpu(xen_debug_irq, cpu) = -1;
        }
        if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0) {
                unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
                                       NULL);
                per_cpu(xen_callfuncsingle_irq, cpu) = -1;
        }

        return rc;
}

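/*
 * For an unprivileged guest the possible-CPU map is discovered by
 * probing each vcpu with VCPUOP_is_up; dom0 keeps the maps it got
 * from ACPI and trims them in xen_filter_cpu_maps() instead.
 */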
static void __init xen_fill_possible_map(void)
{
        int i, rc;

        if (xen_initial_domain())
                return;

        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                }
        }
}

static void __init xen_filter_cpu_maps(void)
{
        int i, rc;
        unsigned int subtract = 0;

        if (!xen_initial_domain())
                return;

        num_processors = 0;
        disabled_cpus = 0;
        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                } else {
                        set_cpu_possible(i, false);
                        set_cpu_present(i, false);
                        subtract++;
                }
        }
#ifdef CONFIG_HOTPLUG_CPU
        /* This is akin to using 'nr_cpus' on the Linux command line:
         * with 'dom0_max_vcpus=X' we can only have up to X VCPUs,
         * while nr_cpu_ids may be greater than X.  This is normally
         * not a problem, except when CPU hotplugging is involved -
         * then there might be more than X CPUs in the guest, which
         * will not work as there is no hypercall to expand the
         * maximum number of VCPUs an already running guest has.
         * So cap it at X. */
        if (subtract)
                nr_cpu_ids = nr_cpu_ids - subtract;
#endif
}

static void __init xen_smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);
        native_smp_prepare_boot_cpu();

        /* We've switched to the "real" per-cpu gdt, so make sure the
           old memory can be recycled */
        make_lowmem_page_readwrite(xen_initial_gdt);

        xen_filter_cpu_maps();
        xen_setup_vcpu_info_placement();
}

static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;
        unsigned int i;

        if (skip_ioapic_setup) {
                char *m = (max_cpus == 0) ?
                        "The nosmp parameter is incompatible with Xen; " \
                        "use Xen dom0_max_vcpus=1 parameter" :
                        "The noapic parameter is incompatible with Xen";

                xen_raw_printk(m);
                panic(m);
        }
        xen_init_lock_cpu(0);

        smp_store_cpu_info(0);
        cpu_data(0).x86_max_cores = 1;

        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
        }
        set_cpu_sibling_map(0);

        if (xen_smp_intr_init(0))
                BUG();

        if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
                panic("could not allocate xen_cpu_initialized_map\n");

        cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                        continue;
                set_cpu_possible(cpu, false);
        }

        for_each_possible_cpu(cpu)
                set_cpu_present(cpu, true);
}

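/*
 * Build the initial register and descriptor-table state for a new
 * vcpu and hand it to Xen with VCPUOP_initialise.  The vcpu starts
 * executing at cpu_bringup_and_idle() with this context when
 * xen_cpu_up() later issues VCPUOP_up.
 */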
static int __cpuinit
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
        struct vcpu_guest_context *ctxt;
        struct desc_struct *gdt;
        unsigned long gdt_mfn;

        if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
                return 0;

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (ctxt == NULL)
                return -ENOMEM;

        gdt = get_cpu_gdt_table(cpu);

        ctxt->flags = VGCF_IN_KERNEL;
        ctxt->user_regs.ds = __USER_DS;
        ctxt->user_regs.es = __USER_DS;
        ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
        ctxt->user_regs.fs = __KERNEL_PERCPU;
        ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
        ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
        ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
        ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

        xen_copy_trap_info(ctxt->trap_ctxt);

        ctxt->ldt_ents = 0;

        BUG_ON((unsigned long)gdt & ~PAGE_MASK);

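        /*
         * Xen refuses to load a GDT from pages the guest can still
         * write, so mark the frame read-only via both the kernel
         * mapping and its lowmem alias before handing the mfn over.
         */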
        gdt_mfn = arbitrary_virt_to_mfn(gdt);
        make_lowmem_page_readonly(gdt);
        make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

        ctxt->gdt_frames[0] = gdt_mfn;
        ctxt->gdt_ents      = GDT_ENTRIES;

        ctxt->user_regs.cs = __KERNEL_CS;
        ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

        ctxt->kernel_ss = __KERNEL_DS;
        ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
        ctxt->event_callback_cs     = __KERNEL_CS;
        ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
        ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
        ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

        per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
        ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

        if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
                BUG();

        kfree(ctxt);
        return 0;
}

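/*
 * Bring a secondary CPU online: set up its per-cpu state, initialise
 * its vcpu context (a one-time operation), kick it with VCPUOP_up and
 * then spin until cpu_bringup() on the new vcpu reports CPU_ONLINE.
 */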
static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int rc;

        per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
        irq_ctx_init(cpu);
#else
        clear_tsk_thread_flag(idle, TIF_FORK);
        per_cpu(kernel_stack, cpu) =
                (unsigned long)task_stack_page(idle) -
                KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
        xen_setup_runstate_info(cpu);
        xen_setup_timer(cpu);
        xen_init_lock_cpu(cpu);

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* make sure interrupts start blocked */
        per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

        rc = cpu_initialize_context(cpu, idle);
        if (rc)
                return rc;

        if (num_online_cpus() == 1)
                alternatives_smp_switch(1);

        rc = xen_smp_intr_init(cpu);
        if (rc)
                return rc;

        rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
        BUG_ON(rc);

        while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
                HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                barrier();
        }

        return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        if (cpu == 0)
                return -EBUSY;

        cpu_disable_common();

        load_cr3(swapper_pg_dir);
        return 0;
}

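/*
 * Called on the CPU doing the offlining: wait until Xen reports the
 * vcpu as down (VCPUOP_is_up returns 0) before tearing down its
 * event-channel bindings, spinlock state and timer.
 */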
static void xen_cpu_die(unsigned int cpu)
{
        while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
                current->state = TASK_UNINTERRUPTIBLE;
                schedule_timeout(HZ/10);
        }
        unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
        xen_uninit_lock_cpu(cpu);
        xen_teardown_timer(cpu);

        if (num_online_cpus() == 1)
                alternatives_smp_switch(0);
}

static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        cpu_bringup();
        /*
         * Balance out the preempt calls - we are running in the cpu_idle
         * loop, which was entered at bootup from cpu_bringup_and_idle.
         * cpu_bringup_and_idle called cpu_bringup, which did a
         * preempt_disable(), so this preempt_enable() balances it out.
         */
        preempt_enable();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
        return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
        BUG();
}

static void xen_play_dead(void)
{
        BUG();
}

#endif
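
/*
 * Runs on every CPU via smp_call_function(): take the calling vcpu
 * offline with VCPUOP_down.  On success the hypercall does not
 * return, hence the BUG() below.
 */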
static void stop_self(void *v)
{
        int cpu = smp_processor_id();

        /* make sure we're not pinning something down */
        load_cr3(swapper_pg_dir);
        /* should set up a minimal gdt */

        set_cpu_online(cpu, false);

        HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
        BUG();
}

static void xen_stop_other_cpus(int wait)
{
        smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void xen_send_IPI_mask(const struct cpumask *mask,
                              enum ipi_vector vector)
{
        unsigned cpu;

        for_each_cpu_and(cpu, mask, cpu_online_mask)
                xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
        int cpu;

        xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

        /* Make sure other vcpus get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
                if (xen_vcpu_stolen(cpu)) {
                        HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                        break;
                }
        }
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
        xen_send_IPI_mask(cpumask_of(cpu),
                          XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static const struct smp_ops xen_smp_ops __initconst = {
        .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
        .smp_cpus_done = xen_smp_cpus_done,

        .cpu_up = xen_cpu_up,
        .cpu_die = xen_cpu_die,
        .cpu_disable = xen_cpu_disable,
        .play_dead = xen_play_dead,

        .stop_other_cpus = xen_stop_other_cpus,
        .smp_send_reschedule = xen_smp_send_reschedule,

        .send_call_func_ipi = xen_smp_send_call_function_ipi,
        .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
        smp_ops = xen_smp_ops;
        xen_fill_possible_map();
        xen_init_spinlocks();
}

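/*
 * HVM guests with vector callback support use the native SMP bringup,
 * but IPIs still travel over Xen event channels, so only the hooks
 * that touch event channels are overridden below.
 */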
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
        native_smp_prepare_cpus(max_cpus);
        WARN_ON(xen_smp_intr_init(0));

        xen_init_lock_cpu(0);
}

static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int rc;

        rc = native_cpu_up(cpu, tidle);
        WARN_ON(xen_smp_intr_init(cpu));
        return rc;
}

static void xen_hvm_cpu_die(unsigned int cpu)
{
        unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
        native_cpu_die(cpu);
}

void __init xen_hvm_smp_init(void)
{
        if (!xen_have_vector_callback)
                return;
        smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
        smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
        smp_ops.cpu_up = xen_hvm_cpu_up;
        smp_ops.cpu_die = xen_hvm_cpu_die;
        smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
        smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
}