[firefly-linux-kernel-4.4.55.git] arch/x86/kernel/kvm.c
/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
        kvmclock_vsyscall = 0;
        return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

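/*
 * Each of the features above can be turned off on the kernel command
 * line; e.g. booting with "no-steal-acc" leaves the steal clock
 * available but skips steal-time accounting in the scheduler.
 */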
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

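/*
 * Async page faults are delivered with a token rather than an address.
 * Tasks sleeping on an outstanding fault are parked in a small hash
 * table keyed by that token; the host's "page ready" notification looks
 * the token up here and wakes the matching task.
 */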
struct kvm_task_sleep_node {
        struct hlist_node link;
        wait_queue_head_t wq;
        u32 token;
        int cpu;
        bool halted;
};

static struct kvm_task_sleep_head {
        spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }

        return NULL;
}

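/*
 * Sleep until the host reports that the page behind @token is present.
 * If the wake-up was delivered before we got here, we find a dummy node
 * and return immediately.  If the current task cannot sleep (the idle
 * task, or a preempt-disabled context), it halts the CPU instead and
 * waits to be kicked by the wake path.
 */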
void kvm_async_pf_task_wait(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node n, *e;
        DEFINE_WAIT(wait);

        rcu_irq_enter();

        spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* dummy entry exists -> wakeup was delivered ahead of PF */
                hlist_del(&e->link);
                kfree(e);
                spin_unlock(&b->lock);

                rcu_irq_exit();
                return;
        }

        n.token = token;
        n.cpu = smp_processor_id();
        n.halted = is_idle_task(current) || preempt_count() > 1;
        init_waitqueue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        spin_unlock(&b->lock);

        for (;;) {
                if (!n.halted)
                        prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                if (!n.halted) {
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                } else {
                        /*
                         * We cannot reschedule. So halt.
                         */
                        rcu_irq_exit();
                        native_safe_halt();
                        rcu_irq_enter();
                        local_irq_disable();
                }
        }
        if (!n.halted)
                finish_wait(&n.wq, &wait);

        rcu_irq_exit();
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

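/*
 * Wake one sleeper; the caller must hold the bucket lock.  A halted
 * sleeper is kicked with a reschedule IPI, a sleeping one through its
 * wait queue.
 */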
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (n->halted)
                smp_send_reschedule(n->cpu);
        else if (waitqueue_active(&n->wq))
                wake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct hlist_node *p, *next;
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        struct kvm_task_sleep_node *n =
                                hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                spin_unlock(&b->lock);
        }
}

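/*
 * Handle a "page ready" notification from the host.  A token of ~0
 * means "wake everything on this CPU".  If no sleeper is found the
 * fault has not been processed yet, so a dummy node is left behind
 * for kvm_async_pf_task_wait() to find.
 */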
void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * async PF was not yet handled.
                 * Add dummy entry for the token.
                 */
                n = kzalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy-wait while another
                         * CPU handles the async PF.
                         */
                        spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                init_waitqueue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else
                apf_task_wake_one(n);
        spin_unlock(&b->lock);
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
        u32 reason = 0;

        if (__get_cpu_var(apf_reason).enabled) {
                reason = __get_cpu_var(apf_reason).reason;
                __get_cpu_var(apf_reason).reason = 0;
        }

        return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);

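/*
 * Replacement #PF handler.  A reason of 0 is an ordinary page fault and
 * is forwarded to do_page_fault(); for the two paravirt reasons, CR2
 * carries the async PF token rather than a faulting address.
 */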
dotraplinkage void __kprobes
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        enum ctx_state prev_state;

        switch (kvm_read_and_reset_pf_reason()) {
        default:
                do_page_fault(regs, error_code);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
                prev_state = exception_enter();
                exit_idle();
                kvm_async_pf_task_wait((u32)read_cr2());
                exception_exit(prev_state);
                break;
        case KVM_PV_REASON_PAGE_READY:
                rcu_irq_enter();
                exit_idle();
                kvm_async_pf_task_wake((u32)read_cr2());
                rcu_irq_exit();
                break;
        }
}

static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";

        /*
         * KVM isn't paravirt in the sense of paravirt_enabled.  A KVM
         * guest kernel works like a bare metal kernel with additional
         * features, and paravirt_enabled is about features that are
         * missing.
         */
        pv_info.paravirt_enabled = 0;

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif
}

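/*
 * Tell the host where this CPU's steal-time record lives: the MSR takes
 * the physical address of the structure with KVM_MSR_ENABLED OR'd into
 * the low bits.
 */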
static void kvm_register_steal_time(void)
{
        int cpu = smp_processor_id();
        struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

        if (!has_steal_clock)
                return;

        memset(st, 0, sizeof(*st));

        wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
        pr_info("kvm-stealtime: cpu %d, msr %llx\n",
                cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
        /**
         * This relies on __test_and_clear_bit to modify the memory
         * in a way that is atomic with respect to the local CPU.
         * The hypervisor only accesses this memory from the local CPU so
         * there's no need for lock or memory barriers.
         * An optimization barrier is implied in the apic write.
         */
        if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi)))
                return;
        apic_write(APIC_EOI, APIC_EOI_ACK);
}

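/*
 * Per-CPU guest setup: enable each paravirt feature on the current CPU
 * by writing the physical address of its per-cpu backing buffer to the
 * corresponding MSR.
 */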
void __cpuinit kvm_guest_cpu_init(void)
{
        if (!kvm_para_available())
                return;

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
                u64 pa = slow_virt_to_phys(&__get_cpu_var(apf_reason));

#ifdef CONFIG_PREEMPT
                pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
                wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
                __get_cpu_var(apf_reason).enabled = 1;
                printk(KERN_INFO"KVM setup async PF for cpu %d\n",
                       smp_processor_id());
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
                unsigned long pa;
                /* Size alignment is implied but just to make it explicit. */
                BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
                __get_cpu_var(kvm_apic_eoi) = 0;
                pa = slow_virt_to_phys(&__get_cpu_var(kvm_apic_eoi))
                        | KVM_MSR_ENABLED;
                wrmsrl(MSR_KVM_PV_EOI_EN, pa);
        }

        if (has_steal_clock)
                kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
        if (!__get_cpu_var(apf_reason).enabled)
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __get_cpu_var(apf_reason).enabled = 0;

        printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
               smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
        /*
         * We disable PV EOI before we load a new kernel by kexec, since
         * MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's memory.
         * The new kernel can re-enable it when it boots.
         */
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};

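/*
 * Read the host-maintained steal time for @cpu.  The version field
 * works like a seqcount: an odd value means an update is in flight,
 * and a changed value means we raced with one; retry in either case.
 */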
static u64 kvm_steal_clock(int cpu)
{
        u64 steal;
        struct kvm_steal_time *src;
        int version;

        src = &per_cpu(steal_time, cpu);
        do {
                version = src->version;
                rmb();
                steal = src->steal;
                rmb();
        } while ((version & 1) || (version != src->version));

        return steal;
}

void kvm_disable_steal_time(void)
{
        if (!has_steal_clock)
                return;

        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
        WARN_ON(kvm_register_clock("primary cpu clock"));
        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
}

static void __cpuinit kvm_guest_cpu_online(void *dummy)
{
        kvm_guest_cpu_init();
}

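/*
 * Going offline: detach every piece of shared host/guest state (steal
 * time, PV EOI, async PF) and release any tasks still parked on this
 * CPU waiting for an async page fault.
 */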
static void kvm_guest_cpu_offline(void *dummy)
{
        kvm_disable_steal_time();
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        apf_task_wake_all();
}

static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
{
        int cpu = (unsigned long)hcpu;
        switch (action) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
        case CPU_ONLINE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
        .notifier_call  = kvm_cpu_notify,
};
#endif

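/*
 * #PF is vector 14; install the async_page_fault entry stub in its
 * place so faults are routed through do_async_page_fault().
 */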
static void __init kvm_apf_trap_init(void)
{
        set_intr_gate(14, &async_page_fault);
}

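/*
 * Top-level guest init: probe each optional paravirt feature via
 * kvm_para_has_feature() and wire up the corresponding hooks.
 */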
void __init kvm_guest_init(void)
{
        int i;

        if (!kvm_para_available())
                return;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                spin_lock_init(&async_pf_sleepers[i].lock);
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
                x86_init.irqs.trap_init = kvm_apf_trap_init;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
                pv_time_ops.steal_clock = kvm_steal_clock;
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);

        if (kvmclock_vsyscall)
                kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        register_cpu_notifier(&kvm_cpu_notifier);
#else
        kvm_guest_cpu_init();
#endif
}

static bool __init kvm_detect(void)
{
        if (!kvm_para_available())
                return false;
        return true;
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
        .name                   = "KVM",
        .detect                 = kvm_detect,
        .x2apic_available       = kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

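/*
 * Steal-time accounting sits behind jump labels so that guests without
 * the feature pay nothing; flip them on once we know the host provides
 * a steal clock.
 */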
static __init int activate_jump_labels(void)
{
        if (has_steal_clock) {
                static_key_slow_inc(&paravirt_steal_enabled);
                if (steal_acc)
                        static_key_slow_inc(&paravirt_steal_rq_enabled);
        }

        return 0;
}
arch_initcall(activate_jump_labels);