/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>

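/*
 * Asynchronous page faults let the host tell the guest that a page it
 * touched is currently swapped out; "no-kvmapf" on the kernel command
 * line turns the feature off.
 */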
static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

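/* "no-steal-acc" disables steal time accounting in the scheduler. */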
static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

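/*
 * Per-cpu areas shared with the host. Their physical addresses are handed
 * to the host through MSR_KVM_ASYNC_PF_EN and MSR_KVM_STEAL_TIME; both
 * MSRs use the low bits of the address as flag bits, so the ABI requires
 * the structures to be 64-byte aligned.
 */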
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

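/*
 * One node per task sleeping on an async PF token, hashed by token into
 * KVM_TASK_SLEEP_HASHSIZE buckets, each with its own lock.
 */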
struct kvm_task_sleep_node {
        struct hlist_node link;
        wait_queue_head_t wq;
        u32 token;
        int cpu;
        bool halted;
        struct mm_struct *mm;
};

static struct kvm_task_sleep_head {
        spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

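/* Look up the sleep node for @token in bucket @b; caller holds b->lock. */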
static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }

        return NULL;
}

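/*
 * Called on a PAGE_NOT_PRESENT notification: the faulting task sleeps
 * until the host signals PAGE_READY for the same token. When sleeping is
 * not allowed (the CPU was idle or preemption is disabled), poll with
 * halt instead. If the wake up raced ahead of us, a dummy node is already
 * hashed and we only need to free it.
 */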
void kvm_async_pf_task_wait(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node n, *e;
        DEFINE_WAIT(wait);
        int cpu, idle;

        cpu = get_cpu();
        idle = idle_cpu(cpu);
        put_cpu();

        spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* dummy entry exists -> wake up was delivered ahead of PF */
                hlist_del(&e->link);
                kfree(e);
                spin_unlock(&b->lock);
                return;
        }

        n.token = token;
        n.cpu = smp_processor_id();
        n.mm = current->active_mm;
        n.halted = idle || preempt_count() > 1;
        atomic_inc(&n.mm->mm_count);
        init_waitqueue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        spin_unlock(&b->lock);

        for (;;) {
                if (!n.halted)
                        prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                if (!n.halted) {
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                } else {
                        /*
                         * We cannot reschedule. So halt.
                         */
                        native_safe_halt();
                        local_irq_disable();
                }
        }
        if (!n.halted)
                finish_wait(&n.wq, &wait);

        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

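/*
 * Unhash @n and wake its sleeper; the caller holds the bucket lock.
 * Dummy nodes (mm == NULL) have no sleeper. Halted waiters poll in
 * native_safe_halt() and are kicked with a reschedule IPI instead.
 */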
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (!n->mm)
                return;
        mmdrop(n->mm);
        if (n->halted)
                smp_send_reschedule(n->cpu);
        else if (waitqueue_active(&n->wq))
                wake_up(&n->wq);
}

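/*
 * Wake every task that went to sleep on this CPU; used for the ~0
 * broadcast token and when a CPU is taken offline.
 */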
static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct hlist_node *p, *next;
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        struct kvm_task_sleep_node *n =
                                hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                spin_unlock(&b->lock);
        }
}

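/*
 * Called on a PAGE_READY notification. If the wake up arrives before the
 * corresponding fault was handled, leave a dummy entry behind for
 * kvm_async_pf_task_wait() to find.
 */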
void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * async PF was not yet handled.
                 * Add a dummy entry for the token.
                 */
                n = kmalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy wait while another cpu
                         * handles the async PF.
                         */
                        spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                n->mm = NULL;
                init_waitqueue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else
                apf_task_wake_one(n);
        spin_unlock(&b->lock);
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

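/*
 * Return the fault reason the host stored in the shared per-cpu area and
 * clear it; 0 means async PF is disabled or this is an ordinary #PF.
 */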
u32 kvm_read_and_reset_pf_reason(void)
{
        u32 reason = 0;

        if (__get_cpu_var(apf_reason).enabled) {
                reason = __get_cpu_var(apf_reason).reason;
                __get_cpu_var(apf_reason).reason = 0;
        }

        return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);

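/*
 * #PF handler installed in place of the native one when async PF is
 * enabled. For the paravirtual reasons CR2 holds the token chosen by the
 * host, not a fault address.
 */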
dotraplinkage void __kprobes
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        switch (kvm_read_and_reset_pf_reason()) {
        default:
                do_page_fault(regs, error_code);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
                kvm_async_pf_task_wait((u32)read_cr2());
                break;
        case KVM_PV_REASON_PAGE_READY:
                kvm_async_pf_task_wake((u32)read_cr2());
                break;
        }
}

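/*
 * Global paravirt_ops setup: replace the port 0x80 I/O delay with a no-op
 * when the host advertises KVM_FEATURE_NOP_IO_DELAY, and skip the
 * IO-APIC timer pin check, which is not needed on KVM.
 */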
static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";
        pv_info.paravirt_enabled = 1;

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif
}

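/*
 * Hand the host the physical address of this CPU's steal time area;
 * KVM_MSR_ENABLED (bit 0) switches the accounting on.
 */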
static void kvm_register_steal_time(void)
{
        int cpu = smp_processor_id();
        struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

        if (!has_steal_clock)
                return;

        memset(st, 0, sizeof(*st));

        wrmsrl(MSR_KVM_STEAL_TIME, (__pa(st) | KVM_MSR_ENABLED));
        printk(KERN_INFO "kvm-stealtime: cpu %d, msr %lx\n",
                cpu, __pa(st));
}

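/*
 * Per-cpu PV feature setup: register the async PF and steal time shared
 * areas with the host. Runs on the boot CPU from
 * kvm_smp_prepare_boot_cpu() and on secondaries from the hotplug
 * notifier. With CONFIG_PREEMPT the kernel can usually reschedule even
 * when faulting in kernel mode, so ask the host to send async page
 * faults at CPL0 as well (KVM_ASYNC_PF_SEND_ALWAYS).
 */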
void __cpuinit kvm_guest_cpu_init(void)
{
        if (!kvm_para_available())
                return;

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
                u64 pa = __pa(&__get_cpu_var(apf_reason));

#ifdef CONFIG_PREEMPT
                pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
                wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
                __get_cpu_var(apf_reason).enabled = 1;
                printk(KERN_INFO "KVM setup async PF for cpu %d\n",
                       smp_processor_id());
        }

        if (has_steal_clock)
                kvm_register_steal_time();
}

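/*
 * Stop the host from writing to this CPU's async PF area. Called on every
 * CPU before reboot and when a CPU goes offline, so no stale state is
 * left behind.
 */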
static void kvm_pv_disable_apf(void *unused)
{
        if (!__get_cpu_var(apf_reason).enabled)
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __get_cpu_var(apf_reason).enabled = 0;

        printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
               smp_processor_id());
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_disable_apf, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};

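/*
 * pv_time_ops.steal_clock implementation. The host bumps version before
 * and after it updates the per-cpu steal time area, so an odd version
 * means an update is in flight; retry until a stable snapshot is read.
 */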
static u64 kvm_steal_clock(int cpu)
{
        u64 steal;
        struct kvm_steal_time *src;
        int version;

        src = &per_cpu(steal_time, cpu);
        do {
                version = src->version;
                rmb();
                steal = src->steal;
                rmb();
        } while ((version & 1) || (version != src->version));

        return steal;
}

void kvm_disable_steal_time(void)
{
        if (!has_steal_clock)
                return;

        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
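/*
 * The boot CPU registers its PV MSRs here, before the secondary CPUs are
 * brought up; secondaries go through the hotplug notifier below.
 */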
static void __init kvm_smp_prepare_boot_cpu(void)
{
#ifdef CONFIG_KVM_CLOCK
        WARN_ON(kvm_register_clock("primary cpu clock"));
#endif
        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
}

static void __cpuinit kvm_guest_cpu_online(void *dummy)
{
        kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
        kvm_disable_steal_time();
        kvm_pv_disable_apf(NULL);
        apf_task_wake_all();
}

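/*
 * CPU hotplug: re-register the shared areas when a CPU comes online and
 * tear them down (waking any remaining async PF sleepers) before it goes
 * down.
 */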
static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
{
        int cpu = (unsigned long)hcpu;
        switch (action) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
        case CPU_ONLINE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
        .notifier_call  = kvm_cpu_notify,
};
#endif

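/* Route vector 14 (#PF) to the async_page_fault entry stub. */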
static void __init kvm_apf_trap_init(void)
{
        set_intr_gate(14, &async_page_fault);
}

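/*
 * Main guest-side setup, run early during boot: wire up paravirt_ops,
 * the reboot notifier and, depending on the features the host advertises,
 * the async PF trap hook and the steal time clock. Per-cpu state is
 * initialized here on UP, and via smp_prepare_boot_cpu() plus the hotplug
 * notifier on SMP.
 */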
void __init kvm_guest_init(void)
{
        int i;

        if (!kvm_para_available())
                return;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                spin_lock_init(&async_pf_sleepers[i].lock);
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
                x86_init.irqs.trap_init = kvm_apf_trap_init;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
                pv_time_ops.steal_clock = kvm_steal_clock;
        }

#ifdef CONFIG_SMP
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        register_cpu_notifier(&kvm_cpu_notifier);
#else
        kvm_guest_cpu_init();
#endif
}

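/*
 * kvm_guest_init() runs before jump labels are set up, so the scheduler's
 * steal time static keys are flipped from an initcall instead.
 */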
static __init int activate_jump_labels(void)
{
        if (has_steal_clock) {
                static_key_slow_inc(&paravirt_steal_enabled);
                if (steal_acc)
                        static_key_slow_inc(&paravirt_steal_rq_enabled);
        }

        return 0;
}
arch_initcall(activate_jump_labels);