arch/arm/kvm/arm.c (firefly-linux-kernel-4.4.55.git, commit 99b270300ab1f059810a367d4df30367a7959fa3)
1 /*
2  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License, version 2, as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
17  */
18
19 #include <linux/cpu.h>
20 #include <linux/cpu_pm.h>
21 #include <linux/errno.h>
22 #include <linux/err.h>
23 #include <linux/kvm_host.h>
24 #include <linux/module.h>
25 #include <linux/vmalloc.h>
26 #include <linux/fs.h>
27 #include <linux/mman.h>
28 #include <linux/sched.h>
29 #include <linux/kvm.h>
30 #include <trace/events/kvm.h>
31
32 #define CREATE_TRACE_POINTS
33 #include "trace.h"
34
35 #include <asm/uaccess.h>
36 #include <asm/ptrace.h>
37 #include <asm/mman.h>
38 #include <asm/tlbflush.h>
39 #include <asm/cacheflush.h>
40 #include <asm/virt.h>
41 #include <asm/kvm_arm.h>
42 #include <asm/kvm_asm.h>
43 #include <asm/kvm_mmu.h>
44 #include <asm/kvm_emulate.h>
45 #include <asm/kvm_coproc.h>
46 #include <asm/kvm_psci.h>
47 #include <asm/sections.h>
48
49 #ifdef REQUIRES_VIRT
50 __asm__(".arch_extension        virt");
51 #endif
52
53 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
54 static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
55 static unsigned long hyp_default_vectors;
56
57 /* Per-CPU variable containing the currently running vcpu. */
58 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
59
60 /* The VMID used in the VTTBR */
61 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
62 static u32 kvm_next_vmid;
63 static unsigned int kvm_vmid_bits __read_mostly;
64 static DEFINE_SPINLOCK(kvm_vmid_lock);
65
66 static bool vgic_present;
67
68 static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
69 {
70         BUG_ON(preemptible());
71         __this_cpu_write(kvm_arm_running_vcpu, vcpu);
72 }
73
74 /**
75  * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
76  * Must be called from non-preemptible context
77  */
78 struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
79 {
80         BUG_ON(preemptible());
81         return __this_cpu_read(kvm_arm_running_vcpu);
82 }
83
84 /**
85  * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
86  */
87 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
88 {
89         return &kvm_arm_running_vcpu;
90 }
91
92 int kvm_arch_hardware_enable(void)
93 {
94         return 0;
95 }
96
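/*
 * A vcpu only needs an IPI kick if it is still executing guest code;
 * kvm_vcpu_exiting_guest_mode() reports the mode it was in before being
 * marked as exiting.
 */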
97 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
98 {
99         return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
100 }
101
102 int kvm_arch_hardware_setup(void)
103 {
104         return 0;
105 }
106
107 void kvm_arch_check_processor_compat(void *rtn)
108 {
109         *(int *)rtn = 0;
110 }
111
112
113 /**
114  * kvm_arch_init_vm - initializes a VM data structure
115  * @kvm:        pointer to the KVM struct
116  */
117 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
118 {
119         int ret = 0;
120
121         if (type)
122                 return -EINVAL;
123
124         ret = kvm_alloc_stage2_pgd(kvm);
125         if (ret)
126                 goto out_fail_alloc;
127
128         ret = create_hyp_mappings(kvm, kvm + 1);
129         if (ret)
130                 goto out_free_stage2_pgd;
131
132         kvm_vgic_early_init(kvm);
133         kvm_timer_init(kvm);
134
135         /* Mark the initial VMID generation invalid */
136         kvm->arch.vmid_gen = 0;
137
138         /* The maximum number of VCPUs is limited by the host's GIC model */
139         kvm->arch.max_vcpus = vgic_present ?
140                                 kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
141
142         return ret;
143 out_free_stage2_pgd:
144         kvm_free_stage2_pgd(kvm);
145 out_fail_alloc:
146         return ret;
147 }
148
149 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
150 {
151         return VM_FAULT_SIGBUS;
152 }
153
154
155 /**
156  * kvm_arch_destroy_vm - destroy the VM data structure
157  * @kvm:        pointer to the KVM struct
158  */
159 void kvm_arch_destroy_vm(struct kvm *kvm)
160 {
161         int i;
162
163         for (i = 0; i < KVM_MAX_VCPUS; ++i) {
164                 if (kvm->vcpus[i]) {
165                         kvm_arch_vcpu_free(kvm->vcpus[i]);
166                         kvm->vcpus[i] = NULL;
167                 }
168         }
169
170         kvm_vgic_destroy(kvm);
171 }
172
173 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
174 {
175         int r;
176         switch (ext) {
177         case KVM_CAP_IRQCHIP:
178                 r = vgic_present;
179                 break;
180         case KVM_CAP_IOEVENTFD:
181         case KVM_CAP_DEVICE_CTRL:
182         case KVM_CAP_USER_MEMORY:
183         case KVM_CAP_SYNC_MMU:
184         case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
185         case KVM_CAP_ONE_REG:
186         case KVM_CAP_ARM_PSCI:
187         case KVM_CAP_ARM_PSCI_0_2:
188         case KVM_CAP_READONLY_MEM:
189         case KVM_CAP_MP_STATE:
190                 r = 1;
191                 break;
192         case KVM_CAP_COALESCED_MMIO:
193                 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
194                 break;
195         case KVM_CAP_ARM_SET_DEVICE_ADDR:
196                 r = 1;
197                 break;
198         case KVM_CAP_NR_VCPUS:
199                 r = num_online_cpus();
200                 break;
201         case KVM_CAP_MAX_VCPUS:
202                 r = KVM_MAX_VCPUS;
203                 break;
204         default:
205                 r = kvm_arch_dev_ioctl_check_extension(ext);
206                 break;
207         }
208         return r;
209 }
210
211 long kvm_arch_dev_ioctl(struct file *filp,
212                         unsigned int ioctl, unsigned long arg)
213 {
214         return -EINVAL;
215 }
216
217
218 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
219 {
220         int err;
221         struct kvm_vcpu *vcpu;
222
223         if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
224                 err = -EBUSY;
225                 goto out;
226         }
227
228         if (id >= kvm->arch.max_vcpus) {
229                 err = -EINVAL;
230                 goto out;
231         }
232
233         vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
234         if (!vcpu) {
235                 err = -ENOMEM;
236                 goto out;
237         }
238
239         err = kvm_vcpu_init(vcpu, kvm, id);
240         if (err)
241                 goto free_vcpu;
242
243         err = create_hyp_mappings(vcpu, vcpu + 1);
244         if (err)
245                 goto vcpu_uninit;
246
247         return vcpu;
248 vcpu_uninit:
249         kvm_vcpu_uninit(vcpu);
250 free_vcpu:
251         kmem_cache_free(kvm_vcpu_cache, vcpu);
252 out:
253         return ERR_PTR(err);
254 }
255
256 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
257 {
258         kvm_vgic_vcpu_early_init(vcpu);
259 }
260
261 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
262 {
263         kvm_mmu_free_memory_caches(vcpu);
264         kvm_timer_vcpu_terminate(vcpu);
265         kvm_vgic_vcpu_destroy(vcpu);
266         kmem_cache_free(kvm_vcpu_cache, vcpu);
267 }
268
269 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
270 {
271         kvm_arch_vcpu_free(vcpu);
272 }
273
274 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
275 {
276         return kvm_timer_should_fire(vcpu);
277 }
278
279 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
280 {
281         kvm_timer_schedule(vcpu);
282 }
283
284 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
285 {
286         kvm_timer_unschedule(vcpu);
287 }
288
289 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
290 {
291         /* Force users to call KVM_ARM_VCPU_INIT */
292         vcpu->arch.target = -1;
293         bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
294
295         /* Set up the timer */
296         kvm_timer_vcpu_init(vcpu);
297
298         kvm_arm_reset_debug_ptr(vcpu);
299
300         return 0;
301 }
302
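/*
 * Record the physical CPU this vcpu now runs on, point it at this CPU's
 * saved host context and publish it as the currently running vcpu.
 */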
303 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
304 {
305         vcpu->cpu = cpu;
306         vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
307
308         kvm_arm_set_running_vcpu(vcpu);
309 }
310
311 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
312 {
313         /*
314          * The arch-generic KVM code expects the cpu field of a vcpu to be -1
315          * if the vcpu is no longer assigned to a cpu.  This is used for the
316          * optimized make_all_cpus_request path.
317          */
318         vcpu->cpu = -1;
319
320         kvm_arm_set_running_vcpu(NULL);
321 }
322
323 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
324                                     struct kvm_mp_state *mp_state)
325 {
326         if (vcpu->arch.power_off)
327                 mp_state->mp_state = KVM_MP_STATE_STOPPED;
328         else
329                 mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
330
331         return 0;
332 }
333
334 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
335                                     struct kvm_mp_state *mp_state)
336 {
337         switch (mp_state->mp_state) {
338         case KVM_MP_STATE_RUNNABLE:
339                 vcpu->arch.power_off = false;
340                 break;
341         case KVM_MP_STATE_STOPPED:
342                 vcpu->arch.power_off = true;
343                 break;
344         default:
345                 return -EINVAL;
346         }
347
348         return 0;
349 }
350
351 /**
352  * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
353  * @v:          The VCPU pointer
354  *
355  * If the guest CPU is not waiting for interrupts or an interrupt line is
356  * asserted, the CPU is by definition runnable.
357  */
358 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
359 {
360         return ((!!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v))
361                 && !v->arch.power_off && !v->arch.pause);
362 }
363
364 /* Just ensure a guest exit from a particular CPU */
365 static void exit_vm_noop(void *info)
366 {
367 }
368
369 void force_vm_exit(const cpumask_t *mask)
370 {
371         smp_call_function_many(mask, exit_vm_noop, NULL, true);
372 }
373
374 /**
375  * need_new_vmid_gen - check that the VMID is still valid
376  * @kvm: The VM's VMID to check
377  *
378  * return true if there is a new generation of VMIDs being used
379  *
380  * The hardware supports only 256 values with the value zero reserved for the
381  * host, so we check if an assigned value belongs to a previous generation,
382  * which requires us to assign a new value. If we're the first to use a
383  * VMID for the new generation, we must flush necessary caches and TLBs on all
384  * CPUs.
385  */
386 static bool need_new_vmid_gen(struct kvm *kvm)
387 {
388         return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
389 }
390
391 /**
392  * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
393  * @kvm: The guest that we are about to run
394  *
395  * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
396  * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
397  * caches and TLBs.
398  */
399 static void update_vttbr(struct kvm *kvm)
400 {
401         phys_addr_t pgd_phys;
402         u64 vmid;
403
404         if (!need_new_vmid_gen(kvm))
405                 return;
406
407         spin_lock(&kvm_vmid_lock);
408
409         /*
410          * We need to re-check the vmid_gen here under the lock: if another
411          * vcpu has already allocated a valid vmid for this vm, this vcpu
412          * should use the same vmid.
413          */
414         if (!need_new_vmid_gen(kvm)) {
415                 spin_unlock(&kvm_vmid_lock);
416                 return;
417         }
418
419         /* First user of a new VMID generation? */
420         if (unlikely(kvm_next_vmid == 0)) {
421                 atomic64_inc(&kvm_vmid_gen);
422                 kvm_next_vmid = 1;
423
424                 /*
425                  * On SMP we know no other CPUs can use this CPU's or each
426                  * other's VMID after force_vm_exit returns since the
427                  * kvm_vmid_lock blocks them from reentry to the guest.
428                  */
429                 force_vm_exit(cpu_all_mask);
430                 /*
431                  * Now broadcast TLB + ICACHE invalidation over the inner
432                  * shareable domain to make sure all data structures are
433                  * clean.
434                  */
435                 kvm_call_hyp(__kvm_flush_vm_context);
436         }
437
438         kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
439         kvm->arch.vmid = kvm_next_vmid;
440         kvm_next_vmid++;
441         kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
442
443         /* update vttbr to be used with the new vmid */
444         pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm));
445         BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
446         vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
447         kvm->arch.vttbr = pgd_phys | vmid;
448
449         spin_unlock(&kvm_vmid_lock);
450 }
451
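/*
 * One-time setup on a vcpu's first KVM_RUN: map the VM's VGIC hardware
 * resources (if not done yet) and enable the arch timer once the in-kernel
 * irqchip is initialized.
 */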
452 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
453 {
454         struct kvm *kvm = vcpu->kvm;
455         int ret;
456
457         if (likely(vcpu->arch.has_run_once))
458                 return 0;
459
460         vcpu->arch.has_run_once = true;
461
462         /*
463          * Map the VGIC hardware resources before running a vcpu the first
464          * time on this VM.
465          */
466         if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
467                 ret = kvm_vgic_map_resources(kvm);
468                 if (ret)
469                         return ret;
470         }
471
472         /*
473          * Enable the arch timers only if we have an in-kernel VGIC
474          * and it has been properly initialized, since we cannot handle
475          * interrupts from the virtual timer with a userspace gic.
476          */
477         if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
478                 kvm_timer_enable(kvm);
479
480         return 0;
481 }
482
483 bool kvm_arch_intc_initialized(struct kvm *kvm)
484 {
485         return vgic_initialized(kvm);
486 }
487
488 static void kvm_arm_halt_guest(struct kvm *kvm) __maybe_unused;
489 static void kvm_arm_resume_guest(struct kvm *kvm) __maybe_unused;
490
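/* Mark every vcpu of the VM as paused and force them out of the guest. */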
491 static void kvm_arm_halt_guest(struct kvm *kvm)
492 {
493         int i;
494         struct kvm_vcpu *vcpu;
495
496         kvm_for_each_vcpu(i, vcpu, kvm)
497                 vcpu->arch.pause = true;
498         force_vm_exit(cpu_all_mask);
499 }
500
501 static void kvm_arm_resume_guest(struct kvm *kvm)
502 {
503         int i;
504         struct kvm_vcpu *vcpu;
505
506         kvm_for_each_vcpu(i, vcpu, kvm) {
507                 wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
508
509                 vcpu->arch.pause = false;
510                 wake_up_interruptible(wq);
511         }
512 }
513
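/* Block the vcpu on its wait queue until it is neither powered off nor paused. */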
514 static void vcpu_sleep(struct kvm_vcpu *vcpu)
515 {
516         wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
517
518         wait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
519                                        (!vcpu->arch.pause)));
520 }
521
522 static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
523 {
524         return vcpu->arch.target >= 0;
525 }
526
527 /**
528  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
529  * @vcpu:       The VCPU pointer
530  * @run:        The kvm_run structure pointer used for userspace state exchange
531  *
532  * This function is called through the KVM_RUN ioctl from user space. It
533  * executes guest code in a loop until the time slice for the process is used
534  * up or some emulation is needed from user space, in which case it returns
535  * with return value 0 and with the kvm_run structure filled in with the data
536  * required for the requested emulation.
537  */
538 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
539 {
540         int ret;
541         sigset_t sigsaved;
542
543         if (unlikely(!kvm_vcpu_initialized(vcpu)))
544                 return -ENOEXEC;
545
546         ret = kvm_vcpu_first_run_init(vcpu);
547         if (ret)
548                 return ret;
549
550         if (run->exit_reason == KVM_EXIT_MMIO) {
551                 ret = kvm_handle_mmio_return(vcpu, vcpu->run);
552                 if (ret)
553                         return ret;
554         }
555
556         if (vcpu->sigset_active)
557                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
558
559         ret = 1;
560         run->exit_reason = KVM_EXIT_UNKNOWN;
561         while (ret > 0) {
562                 /*
563                  * Check conditions before entering the guest
564                  */
565                 cond_resched();
566
567                 update_vttbr(vcpu->kvm);
568
569                 if (vcpu->arch.power_off || vcpu->arch.pause)
570                         vcpu_sleep(vcpu);
571
572                 /*
573                  * Preparing the interrupts to be injected also
574                  * involves poking the GIC, which must be done in a
575                  * non-preemptible context.
576                  */
577                 preempt_disable();
578                 kvm_timer_flush_hwstate(vcpu);
579                 kvm_vgic_flush_hwstate(vcpu);
580
581                 local_irq_disable();
582
583                 /*
584                  * Re-check atomic conditions
585                  */
586                 if (signal_pending(current)) {
587                         ret = -EINTR;
588                         run->exit_reason = KVM_EXIT_INTR;
589                 }
590
591                 if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
592                         vcpu->arch.power_off || vcpu->arch.pause) {
593                         local_irq_enable();
594                         kvm_timer_sync_hwstate(vcpu);
595                         kvm_vgic_sync_hwstate(vcpu);
596                         preempt_enable();
597                         continue;
598                 }
599
600                 kvm_arm_setup_debug(vcpu);
601
602                 /**************************************************************
603                  * Enter the guest
604                  */
605                 trace_kvm_entry(*vcpu_pc(vcpu));
606                 __kvm_guest_enter();
607                 vcpu->mode = IN_GUEST_MODE;
608
609                 ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
610
611                 vcpu->mode = OUTSIDE_GUEST_MODE;
612                 /*
613                  * Back from guest
614                  *************************************************************/
615
616                 kvm_arm_clear_debug(vcpu);
617
618                 /*
619                  * We may have taken a host interrupt in HYP mode (i.e.
620                  * while executing the guest). This interrupt is still
621                  * pending, as we haven't serviced it yet!
622                  *
623                  * We're now back in SVC mode, with interrupts
624                  * disabled.  Enabling the interrupts now will have
625                  * the effect of taking the interrupt again, in SVC
626                  * mode this time.
627                  */
628                 local_irq_enable();
629
630                 /*
631                  * We do local_irq_enable() before calling kvm_guest_exit() so
632                  * that if a timer interrupt hits while running the guest we
633                  * account that tick as being spent in the guest.  We enable
634                  * preemption after calling kvm_guest_exit() so that if we get
635                  * preempted we make sure ticks after that are not counted as
636                  * guest time.
637                  */
638                 kvm_guest_exit();
639                 trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
640
641                 /*
642                  * We must sync the timer state before the vgic state so that
643                  * the vgic can properly sample the updated state of the
644                  * interrupt line.
645                  */
646                 kvm_timer_sync_hwstate(vcpu);
647
648                 kvm_vgic_sync_hwstate(vcpu);
649
650                 preempt_enable();
651
652                 ret = handle_exit(vcpu, run, ret);
653         }
654
655         if (vcpu->sigset_active)
656                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
657         return ret;
658 }
659
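/*
 * Set or clear the virtual IRQ/FIQ pending bit (HCR_VI/HCR_VF positions) in
 * vcpu->arch.irq_lines and kick the vcpu if the line state actually changed.
 */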
660 static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
661 {
662         int bit_index;
663         bool set;
664         unsigned long *ptr;
665
666         if (number == KVM_ARM_IRQ_CPU_IRQ)
667                 bit_index = __ffs(HCR_VI);
668         else /* KVM_ARM_IRQ_CPU_FIQ */
669                 bit_index = __ffs(HCR_VF);
670
671         ptr = (unsigned long *)&vcpu->arch.irq_lines;
672         if (level)
673                 set = test_and_set_bit(bit_index, ptr);
674         else
675                 set = test_and_clear_bit(bit_index, ptr);
676
677         /*
678          * If we didn't change anything, no need to wake up or kick other CPUs
679          */
680         if (set == level)
681                 return 0;
682
683         /*
684          * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
685          * trigger a world-switch round on the running physical CPU to set the
686          * virtual IRQ/FIQ fields in the HCR appropriately.
687          */
688         kvm_vcpu_kick(vcpu);
689
690         return 0;
691 }
692
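/*
 * Inject an interrupt described by the packed (type, vcpu, num) fields of
 * irq_level->irq: CPU IRQ/FIQ lines when there is no in-kernel irqchip,
 * PPIs and SPIs through the in-kernel VGIC otherwise.
 */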
693 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
694                           bool line_status)
695 {
696         u32 irq = irq_level->irq;
697         unsigned int irq_type, vcpu_idx, irq_num;
698         int nrcpus = atomic_read(&kvm->online_vcpus);
699         struct kvm_vcpu *vcpu = NULL;
700         bool level = irq_level->level;
701
702         irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
703         vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
704         irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
705
706         trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
707
708         switch (irq_type) {
709         case KVM_ARM_IRQ_TYPE_CPU:
710                 if (irqchip_in_kernel(kvm))
711                         return -ENXIO;
712
713                 if (vcpu_idx >= nrcpus)
714                         return -EINVAL;
715
716                 vcpu = kvm_get_vcpu(kvm, vcpu_idx);
717                 if (!vcpu)
718                         return -EINVAL;
719
720                 if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
721                         return -EINVAL;
722
723                 return vcpu_interrupt_line(vcpu, irq_num, level);
724         case KVM_ARM_IRQ_TYPE_PPI:
725                 if (!irqchip_in_kernel(kvm))
726                         return -ENXIO;
727
728                 if (vcpu_idx >= nrcpus)
729                         return -EINVAL;
730
731                 vcpu = kvm_get_vcpu(kvm, vcpu_idx);
732                 if (!vcpu)
733                         return -EINVAL;
734
735                 if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
736                         return -EINVAL;
737
738                 return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
739         case KVM_ARM_IRQ_TYPE_SPI:
740                 if (!irqchip_in_kernel(kvm))
741                         return -ENXIO;
742
743                 if (irq_num < VGIC_NR_PRIVATE_IRQS)
744                         return -EINVAL;
745
746                 return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
747         }
748
749         return -EINVAL;
750 }
751
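/*
 * Validate the requested target CPU and feature bits against the host and
 * against any earlier KVM_ARM_VCPU_INIT, record the features, then reset the
 * vcpu.
 */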
752 static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
753                                const struct kvm_vcpu_init *init)
754 {
755         unsigned int i;
756         int phys_target = kvm_target_cpu();
757
758         if (init->target != phys_target)
759                 return -EINVAL;
760
761         /*
762          * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
763          * use the same target.
764          */
765         if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
766                 return -EINVAL;
767
768         /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
769         for (i = 0; i < sizeof(init->features) * 8; i++) {
770                 bool set = (init->features[i / 32] & (1 << (i % 32)));
771
772                 if (set && i >= KVM_VCPU_MAX_FEATURES)
773                         return -ENOENT;
774
775                 /*
776                  * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
777                  * use the same feature set.
778                  */
779                 if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
780                     test_bit(i, vcpu->arch.features) != set)
781                         return -EINVAL;
782
783                 if (set)
784                         set_bit(i, vcpu->arch.features);
785         }
786
787         vcpu->arch.target = phys_target;
788
789         /* Now we know what it is, we can reset it. */
790         return kvm_reset_vcpu(vcpu);
791 }
792
793
794 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
795                                          struct kvm_vcpu_init *init)
796 {
797         int ret;
798
799         ret = kvm_vcpu_set_target(vcpu, init);
800         if (ret)
801                 return ret;
802
803         /*
804          * Ensure a rebooted VM will fault in RAM pages, so that we can detect
805          * if the guest MMU is turned off and flush the caches as needed.
806          */
807         if (vcpu->arch.has_run_once)
808                 stage2_unmap_vm(vcpu->kvm);
809
810         vcpu_reset_hcr(vcpu);
811
812         /*
813          * Handle the "start in power-off" case.
814          */
815         if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
816                 vcpu->arch.power_off = true;
817         else
818                 vcpu->arch.power_off = false;
819
820         return 0;
821 }
822
823 long kvm_arch_vcpu_ioctl(struct file *filp,
824                          unsigned int ioctl, unsigned long arg)
825 {
826         struct kvm_vcpu *vcpu = filp->private_data;
827         void __user *argp = (void __user *)arg;
828
829         switch (ioctl) {
830         case KVM_ARM_VCPU_INIT: {
831                 struct kvm_vcpu_init init;
832
833                 if (copy_from_user(&init, argp, sizeof(init)))
834                         return -EFAULT;
835
836                 return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
837         }
838         case KVM_SET_ONE_REG:
839         case KVM_GET_ONE_REG: {
840                 struct kvm_one_reg reg;
841
842                 if (unlikely(!kvm_vcpu_initialized(vcpu)))
843                         return -ENOEXEC;
844
845                 if (copy_from_user(&reg, argp, sizeof(reg)))
846                         return -EFAULT;
847                 if (ioctl == KVM_SET_ONE_REG)
848                         return kvm_arm_set_reg(vcpu, &reg);
849                 else
850                         return kvm_arm_get_reg(vcpu, &reg);
851         }
852         case KVM_GET_REG_LIST: {
853                 struct kvm_reg_list __user *user_list = argp;
854                 struct kvm_reg_list reg_list;
855                 unsigned n;
856
857                 if (unlikely(!kvm_vcpu_initialized(vcpu)))
858                         return -ENOEXEC;
859
860                 if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
861                         return -EFAULT;
862                 n = reg_list.n;
863                 reg_list.n = kvm_arm_num_regs(vcpu);
864                 if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
865                         return -EFAULT;
866                 if (n < reg_list.n)
867                         return -E2BIG;
868                 return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
869         }
870         default:
871                 return -EINVAL;
872         }
873 }
874
875 /**
876  * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
877  * @kvm: kvm instance
878  * @log: slot id and address to which we copy the log
879  *
880  * Steps 1-4 below provide general overview of dirty page logging. See
881  * Steps 1-4 below provide a general overview of dirty page logging. See the
882  * kvm_get_dirty_log_protect() function description for additional details.
883  * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
884  * always flush the TLB (step 4) even if a previous step failed and the dirty
885  * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
886  * API does not preclude a subsequent dirty log read by user space. Flushing
887  * the TLB ensures writes will be marked dirty for the next log read.
888  *
889  *   1. Take a snapshot of the bit and clear it if needed.
890  *   2. Write protect the corresponding page.
891  *   3. Copy the snapshot to the userspace.
892  *   4. Flush TLB's if needed.
893  */
894 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
895 {
896         bool is_dirty = false;
897         int r;
898
899         mutex_lock(&kvm->slots_lock);
900
901         r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
902
903         if (is_dirty)
904                 kvm_flush_remote_tlbs(kvm);
905
906         mutex_unlock(&kvm->slots_lock);
907         return r;
908 }
909
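/*
 * Decode the device id/type from the KVM_ARM_SET_DEVICE_ADDR argument and
 * program the corresponding VGIC base address.
 */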
910 static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
911                                         struct kvm_arm_device_addr *dev_addr)
912 {
913         unsigned long dev_id, type;
914
915         dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
916                 KVM_ARM_DEVICE_ID_SHIFT;
917         type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
918                 KVM_ARM_DEVICE_TYPE_SHIFT;
919
920         switch (dev_id) {
921         case KVM_ARM_DEVICE_VGIC_V2:
922                 if (!vgic_present)
923                         return -ENXIO;
924                 return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
925         default:
926                 return -ENODEV;
927         }
928 }
929
930 long kvm_arch_vm_ioctl(struct file *filp,
931                        unsigned int ioctl, unsigned long arg)
932 {
933         struct kvm *kvm = filp->private_data;
934         void __user *argp = (void __user *)arg;
935
936         switch (ioctl) {
937         case KVM_CREATE_IRQCHIP: {
938                 if (!vgic_present)
939                         return -ENXIO;
940                 return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
941         }
942         case KVM_ARM_SET_DEVICE_ADDR: {
943                 struct kvm_arm_device_addr dev_addr;
944
945                 if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
946                         return -EFAULT;
947                 return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
948         }
949         case KVM_ARM_PREFERRED_TARGET: {
950                 int err;
951                 struct kvm_vcpu_init init;
952
953                 err = kvm_vcpu_preferred_target(&init);
954                 if (err)
955                         return err;
956
957                 if (copy_to_user(argp, &init, sizeof(init)))
958                         return -EFAULT;
959
960                 return 0;
961         }
962         default:
963                 return -EINVAL;
964         }
965 }
966
967 static void cpu_init_stage2(void *dummy)
968 {
969         __cpu_init_stage2();
970 }
971
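/*
 * Per-CPU HYP initialisation: switch from the HYP stub to the KVM init
 * vector, install this CPU's HYP page tables, stack and exception vectors,
 * then set up stage-2 translation and debug state.
 */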
972 static void cpu_init_hyp_mode(void *dummy)
973 {
974         phys_addr_t boot_pgd_ptr;
975         phys_addr_t pgd_ptr;
976         unsigned long hyp_stack_ptr;
977         unsigned long stack_page;
978         unsigned long vector_ptr;
979
980         /* Switch from the HYP stub to our own HYP init vector */
981         __hyp_set_vectors(kvm_get_idmap_vector());
982
983         boot_pgd_ptr = kvm_mmu_get_boot_httbr();
984         pgd_ptr = kvm_mmu_get_httbr();
985         stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
986         hyp_stack_ptr = stack_page + PAGE_SIZE;
987         vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
988
989         __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
990         __cpu_init_stage2();
991
992         kvm_arm_init_debug();
993 }
994
995 static void cpu_hyp_reinit(void)
996 {
997         if (is_kernel_in_hyp_mode()) {
998                 /*
999                  * cpu_init_stage2() is safe to call even if the PM
1000                  * event was cancelled before the CPU was reset.
1001                  */
1002                 cpu_init_stage2(NULL);
1003         } else {
1004                 if (__hyp_get_vectors() == hyp_default_vectors)
1005                         cpu_init_hyp_mode(NULL);
1006         }
1007 }
1008
1009 static int hyp_init_cpu_notify(struct notifier_block *self,
1010                                unsigned long action, void *cpu)
1011 {
1012         switch (action) {
1013         case CPU_STARTING:
1014         case CPU_STARTING_FROZEN:
1015                 cpu_hyp_reinit();
1016         }
1017
1018         return NOTIFY_OK;
1019 }
1020
1021 static struct notifier_block hyp_init_cpu_nb = {
1022         .notifier_call = hyp_init_cpu_notify,
1023 };
1024
1025 #ifdef CONFIG_CPU_PM
1026 static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
1027                                     unsigned long cmd,
1028                                     void *v)
1029 {
1030         if (cmd == CPU_PM_EXIT) {
1031                 cpu_hyp_reinit();
1032                 return NOTIFY_OK;
1033         }
1034
1035         return NOTIFY_DONE;
1036 }
1037
1038 static struct notifier_block hyp_init_cpu_pm_nb = {
1039         .notifier_call = hyp_init_cpu_pm_notifier,
1040 };
1041
1042 static void __init hyp_cpu_pm_init(void)
1043 {
1044         cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
1045 }
1046 #else
1047 static inline void hyp_cpu_pm_init(void)
1048 {
1049 }
1050 #endif
1051
1052 static void teardown_common_resources(void)
1053 {
1054         free_percpu(kvm_host_cpu_state);
1055 }
1056
1057 static int init_common_resources(void)
1058 {
1059         kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
1060         if (!kvm_host_cpu_state) {
1061                 kvm_err("Cannot allocate host CPU state\n");
1062                 return -ENOMEM;
1063         }
1064
1065         return 0;
1066 }
1067
1068 static int init_subsystems(void)
1069 {
1070         int err;
1071
1072         /*
1073          * Register CPU Hotplug notifier
1074          */
1075         cpu_notifier_register_begin();
1076         err = __register_cpu_notifier(&hyp_init_cpu_nb);
1077         cpu_notifier_register_done();
1078         if (err) {
1079                 kvm_err("Cannot register KVM init CPU notifier (%d)\n", err);
1080                 return err;
1081         }
1082
1083         /*
1084          * Register CPU low-power notifier
1085          */
1086         hyp_cpu_pm_init();
1087
1088         /*
1089          * Init HYP view of VGIC
1090          */
1091         err = kvm_vgic_hyp_init();
1092         switch (err) {
1093         case 0:
1094                 vgic_present = true;
1095                 break;
1096         case -ENODEV:
1097         case -ENXIO:
1098                 vgic_present = false;
1099                 break;
1100         default:
1101                 return err;
1102         }
1103
1104         /*
1105          * Init HYP architected timer support
1106          */
1107         err = kvm_timer_hyp_init();
1108         if (err)
1109                 return err;
1110
1111         kvm_perf_init();
1112         kvm_coproc_table_init();
1113
1114         return 0;
1115 }
1116
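/*
 * Undo init_hyp_mode(): free the HYP page tables and the per-CPU HYP stack
 * pages. Nothing to do when the kernel itself runs in HYP mode (VHE).
 */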
1117 static void teardown_hyp_mode(void)
1118 {
1119         int cpu;
1120
1121         if (is_kernel_in_hyp_mode())
1122                 return;
1123
1124         free_hyp_pgds();
1125         for_each_possible_cpu(cpu)
1126                 free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
1127 }
1128
1129 static int init_vhe_mode(void)
1130 {
1131         /*
1132          * Execute the init code on each CPU.
1133          */
1134         on_each_cpu(cpu_init_stage2, NULL, 1);
1135
1136         /* set size of VMID supported by CPU */
1137         kvm_vmid_bits = kvm_get_vmid_bits();
1138         kvm_info("%d-bit VMID\n", kvm_vmid_bits);
1139
1140         kvm_info("VHE mode initialized successfully\n");
1141         return 0;
1142 }
1143
1144 /**
1145  * Initialize Hyp-mode on all online CPUs
1146  */
1147 static int init_hyp_mode(void)
1148 {
1149         int cpu;
1150         int err = 0;
1151
1152         /*
1153          * Allocate Hyp PGD and setup Hyp identity mapping
1154          * Allocate Hyp PGD and set up Hyp identity mapping
1155         err = kvm_mmu_init();
1156         if (err)
1157                 goto out_err;
1158
1159         /*
1160          * It is probably enough to obtain the default on one
1161          * CPU. It's unlikely to be different on the others.
1162          */
1163         hyp_default_vectors = __hyp_get_vectors();
1164
1165         /*
1166          * Allocate stack pages for Hypervisor-mode
1167          */
1168         for_each_possible_cpu(cpu) {
1169                 unsigned long stack_page;
1170
1171                 stack_page = __get_free_page(GFP_KERNEL);
1172                 if (!stack_page) {
1173                         err = -ENOMEM;
1174                         goto out_err;
1175                 }
1176
1177                 per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
1178         }
1179
1180         /*
1181          * Map the Hyp-code called directly from the host
1182          */
1183         err = create_hyp_mappings(kvm_ksym_ref(__kvm_hyp_code_start),
1184                                   kvm_ksym_ref(__kvm_hyp_code_end));
1185         if (err) {
1186                 kvm_err("Cannot map world-switch code\n");
1187                 goto out_err;
1188         }
1189
1190         err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
1191                                   kvm_ksym_ref(__end_rodata));
1192         if (err) {
1193                 kvm_err("Cannot map rodata section\n");
1194                 goto out_err;
1195         }
1196
1197         /*
1198          * Map the Hyp stack pages
1199          */
1200         for_each_possible_cpu(cpu) {
1201                 char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
1202                 err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);
1203
1204                 if (err) {
1205                         kvm_err("Cannot map hyp stack\n");
1206                         goto out_err;
1207                 }
1208         }
1209
1210         for_each_possible_cpu(cpu) {
1211                 kvm_cpu_context_t *cpu_ctxt;
1212
1213                 cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
1214                 err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1);
1215
1216                 if (err) {
1217                         kvm_err("Cannot map host CPU state: %d\n", err);
1218                         goto out_err;
1219                 }
1220         }
1221
1222         /*
1223          * Execute the init code on each CPU.
1224          */
1225         on_each_cpu(cpu_init_hyp_mode, NULL, 1);
1226
1227 #ifndef CONFIG_HOTPLUG_CPU
1228         free_boot_hyp_pgd();
1229 #endif
1230
1231         /* set size of VMID supported by CPU */
1232         kvm_vmid_bits = kvm_get_vmid_bits();
1233         kvm_info("%d-bit VMID\n", kvm_vmid_bits);
1234
1235         kvm_info("Hyp mode initialized successfully\n");
1236
1237         return 0;
1238
1239 out_err:
1240         teardown_hyp_mode();
1241         kvm_err("error initializing Hyp mode: %d\n", err);
1242         return err;
1243 }
1244
1245 static void check_kvm_target_cpu(void *ret)
1246 {
1247         *(int *)ret = kvm_target_cpu();
1248 }
1249
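/* Return the vcpu whose MPIDR affinity bits match @mpidr, or NULL if none. */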
1250 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
1251 {
1252         struct kvm_vcpu *vcpu;
1253         int i;
1254
1255         mpidr &= MPIDR_HWID_BITMASK;
1256         kvm_for_each_vcpu(i, vcpu, kvm) {
1257                 if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
1258                         return vcpu;
1259         }
1260         return NULL;
1261 }
1262
1263 /**
1264  * Initialize Hyp-mode and memory mappings on all CPUs.
1265  */
1266 int kvm_arch_init(void *opaque)
1267 {
1268         int err;
1269         int ret, cpu;
1270
1271         if (!is_hyp_mode_available()) {
1272                 kvm_err("HYP mode not available\n");
1273                 return -ENODEV;
1274         }
1275
1276         for_each_online_cpu(cpu) {
1277                 smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
1278                 if (ret < 0) {
1279                         kvm_err("Error, CPU %d not supported!\n", cpu);
1280                         return -ENODEV;
1281                 }
1282         }
1283
1284         err = init_common_resources();
1285         if (err)
1286                 return err;
1287
1288         if (is_kernel_in_hyp_mode())
1289                 err = init_vhe_mode();
1290         else
1291                 err = init_hyp_mode();
1292         if (err)
1293                 goto out_err;
1294
1295         err = init_subsystems();
1296         if (err)
1297                 goto out_hyp;
1298
1299         return 0;
1300
1301 out_hyp:
1302         teardown_hyp_mode();
1303 out_err:
1304         teardown_common_resources();
1305         return err;
1306 }
1307
1308 /* NOP: Compiling as a module not supported */
1309 void kvm_arch_exit(void)
1310 {
1311         kvm_perf_teardown();
1312 }
1313
1314 static int arm_init(void)
1315 {
1316         int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1317         return rc;
1318 }
1319
1320 module_init(arm_init);