KVM: SVM: Initialize Nested Nested MMU context on VMRUN
author Joerg Roedel <joerg.roedel@amd.com>
Fri, 10 Sep 2010 15:31:03 +0000 (17:31 +0200)
committer Avi Kivity <avi@redhat.com>
Sun, 24 Oct 2010 08:52:46 +0000 (10:52 +0200)
This patch adds code to initialize the Nested Nested Paging
MMU context when the L1 guest executes a VMRUN instruction
and has nested paging enabled in its VMCB.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
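
Note: the nested_svm_set_tdp_cr3(), nested_svm_get_tdp_cr3() and nested_svm_inject_npf_exit() callbacks wired up in nested_svm_init_mmu_context() below are introduced by earlier patches in this series and do not appear in this diff. As a rough sketch of what the cr3 pair looks like, assuming the svm->nested.nested_cr3 field used below and the force_new_asid() helper visible in the svm.c context:

	/* Sketch only: these helpers come from earlier patches in the
	 * series, not from this diff. */
	static u64 nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
	{
		struct vcpu_svm *svm = to_svm(vcpu);

		/* The walk of L1's nested page table starts at the
		 * nested_cr3 value L1 put into its VMCB, which is
		 * captured during VMRUN emulation below. */
		return svm->nested.nested_cr3;
	}

	static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
					   unsigned long root)
	{
		struct vcpu_svm *svm = to_svm(vcpu);

		/* Point the hardware NPT root at the new shadow root
		 * and flush the guest's ASID. */
		svm->vmcb->control.nested_cr3 = root;
		force_new_asid(vcpu);
	}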

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 95cbeed74cf9180feba1e6c2e60a75a56e0e917a..6e248d80e350f553cee751d8c2be5c72cbe45097 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2962,6 +2962,7 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu)
 {
        mmu_free_roots(vcpu);
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_unload);
 
 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu_page *sp,
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 9a9a4405b571cd78105c6fb3c50e84fed0943f73..3184772dedfe77f9c32a08eb483ce2b8eac1b44a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -294,6 +294,15 @@ static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
        force_new_asid(vcpu);
 }
 
+static int get_npt_level(void)
+{
+#ifdef CONFIG_X86_64
+       return PT64_ROOT_LEVEL;
+#else
+       return PT32E_ROOT_LEVEL;
+#endif
+}
+
 static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
        vcpu->arch.efer = efer;
@@ -1630,6 +1639,26 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu)
        nested_svm_vmexit(svm);
 }
 
+static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
+{
+       int r;
+
+       r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
+
+       vcpu->arch.mmu.set_cr3           = nested_svm_set_tdp_cr3;
+       vcpu->arch.mmu.get_cr3           = nested_svm_get_tdp_cr3;
+       vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
+       vcpu->arch.mmu.shadow_root_level = get_npt_level();
+       vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
+
+       return r;
+}
+
+static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.walk_mmu = &vcpu->arch.mmu;
+}
+
 static int nested_svm_check_permissions(struct vcpu_svm *svm)
 {
        if (!(svm->vcpu.arch.efer & EFER_SVME)
@@ -1998,6 +2027,8 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
        kvm_clear_exception_queue(&svm->vcpu);
        kvm_clear_interrupt_queue(&svm->vcpu);
 
+       svm->nested.nested_cr3 = 0;
+
        /* Restore selected save entries */
        svm->vmcb->save.es = hsave->save.es;
        svm->vmcb->save.cs = hsave->save.cs;
@@ -2024,6 +2055,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 
        nested_svm_unmap(page);
 
+       nested_svm_uninit_mmu_context(&svm->vcpu);
        kvm_mmu_reset_context(&svm->vcpu);
        kvm_mmu_load(&svm->vcpu);
 
@@ -2071,6 +2103,9 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
        if (vmcb->control.asid == 0)
                return false;
 
+       if (vmcb->control.nested_ctl && !npt_enabled)
+               return false;
+
        return true;
 }
 
@@ -2143,6 +2178,12 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
        else
                svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
 
+       if (nested_vmcb->control.nested_ctl) {
+               kvm_mmu_unload(&svm->vcpu);
+               svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
+               nested_svm_init_mmu_context(&svm->vcpu);
+       }
+
        /* Load the nested guest state */
        svm->vmcb->save.es = nested_vmcb->save.es;
        svm->vmcb->save.cs = nested_vmcb->save.cs;
@@ -3415,15 +3456,6 @@ static bool svm_cpu_has_accelerated_tpr(void)
        return false;
 }
 
-static int get_npt_level(void)
-{
-#ifdef CONFIG_X86_64
-       return PT64_ROOT_LEVEL;
-#else
-       return PT32E_ROOT_LEVEL;
-#endif
-}
-
 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 {
        return 0;
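
For context, the nested_ctl/nested_cr3 pair consumed by nested_vmcb_checks() and nested_svm_vmrun() above is programmed by the L1 hypervisor into its VMCB before it executes VMRUN. A hypothetical L1-side sketch (vmcb, vmcb_pa and l2_npt_root_pa are illustrative names, not from this patch):

	/* Hypothetical L1 hypervisor code, not part of this patch. */
	vmcb->control.nested_ctl = 1;              /* request nested paging for L2 */
	vmcb->control.nested_cr3 = l2_npt_root_pa; /* root of L1's NPT for L2 */

	/* VMRUN takes the physical address of the VMCB in rAX. */
	asm volatile("vmrun" : : "a"(vmcb_pa) : "memory", "cc");

With the check added to nested_vmcb_checks(), such a VMRUN is rejected when the host runs with NPT disabled, since KVM cannot provide nested paging to L1 without it.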