KVM: MMU: audit: check whether have unsync sps after root sync
authorXiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Mon, 27 Sep 2010 10:09:29 +0000 (18:09 +0800)
committerAvi Kivity <avi@redhat.com>
Sun, 24 Oct 2010 08:53:14 +0000 (10:53 +0200)
After the roots are synced, all unsync sps are synced; this patch adds a check to make
sure there are no unsync sps in the VCPU's page table

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu_audit.c

index afde64ba118d57f34e2ff0142bf4973b3944e0d2..ba7e7646fb78b1dbf088e7a7d85b332ca1ed358b 100644 (file)
@@ -53,14 +53,18 @@ enum {
        AUDIT_PRE_PAGE_FAULT,
        AUDIT_POST_PAGE_FAULT,
        AUDIT_PRE_PTE_WRITE,
-       AUDIT_POST_PTE_WRITE
+       AUDIT_POST_PTE_WRITE,
+       AUDIT_PRE_SYNC,
+       AUDIT_POST_SYNC
 };
 
 char *audit_point_name[] = {
        "pre page fault",
        "post page fault",
        "pre pte write",
-       "post pte write"
+       "post pte write",
+       "pre sync",
+       "post sync"
 };
 
 #undef MMU_DEBUG
@@ -2516,6 +2520,8 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 
        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;
+
+       trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
        if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->arch.mmu.root_hpa;
                sp = page_header(root);
@@ -2531,6 +2537,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
                        mmu_sync_children(vcpu, sp);
                }
        }
+       trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
 }
 
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
index 66219afcc91e7429764dab46bb4bbac59391ee6d..4aee32c3cf924a150a183107dff60242cecd174f 100644 (file)
@@ -164,6 +164,14 @@ static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
                inspect_spte_has_rmap(vcpu->kvm, sptep);
 }
 
+static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
+{
+       struct kvm_mmu_page *sp = page_header(__pa(sptep));
+
+       if (audit_point == AUDIT_POST_SYNC && sp->unsync)
+               audit_printk("meet unsync sp(%p) after sync root.\n", sp);
+}
+
 static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        int i;
@@ -179,7 +187,7 @@ static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
        }
 }
 
-void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
+static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        struct kvm_memory_slot *slot;
        unsigned long *rmapp;
@@ -215,6 +223,7 @@ static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 {
        audit_sptes_have_rmaps(vcpu, sptep, level);
        audit_mappings(vcpu, sptep, level);
+       audit_spte_after_sync(vcpu, sptep, level);
 }
 
 static void audit_vcpu_spte(struct kvm_vcpu *vcpu)