KVM: VMX: Move vmx_nmi_allowed after vmx_set_nmi_mask
author: Jan Kiszka <jan.kiszka@siemens.com>
Sun, 14 Apr 2013 10:12:47 +0000 (12:12 +0200)
committer: Gleb Natapov <gleb@redhat.com>
Mon, 22 Apr 2013 08:10:49 +0000 (11:10 +0300)
vmx_set_nmi_mask will soon be used by vmx_nmi_allowed. No functional
changes.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
arch/x86/kvm/vmx.c

index 6abce2d5d18e00bb14bd99cdc11b302a4ab6088b..451ab2a57ca08114ae5afac1abbec9a4115715a5 100644 (file)
@@ -4486,16 +4486,6 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
                        INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
 }
 
-static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
-{
-       if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
-               return 0;
-
-       return  !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
-                 (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
-                  | GUEST_INTR_STATE_NMI));
-}
-
 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
 {
        if (!cpu_has_virtual_nmis())
@@ -4525,6 +4515,16 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
        }
 }
 
+static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
+{
+       if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
+               return 0;
+
+       return  !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+                 (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
+                  | GUEST_INTR_STATE_NMI));
+}
+
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
        if (is_guest_mode(vcpu)) {