KVM: Non-atomic interrupt injection
authorAvi Kivity <avi@redhat.com>
Tue, 20 Jul 2010 12:06:17 +0000 (15:06 +0300)
committerAvi Kivity <avi@redhat.com>
Sun, 24 Oct 2010 08:52:54 +0000 (10:52 +0200)
Change the interrupt injection code to work from a preemptible,
interrupts-enabled context.  This works by adding a ->cancel_injection()
operation that undoes an injection in case we were not able to actually
enter the guest (a condition that could never occur with atomic injection).

Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

index b43686a4487781a0949014bde6a974041343e42e..80224bf5d4f8b3c6c662bc57a3b9048962ac5ea0 100644 (file)
@@ -552,6 +552,7 @@ struct kvm_x86_ops {
        void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
                                bool has_error_code, u32 error_code,
                                bool reinject);
+       void (*cancel_injection)(struct kvm_vcpu *vcpu);
        int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
        int (*nmi_allowed)(struct kvm_vcpu *vcpu);
        bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
index 1d2ea65d3537300720b31665d08da8d12c42950e..1a85fc507cf7413260a76fb2dd88a15c00cc38dd 100644 (file)
@@ -3261,6 +3261,17 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
        }
 }
 
+static void svm_cancel_injection(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+       struct vmcb_control_area *control = &svm->vmcb->control;
+
+       control->exit_int_info = control->event_inj;
+       control->exit_int_info_err = control->event_inj_err;
+       control->event_inj = 0;
+       svm_complete_interrupts(svm);
+}
+
 #ifdef CONFIG_X86_64
 #define R "r"
 #else
@@ -3631,6 +3642,7 @@ static struct kvm_x86_ops svm_x86_ops = {
        .set_irq = svm_set_irq,
        .set_nmi = svm_inject_nmi,
        .queue_exception = svm_queue_exception,
+       .cancel_injection = svm_cancel_injection,
        .interrupt_allowed = svm_interrupt_allowed,
        .nmi_allowed = svm_nmi_allowed,
        .get_nmi_mask = svm_get_nmi_mask,
index 3237f6cc930d0e00b1ccd3ec25f5210f35bb7911..70af3db372d712a65ac2882f825714559e0f6378 100644 (file)
@@ -3895,6 +3895,16 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
                                  IDT_VECTORING_ERROR_CODE);
 }
 
+static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
+{
+       __vmx_complete_interrupts(to_vmx(vcpu),
+                                 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
+                                 VM_ENTRY_INSTRUCTION_LEN,
+                                 VM_ENTRY_EXCEPTION_ERROR_CODE);
+
+       vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
+}
+
 /*
  * Failure to inject an interrupt should give us the information
  * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs
@@ -4348,6 +4358,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .set_irq = vmx_inject_irq,
        .set_nmi = vmx_inject_nmi,
        .queue_exception = vmx_queue_exception,
+       .cancel_injection = vmx_cancel_injection,
        .interrupt_allowed = vmx_interrupt_allowed,
        .nmi_allowed = vmx_nmi_allowed,
        .get_nmi_mask = vmx_get_nmi_mask,
index e7198036db617f5ca12d161ae36a355b45e2628e..a465bd29f38122688d240c502a056e1ac746b22a 100644 (file)
@@ -5005,7 +5005,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        int r;
        bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
                vcpu->run->request_interrupt_window;
-       bool req_event;
 
        if (vcpu->requests) {
                if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5041,6 +5040,21 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        if (unlikely(r))
                goto out;
 
+       if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
+               inject_pending_event(vcpu);
+
+               /* enable NMI/IRQ window open exits if needed */
+               if (vcpu->arch.nmi_pending)
+                       kvm_x86_ops->enable_nmi_window(vcpu);
+               else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+                       kvm_x86_ops->enable_irq_window(vcpu);
+
+               if (kvm_lapic_enabled(vcpu)) {
+                       update_cr8_intercept(vcpu);
+                       kvm_lapic_sync_to_vapic(vcpu);
+               }
+       }
+
        preempt_disable();
 
        kvm_x86_ops->prepare_guest_switch(vcpu);
@@ -5053,35 +5067,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        local_irq_disable();
 
-       req_event = kvm_check_request(KVM_REQ_EVENT, vcpu);
-
        if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
            || need_resched() || signal_pending(current)) {
-               if (req_event)
-                       kvm_make_request(KVM_REQ_EVENT, vcpu);
                atomic_set(&vcpu->guest_mode, 0);
                smp_wmb();
                local_irq_enable();
                preempt_enable();
+               kvm_x86_ops->cancel_injection(vcpu);
                r = 1;
                goto out;
        }
 
-       if (req_event || req_int_win) {
-               inject_pending_event(vcpu);
-
-               /* enable NMI/IRQ window open exits if needed */
-               if (vcpu->arch.nmi_pending)
-                       kvm_x86_ops->enable_nmi_window(vcpu);
-               else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
-                       kvm_x86_ops->enable_irq_window(vcpu);
-
-               if (kvm_lapic_enabled(vcpu)) {
-                       update_cr8_intercept(vcpu);
-                       kvm_lapic_sync_to_vapic(vcpu);
-               }
-       }
-
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
        kvm_guest_enter();