KVM: x86: Use the correct vcpu's TSC rate to compute time scale
[firefly-linux-kernel-4.4.55.git] arch/x86/kvm/x86.c
index a60bdbccff5189b5a98b9a7fcc6a3b9f7ff5eeec..2cb074f5aaedb3362c77f8a30ab5df9fd8c7d406 100644
@@ -51,6 +51,8 @@
 #include <linux/pci.h>
 #include <linux/timekeeper_internal.h>
 #include <linux/pvclock_gtod.h>
+#include <linux/kvm_irqfd.h>
+#include <linux/irqbypass.h>
 #include <trace/events/kvm.h>
 
 #define CREATE_TRACE_POINTS
@@ -64,6 +66,7 @@
 #include <asm/fpu/internal.h> /* Ugh! */
 #include <asm/pvclock.h>
 #include <asm/div64.h>
+#include <asm/irq_remapping.h>
 
 #define MAX_IO_MSRS 256
 #define KVM_MAX_MCE_BANKS 32
@@ -90,10 +93,10 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 
-struct kvm_x86_ops *kvm_x86_ops;
+struct kvm_x86_ops *kvm_x86_ops __read_mostly;
 EXPORT_SYMBOL_GPL(kvm_x86_ops);
 
-static bool ignore_msrs = 0;
+static bool __read_mostly ignore_msrs = 0;
 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
 
 unsigned int min_timer_period_us = 500;
@@ -102,20 +105,25 @@ module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
 static bool __read_mostly kvmclock_periodic_sync = true;
 module_param(kvmclock_periodic_sync, bool, S_IRUGO);
 
-bool kvm_has_tsc_control;
+bool __read_mostly kvm_has_tsc_control;
 EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
-u32  kvm_max_guest_tsc_khz;
+u32  __read_mostly kvm_max_guest_tsc_khz;
 EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
+u8   __read_mostly kvm_tsc_scaling_ratio_frac_bits;
+EXPORT_SYMBOL_GPL(kvm_tsc_scaling_ratio_frac_bits);
+u64  __read_mostly kvm_max_tsc_scaling_ratio;
+EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio);
+static u64 __read_mostly kvm_default_tsc_scaling_ratio;
 
 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
-static u32 tsc_tolerance_ppm = 250;
+static u32 __read_mostly tsc_tolerance_ppm = 250;
 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
 
 /* lapic timer advance (tscdeadline mode only) in nanoseconds */
-unsigned int lapic_timer_advance_ns = 0;
+unsigned int __read_mostly lapic_timer_advance_ns = 0;
 module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
 
-static bool backwards_tsc_observed = false;
+static bool __read_mostly backwards_tsc_observed = false;
 
 #define KVM_NR_SHARED_MSRS 16
 
@@ -149,6 +157,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "nmi_window", VCPU_STAT(nmi_window_exits) },
        { "halt_exits", VCPU_STAT(halt_exits) },
        { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
+       { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "hypercalls", VCPU_STAT(hypercalls) },
        { "request_irq", VCPU_STAT(request_irq_exits) },
@@ -621,7 +630,9 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        if ((cr0 ^ old_cr0) & update_bits)
                kvm_mmu_reset_context(vcpu);
 
-       if ((cr0 ^ old_cr0) & X86_CR0_CD)
+       if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
+           kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
+           !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
                kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
 
        return 0;
@@ -787,7 +798,7 @@ int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
        if (cr8 & CR8_RESERVED_BITS)
                return 1;
-       if (irqchip_in_kernel(vcpu->kvm))
+       if (lapic_in_kernel(vcpu))
                kvm_lapic_set_tpr(vcpu, cr8);
        else
                vcpu->arch.cr8 = cr8;
@@ -797,7 +808,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr8);
 
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
 {
-       if (irqchip_in_kernel(vcpu->kvm))
+       if (lapic_in_kernel(vcpu))
                return kvm_lapic_get_cr8(vcpu);
        else
                return vcpu->arch.cr8;
@@ -951,6 +962,9 @@ static u32 emulated_msrs[] = {
        HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
        HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
        HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
+       HV_X64_MSR_RESET,
+       HV_X64_MSR_VP_INDEX,
+       HV_X64_MSR_VP_RUNTIME,
        HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
        MSR_KVM_PV_EOI_EN,
 
@@ -1239,14 +1253,53 @@ static u32 adjust_tsc_khz(u32 khz, s32 ppm)
        return v;
 }
 
-static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
+static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
+{
+       u64 ratio;
+
+       /* Guest TSC same frequency as host TSC? */
+       if (!scale) {
+               vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
+               return 0;
+       }
+
+       /* TSC scaling supported? */
+       if (!kvm_has_tsc_control) {
+               if (user_tsc_khz > tsc_khz) {
+                       vcpu->arch.tsc_catchup = 1;
+                       vcpu->arch.tsc_always_catchup = 1;
+                       return 0;
+               } else {
+                       WARN(1, "user requested TSC rate below hardware speed\n");
+                       return -1;
+               }
+       }
+
+       /* TSC scaling required - calculate ratio */
+       ratio = mul_u64_u32_div(1ULL << kvm_tsc_scaling_ratio_frac_bits,
+                               user_tsc_khz, tsc_khz);
+
+       if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
+               WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
+                         user_tsc_khz);
+               return -1;
+       }
+
+       vcpu->arch.tsc_scaling_ratio = ratio;
+       return 0;
+}
+
+static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
 {
        u32 thresh_lo, thresh_hi;
        int use_scaling = 0;
 
        /* tsc_khz can be zero if TSC calibration fails */
-       if (this_tsc_khz == 0)
-               return;
+       if (this_tsc_khz == 0) {
+               /* set tsc_scaling_ratio to a safe value */
+               vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
+               return -1;
+       }
 
        /* Compute a scale to convert nanoseconds in TSC cycles */
        kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
@@ -1266,7 +1319,7 @@ static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
                pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
                use_scaling = 1;
        }
-       kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
+       return set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
 }
 
 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
@@ -1312,6 +1365,48 @@ static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
        vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
 }
 
+/*
+ * Multiply tsc by a fixed point number represented by ratio.
+ *
+ * The most significant 64-N bits (mult) of ratio represent the
+ * integral part of the fixed point number; the remaining N bits
+ * (frac) represent the fractional part, i.e. ratio represents a fixed
+ * point number (mult + frac * 2^(-N)).
+ *
+ * N equals kvm_tsc_scaling_ratio_frac_bits.
+ */
+static inline u64 __scale_tsc(u64 ratio, u64 tsc)
+{
+       return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
+}
+
+u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
+{
+       u64 _tsc = tsc;
+       u64 ratio = vcpu->arch.tsc_scaling_ratio;
+
+       if (ratio != kvm_default_tsc_scaling_ratio)
+               _tsc = __scale_tsc(ratio, tsc);
+
+       return _tsc;
+}
+EXPORT_SYMBOL_GPL(kvm_scale_tsc);
+
+static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+{
+       u64 tsc;
+
+       tsc = kvm_scale_tsc(vcpu, rdtsc());
+
+       return target_tsc - tsc;
+}
+
+u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
+{
+       return kvm_x86_ops->read_l1_tsc(vcpu, kvm_scale_tsc(vcpu, host_tsc));
+}
+EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
        struct kvm *kvm = vcpu->kvm;
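
For concreteness, the fixed-point arithmetic described in the comment above can be checked standalone. A minimal userspace sketch, assuming N = 48 fractional bits (the VMX value; the real count is vendor-specific and held in kvm_tsc_scaling_ratio_frac_bits); scale_tsc() here mirrors the mul_u64_u64_shr() call in __scale_tsc():

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 48	/* assumed N; kvm_tsc_scaling_ratio_frac_bits in-kernel */

/* Mirrors mul_u64_u64_shr(): 64x64 -> 128-bit multiply, drop frac bits. */
static uint64_t scale_tsc(uint64_t tsc, uint64_t ratio)
{
	return (uint64_t)(((unsigned __int128)tsc * ratio) >> FRAC_BITS);
}

int main(void)
{
	uint64_t tsc_khz = 3000000;		/* 3.0 GHz host */
	uint64_t user_tsc_khz = 1500000;	/* 1.5 GHz guest */

	/* ratio = (user_tsc_khz << N) / tsc_khz, as set_tsc_khz() computes */
	uint64_t ratio = (uint64_t)(((unsigned __int128)user_tsc_khz
				     << FRAC_BITS) / tsc_khz);

	printf("%llu\n", (unsigned long long)scale_tsc(3000000000ULL, ratio));
	return 0;	/* prints 1500000000 */
}

With a 1.5 GHz guest on a 3.0 GHz host the ratio is 0.5 in 16.48 fixed point (1 << 47), so one host second of cycles reads back as 1500000000 guest cycles.
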
@@ -1323,7 +1418,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
        u64 data = msr->data;
 
        raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
-       offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
+       offset = kvm_compute_tsc_offset(vcpu, data);
        ns = get_kernel_ns();
        elapsed = ns - kvm->arch.last_tsc_nsec;
 
@@ -1380,7 +1475,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
                } else {
                        u64 delta = nsec_to_cycles(vcpu, elapsed);
                        data += delta;
-                       offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
+                       offset = kvm_compute_tsc_offset(vcpu, data);
                        pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
                }
                matched = true;
@@ -1437,6 +1532,20 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 
 EXPORT_SYMBOL_GPL(kvm_write_tsc);
 
+static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
+                                          s64 adjustment)
+{
+       kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
+}
+
+static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
+{
+       if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
+               WARN_ON(adjustment < 0);
+       adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
+       kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
+}
+
 #ifdef CONFIG_X86_64
 
 static cycle_t read_tsc(void)
@@ -1598,7 +1707,7 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 
 static int kvm_guest_time_update(struct kvm_vcpu *v)
 {
-       unsigned long flags, this_tsc_khz;
+       unsigned long flags, this_tsc_khz, tgt_tsc_khz;
        struct kvm_vcpu_arch *vcpu = &v->arch;
        struct kvm_arch *ka = &v->kvm->arch;
        s64 kernel_ns;
@@ -1635,7 +1744,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                kernel_ns = get_kernel_ns();
        }
 
-       tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);
+       tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
 
        /*
         * We may have to catch up the TSC to match elapsed wall clock
@@ -1661,7 +1770,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                return 0;
 
        if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
-               kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
+               tgt_tsc_khz = kvm_has_tsc_control ?
+                       vcpu->virtual_tsc_khz : this_tsc_khz;
+               kvm_get_time_scale(NSEC_PER_SEC / 1000, tgt_tsc_khz,
                                   &vcpu->hv_clock.tsc_shift,
                                   &vcpu->hv_clock.tsc_to_system_mul);
                vcpu->hw_tsc_khz = this_tsc_khz;
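
This hunk is the fix named in the subject: when hardware TSC scaling is active, the guest's rdtsc advances at virtual_tsc_khz rather than the host rate, so the kvmclock tsc_shift/tsc_to_system_mul pair must be derived from the guest rate. A hedged back-of-the-envelope check with made-up numbers:

/* Why tgt_tsc_khz matters: arithmetic sketch, not kernel code. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t host_khz = 3000000, virtual_tsc_khz = 1500000;
	/* cycles the guest's scaled TSC accumulates over one wall second */
	uint64_t guest_cycles = 1500000000ULL;

	/* ns = cycles * 1e6 / khz */
	printf("scale from host rate:  %llu ns\n",
	       (unsigned long long)(guest_cycles * 1000000ULL / host_khz));
	printf("scale from guest rate: %llu ns\n",
	       (unsigned long long)(guest_cycles * 1000000ULL / virtual_tsc_khz));
	/* 500000000 vs 1000000000: converting scaled guest cycles with the
	 * host rate would make this guest's kvmclock run at half speed. */
	return 0;
}
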
@@ -1707,8 +1818,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                vcpu->pvclock_set_guest_stopped_request = false;
        }
 
-       pvclock_flags |= PVCLOCK_COUNTS_FROM_ZERO;
-
        /* If the host uses TSC clocksource, then it is stable */
        if (use_master_clock)
                pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
@@ -1898,6 +2007,8 @@ static void accumulate_steal_time(struct kvm_vcpu *vcpu)
 
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
+       accumulate_steal_time(vcpu);
+
        if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
                return;
 
@@ -2006,8 +2117,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                                        &vcpu->requests);
 
                        ka->boot_vcpu_runs_old_kvmclock = tmp;
-
-                       ka->kvmclock_offset = -get_kernel_ns();
                }
 
                vcpu->arch.time = data;
@@ -2050,12 +2159,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (!(data & KVM_MSR_ENABLED))
                        break;
 
-               vcpu->arch.st.last_steal = current->sched_info.run_delay;
-
-               preempt_disable();
-               accumulate_steal_time(vcpu);
-               preempt_enable();
-
                kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
 
                break;
@@ -2189,6 +2292,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_IA32_LASTINTFROMIP:
        case MSR_IA32_LASTINTTOIP:
        case MSR_K8_SYSCFG:
+       case MSR_K8_TSEG_ADDR:
+       case MSR_K8_TSEG_MASK:
        case MSR_K7_HWCR:
        case MSR_VM_HSAVE_PA:
        case MSR_K8_INT_PENDING_MSG:
@@ -2449,6 +2554,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_ENABLE_CAP_VM:
        case KVM_CAP_DISABLE_QUIRKS:
        case KVM_CAP_SET_BOOT_CPU_ID:
+       case KVM_CAP_SPLIT_IRQCHIP:
 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
        case KVM_CAP_ASSIGN_DEV_IRQ:
        case KVM_CAP_PCI_2_3:
@@ -2612,7 +2718,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                if (tsc_delta < 0)
                        mark_tsc_unstable("KVM discovered backwards TSC");
                if (check_tsc_unstable()) {
-                       u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
+                       u64 offset = kvm_compute_tsc_offset(vcpu,
                                                vcpu->arch.last_guest_tsc);
                        kvm_x86_ops->write_tsc_offset(vcpu, offset);
                        vcpu->arch.tsc_catchup = 1;
@@ -2628,7 +2734,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                vcpu->cpu = cpu;
        }
 
-       accumulate_steal_time(vcpu);
        kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
 }
 
@@ -2662,12 +2767,24 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 {
        if (irq->irq >= KVM_NR_INTERRUPTS)
                return -EINVAL;
-       if (irqchip_in_kernel(vcpu->kvm))
+
+       if (!irqchip_in_kernel(vcpu->kvm)) {
+               kvm_queue_interrupt(vcpu, irq->irq, false);
+               kvm_make_request(KVM_REQ_EVENT, vcpu);
+               return 0;
+       }
+
+       /*
+        * With in-kernel LAPIC, we only use this to inject EXTINT, so
+        * fail for in-kernel 8259.
+        */
+       if (pic_in_kernel(vcpu->kvm))
                return -ENXIO;
 
-       kvm_queue_interrupt(vcpu, irq->irq, false);
-       kvm_make_request(KVM_REQ_EVENT, vcpu);
+       if (vcpu->arch.pending_external_vector != -1)
+               return -EEXIST;
 
+       vcpu->arch.pending_external_vector = irq->irq;
        return 0;
 }
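
The rework keeps KVM_INTERRUPT meaningful on all three irqchip configurations: direct queueing with a userspace irqchip, EXTINT delivery with a split irqchip, and -ENXIO with an in-kernel PIC. A hedged userspace sketch (inject_extint is a hypothetical helper; error handling elided):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Queue an external interrupt vector on a vCPU. With a split irqchip
 * this lands in pending_external_vector; errno is EEXIST if a vector
 * is already pending and ENXIO if an in-kernel PIC owns injection. */
static int inject_extint(int vcpu_fd, unsigned int vector)
{
	struct kvm_interrupt irq = { .irq = vector };

	return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}
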
 
@@ -3176,7 +3293,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                struct kvm_vapic_addr va;
 
                r = -EINVAL;
-               if (!irqchip_in_kernel(vcpu->kvm))
+               if (!lapic_in_kernel(vcpu))
                        goto out;
                r = -EFAULT;
                if (copy_from_user(&va, argp, sizeof va))
@@ -3303,9 +3420,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                if (user_tsc_khz == 0)
                        user_tsc_khz = tsc_khz;
 
-               kvm_set_tsc_khz(vcpu, user_tsc_khz);
+               if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
+                       r = 0;
 
-               r = 0;
                goto out;
        }
        case KVM_GET_TSC_KHZ: {
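
Since kvm_set_tsc_khz() now returns a result, KVM_SET_TSC_KHZ reports failure instead of silently accepting an unusable rate. A hedged userspace sketch (set_guest_tsc_khz is a hypothetical wrapper; the rate is passed directly as the ioctl argument):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Pin the guest TSC frequency of one vCPU, in kHz. After this patch
 * the ioctl fails (errno EINVAL, per the surrounding handler) when
 * the requested rate cannot be provided. */
static int set_guest_tsc_khz(int vcpu_fd, unsigned long khz)
{
	return ioctl(vcpu_fd, KVM_SET_TSC_KHZ, khz);
}
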
@@ -3425,41 +3542,35 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 
 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
 {
-       int r = 0;
-
        mutex_lock(&kvm->arch.vpit->pit_state.lock);
        memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
        mutex_unlock(&kvm->arch.vpit->pit_state.lock);
-       return r;
+       return 0;
 }
 
 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
 {
-       int r = 0;
-
        mutex_lock(&kvm->arch.vpit->pit_state.lock);
        memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
        kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
        mutex_unlock(&kvm->arch.vpit->pit_state.lock);
-       return r;
+       return 0;
 }
 
 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 {
-       int r = 0;
-
        mutex_lock(&kvm->arch.vpit->pit_state.lock);
        memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
                sizeof(ps->channels));
        ps->flags = kvm->arch.vpit->pit_state.flags;
        mutex_unlock(&kvm->arch.vpit->pit_state.lock);
        memset(&ps->reserved, 0, sizeof(ps->reserved));
-       return r;
+       return 0;
 }
 
 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 {
-       int r = 0, start = 0;
+       int start = 0;
        u32 prev_legacy, cur_legacy;
        mutex_lock(&kvm->arch.vpit->pit_state.lock);
        prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
@@ -3471,7 +3582,7 @@ static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
        kvm->arch.vpit->pit_state.flags = ps->flags;
        kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
        mutex_unlock(&kvm->arch.vpit->pit_state.lock);
-       return r;
+       return 0;
 }
 
 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
@@ -3556,6 +3667,28 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                kvm->arch.disabled_quirks = cap->args[0];
                r = 0;
                break;
+       case KVM_CAP_SPLIT_IRQCHIP: {
+               mutex_lock(&kvm->lock);
+               r = -EINVAL;
+               if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS)
+                       goto split_irqchip_unlock;
+               r = -EEXIST;
+               if (irqchip_in_kernel(kvm))
+                       goto split_irqchip_unlock;
+               if (atomic_read(&kvm->online_vcpus))
+                       goto split_irqchip_unlock;
+               r = kvm_setup_empty_irq_routing(kvm);
+               if (r)
+                       goto split_irqchip_unlock;
+               /* Pairs with irqchip_in_kernel. */
+               smp_wmb();
+               kvm->arch.irqchip_split = true;
+               kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
+               r = 0;
+split_irqchip_unlock:
+               mutex_unlock(&kvm->lock);
+               break;
+       }
        default:
                r = -EINVAL;
                break;
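
Userspace opts into the split irqchip through KVM_ENABLE_CAP on the VM fd, with args[0] naming how many IOAPIC pins to reserve; per the checks above this must happen before any vCPU or in-kernel irqchip exists. A hedged sketch (enable_split_irqchip is a hypothetical helper):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Enable the split irqchip: in-kernel LAPIC, userspace PIC/IOAPIC.
 * Fails with EINVAL for too many pins and EEXIST once an in-kernel
 * irqchip or any vCPU already exists. */
static int enable_split_irqchip(int vm_fd, unsigned int reserved_pins)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_SPLIT_IRQCHIP;
	cap.args[0] = reserved_pins;	/* <= MAX_NR_RESERVED_IOAPIC_PINS */

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
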
@@ -3669,7 +3802,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
                }
 
                r = -ENXIO;
-               if (!irqchip_in_kernel(kvm))
+               if (!irqchip_in_kernel(kvm) || irqchip_split(kvm))
                        goto get_irqchip_out;
                r = kvm_vm_ioctl_get_irqchip(kvm, chip);
                if (r)
@@ -3693,7 +3826,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
                }
 
                r = -ENXIO;
-               if (!irqchip_in_kernel(kvm))
+               if (!irqchip_in_kernel(kvm) || irqchip_split(kvm))
                        goto set_irqchip_out;
                r = kvm_vm_ioctl_set_irqchip(kvm, chip);
                if (r)
@@ -4060,6 +4193,15 @@ static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
        return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
 }
 
+static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
+               unsigned long addr, void *val, unsigned int bytes)
+{
+       struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+       int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes);
+
+       return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
+}
+
 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
                                       gva_t addr, void *val,
                                       unsigned int bytes,
@@ -4795,6 +4937,7 @@ static const struct x86_emulate_ops emulate_ops = {
        .write_gpr           = emulator_write_gpr,
        .read_std            = kvm_read_guest_virt_system,
        .write_std           = kvm_write_guest_virt_system,
+       .read_phys           = kvm_read_guest_phys_system,
        .fetch               = kvm_fetch_guest_virt,
        .read_emulated       = emulator_read_emulated,
        .write_emulated      = emulator_write_emulated,
@@ -5667,7 +5810,7 @@ void kvm_arch_exit(void)
 int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
 {
        ++vcpu->stat.halt_exits;
-       if (irqchip_in_kernel(vcpu->kvm)) {
+       if (lapic_in_kernel(vcpu)) {
                vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
                return 1;
        } else {
@@ -5774,9 +5917,15 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
  */
 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
 {
-       return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
-               vcpu->run->request_interrupt_window &&
-               kvm_arch_interrupt_allowed(vcpu));
+       if (!vcpu->run->request_interrupt_window || pic_in_kernel(vcpu->kvm))
+               return false;
+
+       if (kvm_cpu_has_interrupt(vcpu))
+               return false;
+
+       return (irqchip_split(vcpu->kvm)
+               ? kvm_apic_accept_pic_intr(vcpu)
+               : kvm_arch_interrupt_allowed(vcpu));
 }
 
 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
@@ -5787,13 +5936,17 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
        kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
        kvm_run->cr8 = kvm_get_cr8(vcpu);
        kvm_run->apic_base = kvm_get_apic_base(vcpu);
-       if (irqchip_in_kernel(vcpu->kvm))
-               kvm_run->ready_for_interrupt_injection = 1;
-       else
+       if (!irqchip_in_kernel(vcpu->kvm))
                kvm_run->ready_for_interrupt_injection =
                        kvm_arch_interrupt_allowed(vcpu) &&
                        !kvm_cpu_has_interrupt(vcpu) &&
                        !kvm_event_needs_reinjection(vcpu);
+       else if (!pic_in_kernel(vcpu->kvm))
+               kvm_run->ready_for_interrupt_injection =
+                       kvm_apic_accept_pic_intr(vcpu) &&
+                       !kvm_cpu_has_interrupt(vcpu);
+       else
+               kvm_run->ready_for_interrupt_injection = 1;
 }
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
@@ -6144,18 +6297,18 @@ static void process_smi(struct kvm_vcpu *vcpu)
 
 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
-       u64 eoi_exit_bitmap[4];
-       u32 tmr[8];
-
        if (!kvm_apic_hw_enabled(vcpu->arch.apic))
                return;
 
-       memset(eoi_exit_bitmap, 0, 32);
-       memset(tmr, 0, 32);
+       memset(vcpu->arch.eoi_exit_bitmap, 0, 256 / 8);
 
-       kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap, tmr);
-       kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
-       kvm_apic_update_tmr(vcpu, tmr);
+       if (irqchip_split(vcpu->kvm))
+               kvm_scan_ioapic_routes(vcpu, vcpu->arch.eoi_exit_bitmap);
+       else {
+               kvm_x86_ops->sync_pir_to_irr(vcpu);
+               kvm_ioapic_scan_entry(vcpu, vcpu->arch.eoi_exit_bitmap);
+       }
+       kvm_x86_ops->load_eoi_exitmap(vcpu);
 }
 
 static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
@@ -6168,7 +6321,7 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 {
        struct page *page = NULL;
 
-       if (!irqchip_in_kernel(vcpu->kvm))
+       if (!lapic_in_kernel(vcpu))
                return;
 
        if (!kvm_x86_ops->set_apic_access_page_addr)
@@ -6206,7 +6359,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 {
        int r;
-       bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
+       bool req_int_win = !lapic_in_kernel(vcpu) &&
                vcpu->run->request_interrupt_window;
        bool req_immediate_exit = false;
 
@@ -6258,6 +6411,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        kvm_pmu_handle_event(vcpu);
                if (kvm_check_request(KVM_REQ_PMI, vcpu))
                        kvm_pmu_deliver_pmi(vcpu);
+               if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
+                       BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
+                       if (test_bit(vcpu->arch.pending_ioapic_eoi,
+                                    (void *) vcpu->arch.eoi_exit_bitmap)) {
+                               vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI;
+                               vcpu->run->eoi.vector =
+                                               vcpu->arch.pending_ioapic_eoi;
+                               r = 0;
+                               goto out;
+                       }
+               }
                if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
                        vcpu_scan_ioapic(vcpu);
                if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
@@ -6268,6 +6432,26 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        r = 0;
                        goto out;
                }
+               if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
+                       vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+                       vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
+                       r = 0;
+                       goto out;
+               }
+       }
+
+       /*
+        * KVM_REQ_EVENT is not set when posted interrupts are set by
+        * VT-d hardware, so we have to update RVI unconditionally.
+        */
+       if (kvm_lapic_enabled(vcpu)) {
+               /*
+                * Update architecture specific hints for APIC
+                * virtual interrupt delivery.
+                */
+               if (kvm_x86_ops->hwapic_irr_update)
+                       kvm_x86_ops->hwapic_irr_update(vcpu,
+                               kvm_lapic_find_highest_irr(vcpu));
        }
 
        if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
@@ -6286,13 +6470,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        kvm_x86_ops->enable_irq_window(vcpu);
 
                if (kvm_lapic_enabled(vcpu)) {
-                       /*
-                        * Update architecture specific hints for APIC
-                        * virtual interrupt delivery.
-                        */
-                       if (kvm_x86_ops->hwapic_irr_update)
-                               kvm_x86_ops->hwapic_irr_update(vcpu,
-                                       kvm_lapic_find_highest_irr(vcpu));
                        update_cr8_intercept(vcpu);
                        kvm_lapic_sync_to_vapic(vcpu);
                }
@@ -6376,8 +6553,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        if (hw_breakpoint_active())
                hw_breakpoint_restore();
 
-       vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
-                                                          rdtsc());
+       vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
 
        vcpu->mode = OUTSIDE_GUEST_MODE;
        smp_wmb();
@@ -6428,10 +6604,15 @@ out:
 
 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 {
-       if (!kvm_arch_vcpu_runnable(vcpu)) {
+       if (!kvm_arch_vcpu_runnable(vcpu) &&
+           (!kvm_x86_ops->pre_block || kvm_x86_ops->pre_block(vcpu) == 0)) {
                srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
                kvm_vcpu_block(vcpu);
                vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+
+               if (kvm_x86_ops->post_block)
+                       kvm_x86_ops->post_block(vcpu);
+
                if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
                        return 1;
        }
@@ -6454,6 +6635,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
        return 1;
 }
 
+static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
+{
+       return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+               !vcpu->arch.apf.halted);
+}
+
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
        int r;
@@ -6462,11 +6649,12 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
        vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 
        for (;;) {
-               if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-                   !vcpu->arch.apf.halted)
+               if (kvm_vcpu_running(vcpu)) {
                        r = vcpu_enter_guest(vcpu);
-               else
+               } else {
                        r = vcpu_block(kvm, vcpu);
+               }
+
                if (r <= 0)
                        break;
 
@@ -6475,8 +6663,8 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
                        kvm_inject_pending_timer_irqs(vcpu);
 
                if (dm_request_for_irq_injection(vcpu)) {
-                       r = -EINTR;
-                       vcpu->run->exit_reason = KVM_EXIT_INTR;
+                       r = 0;
+                       vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                        ++vcpu->stat.request_irq_exits;
                        break;
                }
@@ -6603,7 +6791,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        }
 
        /* re-sync apic's tpr */
-       if (!irqchip_in_kernel(vcpu->kvm)) {
+       if (!lapic_in_kernel(vcpu)) {
                if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
                        r = -EINVAL;
                        goto out;
@@ -7276,6 +7464,20 @@ int kvm_arch_hardware_setup(void)
        if (r != 0)
                return r;
 
+       if (kvm_has_tsc_control) {
+               /*
+                * Make sure the user can only configure tsc_khz values that
+                * fit into a signed integer.
+                * A min value is not needed because it will always
+                * be 1 on all machines.
+                */
+               u64 max = min(0x7fffffffULL,
+                             __scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz));
+               kvm_max_guest_tsc_khz = max;
+
+               kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
+       }
+
        kvm_init_msr_list();
        return 0;
 }
@@ -7303,7 +7505,7 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
 
 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
 {
-       return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+       return irqchip_in_kernel(vcpu->kvm) == lapic_in_kernel(vcpu);
 }
 
 struct static_key kvm_no_apic_vcpu __read_mostly;
@@ -7372,6 +7574,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        kvm_async_pf_hash_reset(vcpu);
        kvm_pmu_init(vcpu);
 
+       vcpu->arch.pending_external_vector = -1;
+
        return 0;
 
 fail_free_mce_banks:
@@ -7397,7 +7601,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
        kvm_mmu_destroy(vcpu);
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        free_page((unsigned long)vcpu->arch.pio_data);
-       if (!irqchip_in_kernel(vcpu->kvm))
+       if (!lapic_in_kernel(vcpu))
                static_key_slow_dec(&kvm_no_apic_vcpu);
 }
 
@@ -7475,34 +7679,66 @@ void kvm_arch_sync_events(struct kvm *kvm)
        kvm_free_pit(kvm);
 }
 
-int __x86_set_memory_region(struct kvm *kvm,
-                           const struct kvm_userspace_memory_region *mem)
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
        int i, r;
+       unsigned long hva;
+       struct kvm_memslots *slots = kvm_memslots(kvm);
+       struct kvm_memory_slot *slot, old;
 
        /* Called with kvm->slots_lock held.  */
-       BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM);
+       if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
+               return -EINVAL;
+
+       slot = id_to_memslot(slots, id);
+       if (size) {
+               if (WARN_ON(slot->npages))
+                       return -EEXIST;
+
+               /*
+                * MAP_SHARED to prevent internal slot pages from being moved
+                * by fork()/COW.
+                */
+               hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
+                             MAP_SHARED | MAP_ANONYMOUS, 0);
+               if (IS_ERR((void *)hva))
+                       return PTR_ERR((void *)hva);
+       } else {
+               if (!slot->npages)
+                       return 0;
+
+               hva = 0;
+       }
 
+       old = *slot;
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-               struct kvm_userspace_memory_region m = *mem;
+               struct kvm_userspace_memory_region m;
 
-               m.slot |= i << 16;
+               m.slot = id | (i << 16);
+               m.flags = 0;
+               m.guest_phys_addr = gpa;
+               m.userspace_addr = hva;
+               m.memory_size = size;
                r = __kvm_set_memory_region(kvm, &m);
                if (r < 0)
                        return r;
        }
 
+       if (!size) {
+               r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
+               WARN_ON(r < 0);
+       }
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
 
-int x86_set_memory_region(struct kvm *kvm,
-                         const struct kvm_userspace_memory_region *mem)
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
        int r;
 
        mutex_lock(&kvm->slots_lock);
-       r = __x86_set_memory_region(kvm, mem);
+       r = __x86_set_memory_region(kvm, id, gpa, size);
        mutex_unlock(&kvm->slots_lock);
 
        return r;
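
Callers now pass a slot id, guest physical address, and size; size 0 tears the slot down and unmaps the hva, as the destroy path in the next hunk shows. A hedged sketch of a typical creator (example_set_tss_addr is hypothetical; the real vmx.c call site may differ):

/* Create the three-page TSS private slot the way a VMX setup path would. */
static int example_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	return x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
				     PAGE_SIZE * 3);
}
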
@@ -7517,16 +7753,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                 * unless the memory map has changed due to process exit
                 * or fd copying.
                 */
-               struct kvm_userspace_memory_region mem;
-               memset(&mem, 0, sizeof(mem));
-               mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-               x86_set_memory_region(kvm, &mem);
-
-               mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-               x86_set_memory_region(kvm, &mem);
-
-               mem.slot = TSS_PRIVATE_MEMSLOT;
-               x86_set_memory_region(kvm, &mem);
+               x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
+               x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
+               x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
        }
        kvm_iommu_unmap_guest(kvm);
        kfree(kvm->arch.vpic);
@@ -7629,27 +7858,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
                                enum kvm_mr_change change)
 {
-       /*
-        * Only private memory slots need to be mapped here since
-        * KVM_SET_MEMORY_REGION ioctl is no longer supported.
-        */
-       if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
-               unsigned long userspace_addr;
-
-               /*
-                * MAP_SHARED to prevent internal slot pages from being moved
-                * by fork()/COW.
-                */
-               userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
-                                        PROT_READ | PROT_WRITE,
-                                        MAP_SHARED | MAP_ANONYMOUS, 0);
-
-               if (IS_ERR((void *)userspace_addr))
-                       return PTR_ERR((void *)userspace_addr);
-
-               memslot->userspace_addr = userspace_addr;
-       }
-
        return 0;
 }
 
@@ -7711,17 +7919,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 {
        int nr_mmu_pages = 0;
 
-       if (change == KVM_MR_DELETE && old->id >= KVM_USER_MEM_SLOTS) {
-               int ret;
-
-               ret = vm_munmap(old->userspace_addr,
-                               old->npages * PAGE_SIZE);
-               if (ret < 0)
-                       printk(KERN_WARNING
-                              "kvm_vm_ioctl_set_memory_region: "
-                              "failed to munmap memory\n");
-       }
-
        if (!kvm->arch.n_requested_mmu_pages)
                nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 
@@ -7770,19 +7967,36 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
        kvm_mmu_invalidate_zap_all_pages(kvm);
 }
 
+static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+{
+       if (!list_empty_careful(&vcpu->async_pf.done))
+               return true;
+
+       if (kvm_apic_has_events(vcpu))
+               return true;
+
+       if (vcpu->arch.pv.pv_unhalted)
+               return true;
+
+       if (atomic_read(&vcpu->arch.nmi_queued))
+               return true;
+
+       if (test_bit(KVM_REQ_SMI, &vcpu->requests))
+               return true;
+
+       if (kvm_arch_interrupt_allowed(vcpu) &&
+           kvm_cpu_has_interrupt(vcpu))
+               return true;
+
+       return false;
+}
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
        if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
                kvm_x86_ops->check_nested_events(vcpu, false);
 
-       return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-               !vcpu->arch.apf.halted)
-               || !list_empty_careful(&vcpu->async_pf.done)
-               || kvm_apic_has_events(vcpu)
-               || vcpu->arch.pv.pv_unhalted
-               || atomic_read(&vcpu->arch.nmi_queued) ||
-               (kvm_arch_interrupt_allowed(vcpu) &&
-                kvm_cpu_has_interrupt(vcpu));
+       return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
@@ -8014,7 +8228,59 @@ bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
 }
 EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
 
+int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
+                                     struct irq_bypass_producer *prod)
+{
+       struct kvm_kernel_irqfd *irqfd =
+               container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+       if (kvm_x86_ops->update_pi_irte) {
+               irqfd->producer = prod;
+               return kvm_x86_ops->update_pi_irte(irqfd->kvm,
+                               prod->irq, irqfd->gsi, 1);
+       }
+
+       return -EINVAL;
+}
+
+void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
+                                     struct irq_bypass_producer *prod)
+{
+       int ret;
+       struct kvm_kernel_irqfd *irqfd =
+               container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+       if (!kvm_x86_ops->update_pi_irte) {
+               WARN_ON(irqfd->producer != NULL);
+               return;
+       }
+
+       WARN_ON(irqfd->producer != prod);
+       irqfd->producer = NULL;
+
+       /*
+        * When the producer of a consumer is unregistered, we change back
+        * to remapped mode, so we can re-use the current implementation
+        * when the irq is masked/disabled or the consumer side (KVM in
+        * this case) doesn't want to receive the interrupts.
+        */
+       ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
+       if (ret)
+               printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
+                      " failed: %d\n", irqfd->consumer.token, ret);
+}
+
+int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
+                                  uint32_t guest_irq, bool set)
+{
+       if (!kvm_x86_ops->update_pi_irte)
+               return -EINVAL;
+
+       return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set);
+}
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
@@ -8029,3 +8295,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);