Merge branch 'kvm-ppc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus...
author Paolo Bonzini <pbonzini@redhat.com>
Thu, 12 Nov 2015 09:06:29 +0000 (10:06 +0100)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 12 Nov 2015 09:07:07 +0000 (10:07 +0100)
"Paolo,

I have two fixes for HV KVM which I would like to have included in
v4.4-rc1.  The first one is a fix for a bug identified by Red Hat
which causes occasional guest crashes.  The second one fixes a bug
which causes host stalls and timeouts under certain circumstances when
the host is configured for static 2-way micro-threading mode."

14 files changed:
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/vmx.h
arch/x86/include/uapi/asm/svm.h
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
include/linux/context_tracking.h
include/linux/kvm_host.h
include/linux/math64.h
kernel/context_tracking.c

index 9265196e877f34d662ea56a8d5993dcb4349ea57..30cfd64295a0075ab9c4727721c0fc8c8b607f46 100644 (file)
@@ -505,6 +505,7 @@ struct kvm_vcpu_arch {
        u32 virtual_tsc_mult;
        u32 virtual_tsc_khz;
        s64 ia32_tsc_adjust_msr;
+       u64 tsc_scaling_ratio;
 
        atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
        unsigned nmi_pending; /* NMI queued after currently running handler */
@@ -777,7 +778,7 @@ struct kvm_x86_ops {
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);
 
-       void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
+       void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
        int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
        int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
        u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
@@ -844,7 +845,7 @@ struct kvm_x86_ops {
        int (*get_lpage_level)(void);
        bool (*rdtscp_supported)(void);
        bool (*invpcid_supported)(void);
-       void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);
+       void (*adjust_tsc_offset_guest)(struct kvm_vcpu *vcpu, s64 adjustment);
 
        void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 
@@ -852,11 +853,9 @@ struct kvm_x86_ops {
 
        bool (*has_wbinvd_exit)(void);
 
-       void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
        u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
        void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
-       u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
        u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
 
        void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
@@ -923,17 +922,6 @@ struct kvm_arch_async_pf {
 
 extern struct kvm_x86_ops *kvm_x86_ops;
 
-static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
-                                          s64 adjustment)
-{
-       kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
-}
-
-static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
-{
-       kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
-}
-
 int kvm_mmu_module_init(void);
 void kvm_mmu_module_exit(void);
 
@@ -986,10 +974,12 @@ u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
 
 /* control of guest tsc rate supported? */
 extern bool kvm_has_tsc_control;
-/* minimum supported tsc_khz for guests */
-extern u32  kvm_min_guest_tsc_khz;
 /* maximum supported tsc_khz for guests */
 extern u32  kvm_max_guest_tsc_khz;
+/* number of bits of the fractional part of the TSC scaling ratio */
+extern u8   kvm_tsc_scaling_ratio_frac_bits;
+/* maximum allowed value of TSC scaling ratio */
+extern u64  kvm_max_tsc_scaling_ratio;
 
 enum emulation_result {
        EMULATE_DONE,         /* no further processing */
@@ -1235,6 +1225,9 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 void kvm_define_shared_msr(unsigned index, u32 msr);
 int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
 
+u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
+u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
+
 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
 
index aa336ff3e03ec9c0680f7599bb642f807c64b81b..14c63c7e8337a3d2bdfc98a5b4c14d2892de5580 100644 (file)
@@ -73,6 +73,7 @@
 #define SECONDARY_EXEC_ENABLE_PML               0x00020000
 #define SECONDARY_EXEC_XSAVES                  0x00100000
 #define SECONDARY_EXEC_PCOMMIT                 0x00200000
+#define SECONDARY_EXEC_TSC_SCALING              0x02000000
 
 #define PIN_BASED_EXT_INTR_MASK                 0x00000001
 #define PIN_BASED_NMI_EXITING                   0x00000008
@@ -167,6 +168,8 @@ enum vmcs_field {
        VMWRITE_BITMAP                  = 0x00002028,
        XSS_EXIT_BITMAP                 = 0x0000202C,
        XSS_EXIT_BITMAP_HIGH            = 0x0000202D,
+       TSC_MULTIPLIER                  = 0x00002032,
+       TSC_MULTIPLIER_HIGH             = 0x00002033,
        GUEST_PHYSICAL_ADDRESS          = 0x00002400,
        GUEST_PHYSICAL_ADDRESS_HIGH     = 0x00002401,
        VMCS_LINK_POINTER               = 0x00002800,
index b5d7640abc5d6172838a99569d144f8acc96763a..8a4add8e463932445ba071903378927f76912cfa 100644 (file)
        { SVM_EXIT_EXCP_BASE + UD_VECTOR,       "UD excp" }, \
        { SVM_EXIT_EXCP_BASE + PF_VECTOR,       "PF excp" }, \
        { SVM_EXIT_EXCP_BASE + NM_VECTOR,       "NM excp" }, \
+       { SVM_EXIT_EXCP_BASE + AC_VECTOR,       "AC excp" }, \
        { SVM_EXIT_EXCP_BASE + MC_VECTOR,       "MC excp" }, \
        { SVM_EXIT_INTR,        "interrupt" }, \
        { SVM_EXIT_NMI,         "nmi" }, \
index ecd4ea1d28a8ce01b5b693f5b7d8a953e50131a1..4d30b865be30641f4964a3279939020e9f1a057a 100644 (file)
@@ -1250,7 +1250,7 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
 
        tsc_deadline = apic->lapic_timer.expired_tscdeadline;
        apic->lapic_timer.expired_tscdeadline = 0;
-       guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
+       guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
 
        /* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
@@ -1318,7 +1318,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
                local_irq_save(flags);
 
                now = apic->lapic_timer.timer.base->get_time();
-               guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
+               guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
                if (likely(tscdeadline > guest_tsc)) {
                        ns = (tscdeadline - guest_tsc) * 1000000ULL;
                        do_div(ns, this_tsc_khz);
index 7d85bcae3332f0eb04c0fb9314a6c401de271e18..e7c2c1428a691676a6a1fdadee044ab45124acc2 100644 (file)
@@ -3359,7 +3359,7 @@ exit:
        return reserved;
 }
 
-int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
        u64 spte;
        bool reserved;
@@ -3368,7 +3368,7 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
                return RET_MMIO_PF_EMULATE;
 
        reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
-       if (unlikely(reserved))
+       if (WARN_ON(reserved))
                return RET_MMIO_PF_BUG;
 
        if (is_mmio_spte(spte)) {
@@ -3392,17 +3392,7 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
         */
        return RET_MMIO_PF_RETRY;
 }
-EXPORT_SYMBOL_GPL(handle_mmio_page_fault_common);
-
-static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr,
-                                 u32 error_code, bool direct)
-{
-       int ret;
-
-       ret = handle_mmio_page_fault_common(vcpu, addr, direct);
-       WARN_ON(ret == RET_MMIO_PF_BUG);
-       return ret;
-}
+EXPORT_SYMBOL_GPL(handle_mmio_page_fault);
 
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                                u32 error_code, bool prefault)
@@ -3413,7 +3403,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
        pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
 
        if (unlikely(error_code & PFERR_RSVD_MASK)) {
-               r = handle_mmio_page_fault(vcpu, gva, error_code, true);
+               r = handle_mmio_page_fault(vcpu, gva, true);
 
                if (likely(r != RET_MMIO_PF_INVALID))
                        return r;
@@ -3503,7 +3493,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
        MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
        if (unlikely(error_code & PFERR_RSVD_MASK)) {
-               r = handle_mmio_page_fault(vcpu, gpa, error_code, true);
+               r = handle_mmio_page_fault(vcpu, gpa, true);
 
                if (likely(r != RET_MMIO_PF_INVALID))
                        return r;
index e4202e41d53541f06a468e0b72beac483e2e7b8f..55ffb7b0f95e9f82a97e8fd4ab7caf4f720fc335 100644 (file)
@@ -56,13 +56,13 @@ void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 
 /*
- * Return values of handle_mmio_page_fault_common:
+ * Return values of handle_mmio_page_fault:
  * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
  *                     directly.
  * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
  *                     fault path update the mmio spte.
  * RET_MMIO_PF_RETRY: let CPU fault again on the address.
- * RET_MMIO_PF_BUG: bug is detected.
+ * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
  */
 enum {
        RET_MMIO_PF_EMULATE = 1,
@@ -71,7 +71,7 @@ enum {
        RET_MMIO_PF_BUG = -1
 };
 
-int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
+int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
 
index b41faa91a6f97ef4342b1c671160ddc5311b8502..3058a22a658d25db2bb8ecf4a37657bc1f873c1d 100644 (file)
@@ -705,8 +705,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
        pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
        if (unlikely(error_code & PFERR_RSVD_MASK)) {
-               r = handle_mmio_page_fault(vcpu, addr, error_code,
-                                             mmu_is_nested(vcpu));
+               r = handle_mmio_page_fault(vcpu, addr, mmu_is_nested(vcpu));
                if (likely(r != RET_MMIO_PF_INVALID))
                        return r;
 
index f2c8e4917688a34a9dfb3994d776e8efab512fc2..83a1c643f9a50fd3fa0ffc041c40d9f4d25aa5b5 100644 (file)
@@ -158,8 +158,6 @@ struct vcpu_svm {
        unsigned long int3_rip;
        u32 apf_reason;
 
-       u64  tsc_ratio;
-
        /* cached guest cpuid flags for faster access */
        bool nrips_enabled      : 1;
 };
@@ -214,7 +212,6 @@ static int nested_svm_intercept(struct vcpu_svm *svm);
 static int nested_svm_vmexit(struct vcpu_svm *svm);
 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code);
-static u64 __scale_tsc(u64 ratio, u64 tsc);
 
 enum {
        VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
@@ -894,20 +891,9 @@ static __init int svm_hardware_setup(void)
                kvm_enable_efer_bits(EFER_FFXSR);
 
        if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
-               u64 max;
-
                kvm_has_tsc_control = true;
-
-               /*
-                * Make sure the user can only configure tsc_khz values that
-                * fit into a signed integer.
-                * A min value is not calculated needed because it will always
-                * be 1 on all machines and a value of 0 is used to disable
-                * tsc-scaling for the vcpu.
-                */
-               max = min(0x7fffffffULL, __scale_tsc(tsc_khz, TSC_RATIO_MAX));
-
-               kvm_max_guest_tsc_khz = max;
+               kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
+               kvm_tsc_scaling_ratio_frac_bits = 32;
        }
 
        if (nested) {
@@ -971,68 +957,6 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
        seg->base = 0;
 }
 
-static u64 __scale_tsc(u64 ratio, u64 tsc)
-{
-       u64 mult, frac, _tsc;
-
-       mult  = ratio >> 32;
-       frac  = ratio & ((1ULL << 32) - 1);
-
-       _tsc  = tsc;
-       _tsc *= mult;
-       _tsc += (tsc >> 32) * frac;
-       _tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;
-
-       return _tsc;
-}
-
-static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
-{
-       struct vcpu_svm *svm = to_svm(vcpu);
-       u64 _tsc = tsc;
-
-       if (svm->tsc_ratio != TSC_RATIO_DEFAULT)
-               _tsc = __scale_tsc(svm->tsc_ratio, tsc);
-
-       return _tsc;
-}
-
-static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
-{
-       struct vcpu_svm *svm = to_svm(vcpu);
-       u64 ratio;
-       u64 khz;
-
-       /* Guest TSC same frequency as host TSC? */
-       if (!scale) {
-               svm->tsc_ratio = TSC_RATIO_DEFAULT;
-               return;
-       }
-
-       /* TSC scaling supported? */
-       if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
-               if (user_tsc_khz > tsc_khz) {
-                       vcpu->arch.tsc_catchup = 1;
-                       vcpu->arch.tsc_always_catchup = 1;
-               } else
-                       WARN(1, "user requested TSC rate below hardware speed\n");
-               return;
-       }
-
-       khz = user_tsc_khz;
-
-       /* TSC scaling required  - calculate ratio */
-       ratio = khz << 32;
-       do_div(ratio, tsc_khz);
-
-       if (ratio == 0 || ratio & TSC_RATIO_RSVD) {
-               WARN_ONCE(1, "Invalid TSC ratio - virtual-tsc-khz=%u\n",
-                               user_tsc_khz);
-               return;
-       }
-       svm->tsc_ratio             = ratio;
-}
-
 static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -1059,16 +983,10 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
+static void svm_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       if (host) {
-               if (svm->tsc_ratio != TSC_RATIO_DEFAULT)
-                       WARN_ON(adjustment < 0);
-               adjustment = svm_scale_tsc(vcpu, (u64)adjustment);
-       }
-
        svm->vmcb->control.tsc_offset += adjustment;
        if (is_guest_mode(vcpu))
                svm->nested.hsave->control.tsc_offset += adjustment;
@@ -1080,15 +998,6 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
-{
-       u64 tsc;
-
-       tsc = svm_scale_tsc(vcpu, rdtsc());
-
-       return target_tsc - tsc;
-}
-
 static void init_vmcb(struct vcpu_svm *svm)
 {
        struct vmcb_control_area *control = &svm->vmcb->control;
@@ -1110,6 +1019,8 @@ static void init_vmcb(struct vcpu_svm *svm)
        set_exception_intercept(svm, PF_VECTOR);
        set_exception_intercept(svm, UD_VECTOR);
        set_exception_intercept(svm, MC_VECTOR);
+       set_exception_intercept(svm, AC_VECTOR);
+       set_exception_intercept(svm, DB_VECTOR);
 
        set_intercept(svm, INTERCEPT_INTR);
        set_intercept(svm, INTERCEPT_NMI);
@@ -1235,8 +1146,6 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
                goto out;
        }
 
-       svm->tsc_ratio = TSC_RATIO_DEFAULT;
-
        err = kvm_vcpu_init(&svm->vcpu, kvm, id);
        if (err)
                goto free_svm;
@@ -1322,10 +1231,12 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
                rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 
-       if (static_cpu_has(X86_FEATURE_TSCRATEMSR) &&
-           svm->tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
-               __this_cpu_write(current_tsc_ratio, svm->tsc_ratio);
-               wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
+       if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
+               u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
+               if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
+                       __this_cpu_write(current_tsc_ratio, tsc_ratio);
+                       wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
+               }
        }
 }
 
@@ -1644,20 +1555,13 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
        mark_dirty(svm->vmcb, VMCB_SEG);
 }
 
-static void update_db_bp_intercept(struct kvm_vcpu *vcpu)
+static void update_bp_intercept(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       clr_exception_intercept(svm, DB_VECTOR);
        clr_exception_intercept(svm, BP_VECTOR);
 
-       if (svm->nmi_singlestep)
-               set_exception_intercept(svm, DB_VECTOR);
-
        if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
-               if (vcpu->guest_debug &
-                   (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
-                       set_exception_intercept(svm, DB_VECTOR);
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
                        set_exception_intercept(svm, BP_VECTOR);
        } else
@@ -1763,7 +1667,6 @@ static int db_interception(struct vcpu_svm *svm)
                if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
                        svm->vmcb->save.rflags &=
                                ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
-               update_db_bp_intercept(&svm->vcpu);
        }
 
        if (svm->vcpu.guest_debug &
@@ -1798,6 +1701,12 @@ static int ud_interception(struct vcpu_svm *svm)
        return 1;
 }
 
+static int ac_interception(struct vcpu_svm *svm)
+{
+       kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
+       return 1;
+}
+
 static void svm_fpu_activate(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -3075,8 +2984,7 @@ static int cr8_write_interception(struct vcpu_svm *svm)
 static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 {
        struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
-       return vmcb->control.tsc_offset +
-               svm_scale_tsc(vcpu, host_tsc);
+       return vmcb->control.tsc_offset + host_tsc;
 }
 
 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -3086,7 +2994,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        switch (msr_info->index) {
        case MSR_IA32_TSC: {
                msr_info->data = svm->vmcb->control.tsc_offset +
-                       svm_scale_tsc(vcpu, rdtsc());
+                       kvm_scale_tsc(vcpu, rdtsc());
 
                break;
        }
@@ -3362,6 +3270,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
        [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
        [SVM_EXIT_EXCP_BASE + NM_VECTOR]        = nm_interception,
        [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
+       [SVM_EXIT_EXCP_BASE + AC_VECTOR]        = ac_interception,
        [SVM_EXIT_INTR]                         = intr_interception,
        [SVM_EXIT_NMI]                          = nmi_interception,
        [SVM_EXIT_SMI]                          = nop_on_interception,
@@ -3745,7 +3654,6 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
         */
        svm->nmi_singlestep = true;
        svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
-       update_db_bp_intercept(vcpu);
 }
 
 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
@@ -4371,7 +4279,7 @@ static struct kvm_x86_ops svm_x86_ops = {
        .vcpu_load = svm_vcpu_load,
        .vcpu_put = svm_vcpu_put,
 
-       .update_db_bp_intercept = update_db_bp_intercept,
+       .update_bp_intercept = update_bp_intercept,
        .get_msr = svm_get_msr,
        .set_msr = svm_set_msr,
        .get_segment_base = svm_get_segment_base,
@@ -4443,11 +4351,9 @@ static struct kvm_x86_ops svm_x86_ops = {
 
        .has_wbinvd_exit = svm_has_wbinvd_exit,
 
-       .set_tsc_khz = svm_set_tsc_khz,
        .read_tsc_offset = svm_read_tsc_offset,
        .write_tsc_offset = svm_write_tsc_offset,
-       .adjust_tsc_offset = svm_adjust_tsc_offset,
-       .compute_tsc_offset = svm_compute_tsc_offset,
+       .adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest,
        .read_l1_tsc = svm_read_l1_tsc,
 
        .set_tdp_cr3 = set_tdp_cr3,
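
MSR_AMD64_TSC_RATIO keeps a 32-bit fractional part, which is why the SVM hunk above sets kvm_tsc_scaling_ratio_frac_bits to 32, and the open-coded __scale_tsc() it removes is arithmetically the generic mul_u64_u64_shr(tsc, ratio, 32) that common code now uses. A small userspace check of that equivalence (assumes a compiler with unsigned __int128, like the 64-bit math64.h fallback):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* The removed SVM helper: 64x64 multiply split into 32-bit partial products. */
static u64 svm_scale_tsc_removed(u64 ratio, u64 tsc)
{
	u64 mult = ratio >> 32;
	u64 frac = ratio & ((1ULL << 32) - 1);
	u64 _tsc = tsc * mult;

	_tsc += (tsc >> 32) * frac;
	_tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;
	return _tsc;
}

/* The generic replacement (the 64-bit math64.h variant). */
static u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}

int main(void)
{
	/* 1ULL << 32 is TSC_RATIO_DEFAULT, i.e. a ratio of 1.0. */
	u64 ratios[] = { 1ULL << 32, 1ULL << 31, 0x180000000ULL };
	u64 tscs[]   = { 0, 12345, 2600000000ULL, 0x123456789abcdefULL };

	for (int i = 0; i < 3; i++)
		for (int j = 0; j < 4; j++)
			assert(svm_scale_tsc_removed(ratios[i], tscs[j]) ==
			       mul_u64_u64_shr(tscs[j], ratios[i], 32));

	printf("removed __scale_tsc matches mul_u64_u64_shr(tsc, ratio, 32)\n");
	return 0;
}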
index 5eb56ed77c1fdea01652acf0da476761cf7a9aaa..87acc5221740a588d256f9f0ed4059d366165872 100644 (file)
@@ -107,6 +107,8 @@ static u64 __read_mostly host_xss;
 static bool __read_mostly enable_pml = 1;
 module_param_named(pml, enable_pml, bool, S_IRUGO);
 
+#define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
+
 #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
 #define KVM_VM_CR0_ALWAYS_ON                                           \
@@ -1172,6 +1174,12 @@ static inline bool cpu_has_vmx_pml(void)
        return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
 }
 
+static inline bool cpu_has_vmx_tsc_scaling(void)
+{
+       return vmcs_config.cpu_based_2nd_exec_ctrl &
+               SECONDARY_EXEC_TSC_SCALING;
+}
+
 static inline bool report_flexpriority(void)
 {
        return flexpriority_enabled;
@@ -1631,7 +1639,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
        u32 eb;
 
        eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
-            (1u << NM_VECTOR) | (1u << DB_VECTOR);
+            (1u << NM_VECTOR) | (1u << DB_VECTOR) | (1u << AC_VECTOR);
        if ((vcpu->guest_debug &
             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
            (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
@@ -2053,6 +2061,12 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
                rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
                vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+
+               /* Setup TSC multiplier */
+               if (cpu_has_vmx_tsc_scaling())
+                       vmcs_write64(TSC_MULTIPLIER,
+                                    vcpu->arch.tsc_scaling_ratio);
+
                vmx->loaded_vmcs->cpu = cpu;
        }
 
@@ -2357,15 +2371,16 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 
 /*
  * reads and returns guest's timestamp counter "register"
- * guest_tsc = host_tsc + tsc_offset    -- 21.3
+ * guest_tsc = ((host_tsc * tsc multiplier) >> 48) + tsc_offset
+ * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
  */
-static u64 guest_read_tsc(void)
+static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
 {
        u64 host_tsc, tsc_offset;
 
        host_tsc = rdtsc();
        tsc_offset = vmcs_read64(TSC_OFFSET);
-       return host_tsc + tsc_offset;
+       return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
 }
 
 /*
@@ -2382,22 +2397,6 @@ static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
        return host_tsc + tsc_offset;
 }
 
-/*
- * Engage any workarounds for mis-matched TSC rates.  Currently limited to
- * software catchup for faster rates on slower CPUs.
- */
-static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
-{
-       if (!scale)
-               return;
-
-       if (user_tsc_khz > tsc_khz) {
-               vcpu->arch.tsc_catchup = 1;
-               vcpu->arch.tsc_always_catchup = 1;
-       } else
-               WARN(1, "user requested TSC rate below hardware speed\n");
-}
-
 static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu)
 {
        return vmcs_read64(TSC_OFFSET);
@@ -2429,7 +2428,7 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
        }
 }
 
-static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
+static void vmx_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
 {
        u64 offset = vmcs_read64(TSC_OFFSET);
 
@@ -2442,11 +2441,6 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
                                           offset + adjustment);
 }
 
-static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
-{
-       return target_tsc - rdtsc();
-}
-
 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
@@ -2778,7 +2772,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_EFER:
                return kvm_get_msr_common(vcpu, msr_info);
        case MSR_IA32_TSC:
-               msr_info->data = guest_read_tsc();
+               msr_info->data = guest_read_tsc(vcpu);
                break;
        case MSR_IA32_SYSENTER_CS:
                msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
@@ -3154,7 +3148,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
                        SECONDARY_EXEC_SHADOW_VMCS |
                        SECONDARY_EXEC_XSAVES |
                        SECONDARY_EXEC_ENABLE_PML |
-                       SECONDARY_EXEC_PCOMMIT;
+                       SECONDARY_EXEC_PCOMMIT |
+                       SECONDARY_EXEC_TSC_SCALING;
                if (adjust_vmx_controls(min2, opt2,
                                        MSR_IA32_VMX_PROCBASED_CTLS2,
                                        &_cpu_based_2nd_exec_control) < 0)
@@ -5266,6 +5261,9 @@ static int handle_exception(struct kvm_vcpu *vcpu)
                return handle_rmode_exception(vcpu, ex_no, error_code);
 
        switch (ex_no) {
+       case AC_VECTOR:
+               kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
+               return 1;
        case DB_VECTOR:
                dr6 = vmcs_readl(EXIT_QUALIFICATION);
                if (!(vcpu->guest_debug &
@@ -5908,7 +5906,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
                return 1;
        }
 
-       ret = handle_mmio_page_fault_common(vcpu, gpa, true);
+       ret = handle_mmio_page_fault(vcpu, gpa, true);
        if (likely(ret == RET_MMIO_PF_EMULATE))
                return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
                                              EMULATE_DONE;
@@ -6199,6 +6197,12 @@ static __init int hardware_setup(void)
        if (!cpu_has_vmx_apicv())
                enable_apicv = 0;
 
+       if (cpu_has_vmx_tsc_scaling()) {
+               kvm_has_tsc_control = true;
+               kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
+               kvm_tsc_scaling_ratio_frac_bits = 48;
+       }
+
        if (enable_apicv)
                kvm_x86_ops->update_cr8_intercept = NULL;
        else {
@@ -8008,6 +8012,9 @@ static void dump_vmcs(void)
               vmcs_read32(IDT_VECTORING_INFO_FIELD),
               vmcs_read32(IDT_VECTORING_ERROR_CODE));
        pr_err("TSC Offset = 0x%016lx\n", vmcs_readl(TSC_OFFSET));
+       if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
+               pr_err("TSC Multiplier = 0x%016lx\n",
+                      vmcs_readl(TSC_MULTIPLIER));
        if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW)
                pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
        if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
@@ -10752,7 +10759,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .vcpu_load = vmx_vcpu_load,
        .vcpu_put = vmx_vcpu_put,
 
-       .update_db_bp_intercept = update_exception_bitmap,
+       .update_bp_intercept = update_exception_bitmap,
        .get_msr = vmx_get_msr,
        .set_msr = vmx_set_msr,
        .get_segment_base = vmx_get_segment_base,
@@ -10826,11 +10833,9 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
        .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
-       .set_tsc_khz = vmx_set_tsc_khz,
        .read_tsc_offset = vmx_read_tsc_offset,
        .write_tsc_offset = vmx_write_tsc_offset,
-       .adjust_tsc_offset = vmx_adjust_tsc_offset,
-       .compute_tsc_offset = vmx_compute_tsc_offset,
+       .adjust_tsc_offset_guest = vmx_adjust_tsc_offset_guest,
        .read_l1_tsc = vmx_read_l1_tsc,
 
        .set_tdp_cr3 = vmx_set_cr3,
index 30723a4122cd6a0583a64f23605ba22779e3d638..aba7f95d7a64c981fb6f88aade5edcac57286117 100644 (file)
@@ -93,10 +93,10 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 
-struct kvm_x86_ops *kvm_x86_ops;
+struct kvm_x86_ops *kvm_x86_ops __read_mostly;
 EXPORT_SYMBOL_GPL(kvm_x86_ops);
 
-static bool ignore_msrs = 0;
+static bool __read_mostly ignore_msrs = 0;
 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
 
 unsigned int min_timer_period_us = 500;
@@ -105,20 +105,25 @@ module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
 static bool __read_mostly kvmclock_periodic_sync = true;
 module_param(kvmclock_periodic_sync, bool, S_IRUGO);
 
-bool kvm_has_tsc_control;
+bool __read_mostly kvm_has_tsc_control;
 EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
-u32  kvm_max_guest_tsc_khz;
+u32  __read_mostly kvm_max_guest_tsc_khz;
 EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
+u8   __read_mostly kvm_tsc_scaling_ratio_frac_bits;
+EXPORT_SYMBOL_GPL(kvm_tsc_scaling_ratio_frac_bits);
+u64  __read_mostly kvm_max_tsc_scaling_ratio;
+EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio);
+static u64 __read_mostly kvm_default_tsc_scaling_ratio;
 
 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
-static u32 tsc_tolerance_ppm = 250;
+static u32 __read_mostly tsc_tolerance_ppm = 250;
 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
 
 /* lapic timer advance (tscdeadline mode only) in nanoseconds */
-unsigned int lapic_timer_advance_ns = 0;
+unsigned int __read_mostly lapic_timer_advance_ns = 0;
 module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
 
-static bool backwards_tsc_observed = false;
+static bool __read_mostly backwards_tsc_observed = false;
 
 #define KVM_NR_SHARED_MSRS 16
 
@@ -1248,14 +1253,53 @@ static u32 adjust_tsc_khz(u32 khz, s32 ppm)
        return v;
 }
 
-static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
+static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
+{
+       u64 ratio;
+
+       /* Guest TSC same frequency as host TSC? */
+       if (!scale) {
+               vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
+               return 0;
+       }
+
+       /* TSC scaling supported? */
+       if (!kvm_has_tsc_control) {
+               if (user_tsc_khz > tsc_khz) {
+                       vcpu->arch.tsc_catchup = 1;
+                       vcpu->arch.tsc_always_catchup = 1;
+                       return 0;
+               } else {
+                       WARN(1, "user requested TSC rate below hardware speed\n");
+                       return -1;
+               }
+       }
+
+       /* TSC scaling required  - calculate ratio */
+       ratio = mul_u64_u32_div(1ULL << kvm_tsc_scaling_ratio_frac_bits,
+                               user_tsc_khz, tsc_khz);
+
+       if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
+               WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
+                         user_tsc_khz);
+               return -1;
+       }
+
+       vcpu->arch.tsc_scaling_ratio = ratio;
+       return 0;
+}
+
+static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
 {
        u32 thresh_lo, thresh_hi;
        int use_scaling = 0;
 
        /* tsc_khz can be zero if TSC calibration fails */
-       if (this_tsc_khz == 0)
-               return;
+       if (this_tsc_khz == 0) {
+               /* set tsc_scaling_ratio to a safe value */
+               vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
+               return -1;
+       }
 
        /* Compute a scale to convert nanoseconds in TSC cycles */
        kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
@@ -1275,7 +1319,7 @@ static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
                pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
                use_scaling = 1;
        }
-       kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
+       return set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
 }
 
 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
@@ -1321,6 +1365,48 @@ static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
        vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
 }
 
+/*
+ * Multiply tsc by a fixed point number represented by ratio.
+ *
+ * The most significant 64-N bits (mult) of ratio represent the
+ * integral part of the fixed point number; the remaining N bits
+ * (frac) represent the fractional part, ie. ratio represents a fixed
+ * point number (mult + frac * 2^(-N)).
+ *
+ * N equals to kvm_tsc_scaling_ratio_frac_bits.
+ */
+static inline u64 __scale_tsc(u64 ratio, u64 tsc)
+{
+       return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
+}
+
+u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
+{
+       u64 _tsc = tsc;
+       u64 ratio = vcpu->arch.tsc_scaling_ratio;
+
+       if (ratio != kvm_default_tsc_scaling_ratio)
+               _tsc = __scale_tsc(ratio, tsc);
+
+       return _tsc;
+}
+EXPORT_SYMBOL_GPL(kvm_scale_tsc);
+
+static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+{
+       u64 tsc;
+
+       tsc = kvm_scale_tsc(vcpu, rdtsc());
+
+       return target_tsc - tsc;
+}
+
+u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
+{
+       return kvm_x86_ops->read_l1_tsc(vcpu, kvm_scale_tsc(vcpu, host_tsc));
+}
+EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
        struct kvm *kvm = vcpu->kvm;
@@ -1332,7 +1418,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
        u64 data = msr->data;
 
        raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
-       offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
+       offset = kvm_compute_tsc_offset(vcpu, data);
        ns = get_kernel_ns();
        elapsed = ns - kvm->arch.last_tsc_nsec;
 
@@ -1389,7 +1475,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
                } else {
                        u64 delta = nsec_to_cycles(vcpu, elapsed);
                        data += delta;
-                       offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
+                       offset = kvm_compute_tsc_offset(vcpu, data);
                        pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
                }
                matched = true;
@@ -1446,6 +1532,20 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 
 EXPORT_SYMBOL_GPL(kvm_write_tsc);
 
+static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
+                                          s64 adjustment)
+{
+       kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
+}
+
+static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
+{
+       if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
+               WARN_ON(adjustment < 0);
+       adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
+       kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
+}
+
 #ifdef CONFIG_X86_64
 
 static cycle_t read_tsc(void)
@@ -1607,7 +1707,7 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 
 static int kvm_guest_time_update(struct kvm_vcpu *v)
 {
-       unsigned long flags, this_tsc_khz;
+       unsigned long flags, this_tsc_khz, tgt_tsc_khz;
        struct kvm_vcpu_arch *vcpu = &v->arch;
        struct kvm_arch *ka = &v->kvm->arch;
        s64 kernel_ns;
@@ -1644,7 +1744,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                kernel_ns = get_kernel_ns();
        }
 
-       tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);
+       tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
 
        /*
         * We may have to catch up the TSC to match elapsed wall clock
@@ -1670,7 +1770,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                return 0;
 
        if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
-               kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
+               tgt_tsc_khz = kvm_has_tsc_control ?
+                       vcpu->virtual_tsc_khz : this_tsc_khz;
+               kvm_get_time_scale(NSEC_PER_SEC / 1000, tgt_tsc_khz,
                                   &vcpu->hv_clock.tsc_shift,
                                   &vcpu->hv_clock.tsc_to_system_mul);
                vcpu->hw_tsc_khz = this_tsc_khz;
@@ -2616,7 +2718,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                if (tsc_delta < 0)
                        mark_tsc_unstable("KVM discovered backwards TSC");
                if (check_tsc_unstable()) {
-                       u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
+                       u64 offset = kvm_compute_tsc_offset(vcpu,
                                                vcpu->arch.last_guest_tsc);
                        kvm_x86_ops->write_tsc_offset(vcpu, offset);
                        vcpu->arch.tsc_catchup = 1;
@@ -3318,9 +3420,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                if (user_tsc_khz == 0)
                        user_tsc_khz = tsc_khz;
 
-               kvm_set_tsc_khz(vcpu, user_tsc_khz);
+               if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
+                       r = 0;
 
-               r = 0;
                goto out;
        }
        case KVM_GET_TSC_KHZ: {
@@ -6451,8 +6553,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        if (hw_breakpoint_active())
                hw_breakpoint_restore();
 
-       vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
-                                                          rdtsc());
+       vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
 
        vcpu->mode = OUTSIDE_GUEST_MODE;
        smp_wmb();
@@ -7014,7 +7115,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
         */
        kvm_set_rflags(vcpu, rflags);
 
-       kvm_x86_ops->update_db_bp_intercept(vcpu);
+       kvm_x86_ops->update_bp_intercept(vcpu);
 
        r = 0;
 
@@ -7363,6 +7464,20 @@ int kvm_arch_hardware_setup(void)
        if (r != 0)
                return r;
 
+       if (kvm_has_tsc_control) {
+               /*
+                * Make sure the user can only configure tsc_khz values that
+                * fit into a signed integer.
+                * A min value is not calculated because it will always
+                * be 1 on all machines.
+                */
+               u64 max = min(0x7fffffffULL,
+                             __scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz));
+               kvm_max_guest_tsc_khz = max;
+
+               kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
+       }
+
        kvm_init_msr_list();
        return 0;
 }
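
Taken together, the new common-code helpers make the guest-visible TSC an affine function of the host TSC: kvm_compute_tsc_offset() records offset = target_tsc - kvm_scale_tsc(host_tsc), and kvm_read_l1_tsc() later returns kvm_scale_tsc(host_tsc) + offset. A small userspace sketch of that round trip, assuming VMX's 48 fractional bits (illustrative arithmetic only, not KVM code):

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 48	/* VMX value; SVM uses 32 */

static uint64_t scale(uint64_t tsc, uint64_t ratio)
{
	return (uint64_t)(((unsigned __int128)tsc * ratio) >> FRAC_BITS);
}

int main(void)
{
	/* Guest runs at 1.5 GHz on a 3.0 GHz host -> ratio is exactly 0.5. */
	uint64_t ratio = (uint64_t)(((unsigned __int128)1500000 << FRAC_BITS) / 3000000);

	uint64_t host_tsc_at_write = 9000000000ULL;	/* host TSC when the guest TSC is set */
	uint64_t target            = 1000ULL;		/* value the guest should see */

	/* kvm_compute_tsc_offset(): offset relative to the *scaled* host TSC
	 * (u64 arithmetic, so this may wrap, exactly as in the kernel). */
	uint64_t offset = target - scale(host_tsc_at_write, ratio);

	/* One host second later (3e9 host cycles), kvm_read_l1_tsc() yields: */
	uint64_t guest_tsc = scale(host_tsc_at_write + 3000000000ULL, ratio) + offset;

	/* Prints 1500001000: the target plus one second at the guest's 1.5 GHz. */
	printf("guest tsc = %llu\n", (unsigned long long)guest_tsc);
	return 0;
}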
index 008fc67d0d963ea811feb5cdfcdf1c750f407864..68b575afe5f5909f78b88f88c4dcbd086ef19720 100644 (file)
 #ifdef CONFIG_CONTEXT_TRACKING
 extern void context_tracking_cpu_set(int cpu);
 
+/* Called with interrupts disabled.  */
+extern void __context_tracking_enter(enum ctx_state state);
+extern void __context_tracking_exit(enum ctx_state state);
+
 extern void context_tracking_enter(enum ctx_state state);
 extern void context_tracking_exit(enum ctx_state state);
 extern void context_tracking_user_enter(void);
@@ -18,13 +22,13 @@ extern void context_tracking_user_exit(void);
 static inline void user_enter(void)
 {
        if (context_tracking_is_enabled())
-               context_tracking_user_enter();
+               context_tracking_enter(CONTEXT_USER);
 
 }
 static inline void user_exit(void)
 {
        if (context_tracking_is_enabled())
-               context_tracking_user_exit();
+               context_tracking_exit(CONTEXT_USER);
 }
 
 static inline enum ctx_state exception_enter(void)
@@ -88,13 +92,13 @@ static inline void guest_enter(void)
                current->flags |= PF_VCPU;
 
        if (context_tracking_is_enabled())
-               context_tracking_enter(CONTEXT_GUEST);
+               __context_tracking_enter(CONTEXT_GUEST);
 }
 
 static inline void guest_exit(void)
 {
        if (context_tracking_is_enabled())
-               context_tracking_exit(CONTEXT_GUEST);
+               __context_tracking_exit(CONTEXT_GUEST);
 
        if (vtime_accounting_enabled())
                vtime_guest_exit(current);
index 242a6d2b53ff2713b21496072289bbb576754fb6..5706a2108f0a67826ed644dce761edd21a6a05c0 100644 (file)
@@ -1183,4 +1183,5 @@ void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
                                  uint32_t guest_irq, bool set);
 #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
+
 #endif
index c45c089bfdaca9a91f32832102ff32291444884f..6e8b5b270ffeada870b3cdcd638530f2479b84ee 100644 (file)
@@ -142,6 +142,13 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
 }
 #endif /* mul_u64_u32_shr */
 
+#ifndef mul_u64_u64_shr
+static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
+{
+       return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u64_shr */
+
 #else
 
 #ifndef mul_u64_u32_shr
@@ -161,6 +168,79 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
 }
 #endif /* mul_u64_u32_shr */
 
+#ifndef mul_u64_u64_shr
+static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
+{
+       union {
+               u64 ll;
+               struct {
+#ifdef __BIG_ENDIAN
+                       u32 high, low;
+#else
+                       u32 low, high;
+#endif
+               } l;
+       } rl, rm, rn, rh, a0, b0;
+       u64 c;
+
+       a0.ll = a;
+       b0.ll = b;
+
+       rl.ll = (u64)a0.l.low * b0.l.low;
+       rm.ll = (u64)a0.l.low * b0.l.high;
+       rn.ll = (u64)a0.l.high * b0.l.low;
+       rh.ll = (u64)a0.l.high * b0.l.high;
+
+       /*
+        * Each of these lines computes a 64-bit intermediate result into "c",
+        * starting at bits 32-95.  The low 32-bits go into the result of the
+        * multiplication, the high 32-bits are carried into the next step.
+        */
+       rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
+       rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
+       rh.l.high = (c >> 32) + rh.l.high;
+
+       /*
+        * The 128-bit result of the multiplication is in rl.ll and rh.ll,
+        * shift it right and throw away the high part of the result.
+        */
+       if (shift == 0)
+               return rl.ll;
+       if (shift < 64)
+               return (rl.ll >> shift) | (rh.ll << (64 - shift));
+       return rh.ll >> (shift & 63);
+}
+#endif /* mul_u64_u64_shr */
+
 #endif
 
+#ifndef mul_u64_u32_div
+static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
+{
+       union {
+               u64 ll;
+               struct {
+#ifdef __BIG_ENDIAN
+                       u32 high, low;
+#else
+                       u32 low, high;
+#endif
+               } l;
+       } u, rl, rh;
+
+       u.ll = a;
+       rl.ll = (u64)u.l.low * mul;
+       rh.ll = (u64)u.l.high * mul + rl.l.high;
+
+       /* Bits 32-63 of the result will be in rh.l.low. */
+       rl.l.high = do_div(rh.ll, divisor);
+
+       /* Bits 0-31 of the result will be in rl.l.low. */
+       do_div(rl.ll, divisor);
+
+       rl.l.high = rh.l.low;
+       return rl.ll;
+}
+#endif /* mul_u64_u32_div */
+
 #endif /* _LINUX_MATH64_H */
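
Of the two helpers added here, mul_u64_u64_shr() is what kvm_scale_tsc() builds on — ((a * b) >> shift) with a full 128-bit intermediate — and mul_u64_u32_div() is used by set_tsc_khz() to compute the scaling ratio without overflowing 64 bits (it leans on the kernel's do_div(), so it is not reproduced below). A quick userspace cross-check of the portable, non-__int128 mul_u64_u64_shr() fallback against a 128-bit reference; the union keeps only the little-endian layout, which is what x86 uses:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint32_t u32;

/* The portable fallback from the hunk above (little-endian layout only). */
static u64 mul_u64_u64_shr_portable(u64 a, u64 b, unsigned int shift)
{
	union {
		u64 ll;
		struct { u32 low, high; } l;
	} rl, rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	rl.ll = (u64)a0.l.low * b0.l.low;
	rm.ll = (u64)a0.l.low * b0.l.high;
	rn.ll = (u64)a0.l.high * b0.l.low;
	rh.ll = (u64)a0.l.high * b0.l.high;

	/* Accumulate the middle partial products; carries flow through "c". */
	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	return rh.ll >> (shift & 63);
}

int main(void)
{
	u64 a = 0x123456789abcdef0ULL, b = 0xfedcba9876543210ULL;

	for (unsigned int shift = 0; shift < 128; shift++) {
		u64 ref = (u64)(((unsigned __int128)a * b) >> shift);
		assert(mul_u64_u64_shr_portable(a, b, shift) == ref);
	}
	printf("portable mul_u64_u64_shr matches the 128-bit reference\n");
	return 0;
}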
index 0a495ab35bc72b55d8eab13f0922e6fdb8099d9a..d8560ee3bab788c15be6d1be8eb610473fc5c4b5 100644 (file)
@@ -58,36 +58,13 @@ static void context_tracking_recursion_exit(void)
  * instructions to execute won't use any RCU read side critical section
  * because this function sets RCU in extended quiescent state.
  */
-void context_tracking_enter(enum ctx_state state)
+void __context_tracking_enter(enum ctx_state state)
 {
-       unsigned long flags;
-
-       /*
-        * Repeat the user_enter() check here because some archs may be calling
-        * this from asm and if no CPU needs context tracking, they shouldn't
-        * go further. Repeat the check here until they support the inline static
-        * key check.
-        */
-       if (!context_tracking_is_enabled())
-               return;
-
-       /*
-        * Some contexts may involve an exception occuring in an irq,
-        * leading to that nesting:
-        * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
-        * This would mess up the dyntick_nesting count though. And rcu_irq_*()
-        * helpers are enough to protect RCU uses inside the exception. So
-        * just return immediately if we detect we are in an IRQ.
-        */
-       if (in_interrupt())
-               return;
-
        /* Kernel threads aren't supposed to go to userspace */
        WARN_ON_ONCE(!current->mm);
 
-       local_irq_save(flags);
        if (!context_tracking_recursion_enter())
-               goto out_irq_restore;
+               return;
 
        if ( __this_cpu_read(context_tracking.state) != state) {
                if (__this_cpu_read(context_tracking.active)) {
@@ -120,7 +97,27 @@ void context_tracking_enter(enum ctx_state state)
                __this_cpu_write(context_tracking.state, state);
        }
        context_tracking_recursion_exit();
-out_irq_restore:
+}
+NOKPROBE_SYMBOL(__context_tracking_enter);
+EXPORT_SYMBOL_GPL(__context_tracking_enter);
+
+void context_tracking_enter(enum ctx_state state)
+{
+       unsigned long flags;
+
+       /*
+        * Some contexts may involve an exception occuring in an irq,
+        * Some contexts may involve an exception occurring in an irq,
+        * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+        * This would mess up the dyntick_nesting count though. And rcu_irq_*()
+        * helpers are enough to protect RCU uses inside the exception. So
+        * just return immediately if we detect we are in an IRQ.
+        */
+       if (in_interrupt())
+               return;
+
+       local_irq_save(flags);
+       __context_tracking_enter(state);
        local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(context_tracking_enter);
@@ -128,7 +125,7 @@ EXPORT_SYMBOL_GPL(context_tracking_enter);
 
 void context_tracking_user_enter(void)
 {
-       context_tracking_enter(CONTEXT_USER);
+       user_enter();
 }
 NOKPROBE_SYMBOL(context_tracking_user_enter);
 
@@ -144,19 +141,10 @@ NOKPROBE_SYMBOL(context_tracking_user_enter);
  * This call supports re-entrancy. This way it can be called from any exception
  * handler without needing to know if we came from userspace or not.
  */
-void context_tracking_exit(enum ctx_state state)
+void __context_tracking_exit(enum ctx_state state)
 {
-       unsigned long flags;
-
-       if (!context_tracking_is_enabled())
-               return;
-
-       if (in_interrupt())
-               return;
-
-       local_irq_save(flags);
        if (!context_tracking_recursion_enter())
-               goto out_irq_restore;
+               return;
 
        if (__this_cpu_read(context_tracking.state) == state) {
                if (__this_cpu_read(context_tracking.active)) {
@@ -173,7 +161,19 @@ void context_tracking_exit(enum ctx_state state)
                __this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
        }
        context_tracking_recursion_exit();
-out_irq_restore:
+}
+NOKPROBE_SYMBOL(__context_tracking_exit);
+EXPORT_SYMBOL_GPL(__context_tracking_exit);
+
+void context_tracking_exit(enum ctx_state state)
+{
+       unsigned long flags;
+
+       if (in_interrupt())
+               return;
+
+       local_irq_save(flags);
+       __context_tracking_exit(state);
        local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(context_tracking_exit);
@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(context_tracking_exit);
 
 void context_tracking_user_exit(void)
 {
-       context_tracking_exit(CONTEXT_USER);
+       user_exit();
 }
 NOKPROBE_SYMBOL(context_tracking_user_exit);
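
The context-tracking change is purely structural: context_tracking_enter()/_exit() keep the in_interrupt() bail-out and the local_irq_save()/restore() around the real work, which now lives in __context_tracking_enter()/__context_tracking_exit(), and guest_enter()/guest_exit() switch to the double-underscore variants because KVM's vcpu entry/exit path already runs with interrupts disabled. A toy userspace sketch of that wrapper/worker split (hypothetical sim_* names, a plain flag standing in for the saved IRQ state — not kernel API):

#include <stdio.h>

static int irqs_disabled;	/* stand-in for the CPU interrupt flag */

static void sim_irq_save(unsigned long *flags)
{
	*flags = (unsigned long)irqs_disabled;
	irqs_disabled = 1;
}

static void sim_irq_restore(unsigned long flags)
{
	irqs_disabled = (int)flags;
}

/* Inner worker: the caller guarantees "interrupts" are already off. */
static void __tracking_enter(const char *state)
{
	printf("enter %s (irqs disabled: %d)\n", state, irqs_disabled);
}

/* Outer, irq-safe entry point: disable around the worker, as
 * context_tracking_enter() still does. */
static void tracking_enter(const char *state)
{
	unsigned long flags;

	sim_irq_save(&flags);
	__tracking_enter(state);
	sim_irq_restore(flags);
}

int main(void)
{
	/* Ordinary path (e.g. user_enter()): interrupts may be on. */
	tracking_enter("CONTEXT_USER");

	/* KVM-style path (guest_enter()): interrupts already off, so skip
	 * the save/restore and call the worker directly. */
	irqs_disabled = 1;
	__tracking_enter("CONTEXT_GUEST");
	return 0;
}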