KVM: add kvm_arch_sched_in
authorRadim Krčmář <rkrcmar@redhat.com>
Thu, 21 Aug 2014 16:08:05 +0000 (18:08 +0200)
committerChristoffer Dall <christoffer.dall@linaro.org>
Thu, 2 Oct 2014 15:19:10 +0000 (17:19 +0200)
Introduce preempt notifiers for architecture specific code.
The advantage over creating a new notifier in every arch is slightly simpler
code and a guaranteed call order with respect to kvm_sched_in.

Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit e790d9ef6405633b007339d746b709aed43a928d)
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
arch/arm/kvm/arm.c
arch/powerpc/kvm/powerpc.c
arch/s390/kvm/kvm-s390.c
arch/x86/kvm/x86.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c

index 0e3d3dc8eea2bb7fc4c3c598e0cea0efff93b0a3..d55786c579bf1eb4cd0ac2fec940c7a798a75a26 100644 (file)
@@ -288,6 +288,10 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
 }
 
+void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        vcpu->cpu = cpu;
index aacf3e35404fef70abdd550d14e9a1ddd10de7cc..e4c719d49e164e55e5bd694aaf2ed50a747a6740 100644 (file)
@@ -533,6 +533,10 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
        kvmppc_subarch_vcpu_uninit(vcpu);
 }
 
+void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 #ifdef CONFIG_BOOKE
index 9133f869b07011c194f40eac595f0e858b53744a..46392afd043dd5d8c763e40f7a55d56134caad48 100644 (file)
@@ -325,6 +325,10 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
        /* Nothing todo */
 }
 
+void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        save_fp_regs(&vcpu->arch.host_fpregs);
index 46a8c74fd431ec76b15dcab00b9492894c5b0040..a348daad61fdc64ea869e63357cce768d4f8e1fd 100644 (file)
@@ -6809,6 +6809,10 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
                static_key_slow_dec(&kvm_no_apic_vcpu);
 }
 
+void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
        if (type)
index 75d911ca47bd57607deb63d31a30fc8114b6d812..4c12f314aab2c08651da265b3a43e532ba9ac8e5 100644 (file)
@@ -600,6 +600,8 @@ void kvm_arch_exit(void);
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
 
+void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
+
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
index aa5057e7a1b940ef0e64907a51de4aa9d5a5da18..2659da911d99542ea8fb4ee3eb774dcfdd997146 100644 (file)
@@ -3063,6 +3063,8 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
        if (vcpu->preempted)
                vcpu->preempted = false;
 
+       kvm_arch_sched_in(vcpu, cpu);
+
        kvm_arch_vcpu_load(vcpu, cpu);
 }