KVM: Use cond_resched() directly and remove useless kvm_resched()
author Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Fri, 13 Dec 2013 06:07:21 +0000 (15:07 +0900)
committer Christoffer Dall <christoffer.dall@linaro.org>
Thu, 2 Oct 2014 15:18:21 +0000 (17:18 +0200)
Since commit 15ad7146 ("KVM: Use the scheduler preemption notifiers
to make kvm preemptible"), all that remains in kvm_resched() is a
simple cond_resched() call plus an extra need_resched() check that
was there to avoid descheduling VCPUs unnecessarily.  That check is
now meaningless, so call cond_resched() directly and remove
kvm_resched().
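
The extra check is redundant because cond_resched() already tests on
its own whether a reschedule is pending before yielding.  The snippet
below is only a simplified, illustrative sketch of that behaviour (the
name cond_resched_sketch() is made up here); the real implementation
lives in kernel/sched/core.c and also takes the preempt count and the
preemption model into account.

	/* Simplified sketch only -- not the kernel's actual cond_resched(). */
	static inline int cond_resched_sketch(void)
	{
		if (!need_resched())	/* no reschedule pending: cheap no-op */
			return 0;
		schedule();		/* otherwise give up the CPU */
		return 1;
	}

Given that, callers gain nothing from the wrapper and can simply call
cond_resched() directly, which is what the hunks below do.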

Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit c08ac06ab3f3cdb8d34376c3a8a5e46a31a62c8f)
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
arch/ia64/kvm/kvm-ia64.c
arch/powerpc/kvm/book3s_hv.c
arch/x86/kvm/x86.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 985bf80c622e6dea757f16e75cb5004210196cd3..53f44bee9ebb2cc1efcb4c46b0549b5cfd9a4a48 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -702,7 +702,7 @@ again:
 out:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        if (r > 0) {
-               kvm_resched(vcpu);
+               cond_resched();
                idx = srcu_read_lock(&vcpu->kvm->srcu);
                goto again;
        }
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 550f5928b394f6cb4c1da71978031a1ce1f96b79..717e5b525f3b1dba54ae687f9a51e6a2a7af4f87 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1253,7 +1253,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
        kvm_guest_exit();
 
        preempt_enable();
-       kvm_resched(vcpu);
+       cond_resched();
 
        spin_lock(&vc->lock);
        now = get_tb();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5276618579d34669779a6061f0fa66610f3532f4..805c8e92cf669167920c282aa3da5c6f11fe9d8a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5947,7 +5947,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                }
                if (need_resched()) {
                        srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-                       kvm_resched(vcpu);
+                       cond_resched();
                        vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
                }
        }
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6d24a2671d29899af8b47ee48017878389511237..9c0f8545df73ec8f78d2c4054d5ed5b87242316e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -569,7 +569,6 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
-void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2dd59b957164b9ab24c76700194b57e8f8a83a5b..cb9a865c8e01135e3fc17086cec0cf2bf7f5963e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1706,14 +1706,6 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
 #endif /* !CONFIG_S390 */
 
-void kvm_resched(struct kvm_vcpu *vcpu)
-{
-       if (!need_resched())
-               return;
-       cond_resched();
-}
-EXPORT_SYMBOL_GPL(kvm_resched);
-
 bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
 {
        struct pid *pid;