diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c446435..9799cab 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -275,7 +275,6 @@ struct kvm {
 #endif
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
 	atomic_t online_vcpus;
-	int last_boosted_vcpu;
 	struct list_head vm_list;
 	struct mutex lock;
 	struct kvm_io_bus *buses[KVM_NR_BUSES];
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7e14068..6bab9f7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -49,6 +49,7 @@
 #include
 #include
 #include
+#include <linux/random.h>
 #include
 #include
@@ -1572,31 +1573,32 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
 	struct kvm *kvm = me->kvm;
 	struct kvm_vcpu *vcpu;
-	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
+	int vcpu_to_boost;
 	int yielded = 0;
 	int pass;
 	int i;
+	int num_vcpus = atomic_read(&kvm->online_vcpus);
 
+	vcpu_to_boost = (random32() % num_vcpus);
 	/*
 	 * We boost the priority of a VCPU that is runnable but not
 	 * currently running, because it got preempted by something
 	 * else and called schedule in __vcpu_run. Hopefully that
 	 * VCPU is holding the lock that we need and will release it.
-	 * We approximate round-robin by starting at the last boosted VCPU.
+	 * We approximate round-robin by starting at a random VCPU.
 	 */
 	for (pass = 0; pass < 2 && !yielded; pass++) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
-			if (!pass && i < vcpu_to_boost) {
-				i = last_boosted_vcpu;
+			if (!pass && i < vcpu_to_boost) {
+				i = vcpu_to_boost;
 				continue;
-			} else if (pass && i > vcpu_to_boost)
+			} else if (pass && i > vcpu_to_boost)
 				break;
 			if (vcpu == me)
 				continue;
 			if (waitqueue_active(&vcpu->wq))
 				continue;
 			if (kvm_vcpu_yield_to(vcpu)) {
-				kvm->last_boosted_vcpu = i;
 				yielded = 1;
 				break;
 			}