[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1504007201-12904-3-git-send-email-yang.zhang.wz@gmail.com>
Date: Tue, 29 Aug 2017 11:46:36 +0000
From: Yang Zhang <yang.zhang.wz@...il.com>
To: linux-kernel@...r.kernel.org
Cc: kvm@...r.kernel.org, wanpeng.li@...mail.com, mst@...hat.com,
pbonzini@...hat.com, tglx@...utronix.de, rkrcmar@...hat.com,
dmatlack@...gle.com, agraf@...e.de, peterz@...radead.org,
linux-doc@...r.kernel.org, Yang Zhang <yang.zhang.wz@...il.com>,
Quan Xu <quan.xu0@...il.com>, Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>, x86@...nel.org
Subject: [RFC PATCH v2 2/7] KVM guest: register kvm_idle_poll for pv_idle_ops
Although smart idle poll has nothing to do with paravirt, it cannot
bring any benefit to native. So we only enable it when Linux runs as a
KVM guest (it could be extended to other hypervisors such as Xen, Hyper-V
and VMware).
Introduce per-CPU variable poll_duration_ns to control the max poll
time.
Signed-off-by: Yang Zhang <yang.zhang.wz@...il.com>
Signed-off-by: Quan Xu <quan.xu0@...il.com>
Cc: Paolo Bonzini <pbonzini@...hat.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: x86@...nel.org
Cc: kvm@...r.kernel.org
Cc: linux-kernel@...r.kernel.org
---
arch/x86/kernel/kvm.c | 26 ++++++++++++++++++++++++++
1 file changed, 26 insertions(+)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index d04e30e..7d84a02 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -75,6 +75,7 @@ static int parse_no_kvmclock_vsyscall(char *arg)
early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
+static DEFINE_PER_CPU(unsigned long, poll_duration_ns);
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;
@@ -357,6 +358,29 @@ static void kvm_guest_cpu_init(void)
kvm_register_steal_time();
}
+/*
+ * Poll (busy-wait) for up to poll_duration_ns before halting, so a
+ * wakeup arriving during the window avoids a costly VM exit + re-entry.
+ * Bails out immediately once the scheduler wants the CPU back.
+ */
+static void kvm_idle_poll(void)
+{
+	unsigned long poll_duration = this_cpu_read(poll_duration_ns);
+	ktime_t start, cur, stop;
+
+	start = cur = ktime_get();
+	/* Derive the deadline from 'start'; a second ktime_get() here
+	 * would silently stretch the window past poll_duration_ns. */
+	stop = ktime_add_ns(start, poll_duration);
+
+	do {
+		if (need_resched())
+			break;
+		cur = ktime_get();
+	} while (ktime_before(cur, stop));
+}
+
+/* Install the KVM poll callback, but only when running as a KVM guest. */
+static void kvm_guest_idle_init(void)
+{
+	if (kvm_para_available())
+		pv_idle_ops.poll = kvm_idle_poll;
+}
+
static void kvm_pv_disable_apf(void)
{
if (!__this_cpu_read(apf_reason.enabled))
@@ -492,6 +516,8 @@ void __init kvm_guest_init(void)
kvm_guest_cpu_init();
#endif
+ kvm_guest_idle_init();
+
/*
* Hard lockup detection is enabled by default. Disable it, as guests
* can get false positives too easily, for example if the host is
--
1.8.3.1
Powered by blists - more mailing lists