Message-ID: <1502095466-21312-3-git-send-email-longpeng2@huawei.com>
Date:   Mon, 7 Aug 2017 16:44:25 +0800
From:   "Longpeng(Mike)" <longpeng2@...wei.com>
To:     <pbonzini@...hat.com>, <rkrcmar@...hat.com>
CC:     <agraf@...e.com>, <borntraeger@...ibm.com>, <cohuck@...hat.com>,
        <christoffer.dall@...aro.org>, <marc.zyngier@....com>,
        <james.hogan@...tec.com>, <kvm@...r.kernel.org>,
        <linux-kernel@...r.kernel.org>, <weidong.huang@...wei.com>,
        <arei.gonglei@...wei.com>, <wangxinxin.wang@...wei.com>,
        <longpeng.mike@...il.com>, <david@...hat.com>,
        "Longpeng(Mike)" <longpeng2@...wei.com>
Subject: [PATCH 2/3] KVM: X86: implement the logic for spinlock optimization

Implement kvm_arch_vcpu_spin_in_kernel() and
kvm_arch_vcpu_preempt_in_kernel() for x86. Since get_cpl() requires
vcpu_load, the preempted case cannot read the CPL after the fact;
instead, cache whether the vCPU was in kernel mode (CPL = 0) at
preemption time in struct kvm_vcpu_arch. A short usage sketch of these
hooks follows the '---' separator below.

Signed-off-by: Longpeng(Mike) <longpeng2@...wei.com>
---
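A usage sketch, not part of this patch: assuming the generic
directed-yield path in virt/kvm/kvm_main.c (touched elsewhere in this
series) filters yield candidates with the two arch hooks, the check
could look roughly like the helper below. The helper name is
hypothetical and the exact caller may differ.

static bool candidate_worth_yielding_to(struct kvm_vcpu *me,
					struct kvm_vcpu *candidate)
{
	/*
	 * A vCPU spinning in guest kernel mode is usually waiting for
	 * a lock held by another vCPU that was preempted while in
	 * kernel mode, so skip candidates preempted in user mode.
	 */
	if (kvm_arch_vcpu_spin_in_kernel(me) &&
	    !kvm_arch_vcpu_preempt_in_kernel(candidate))
		return false;

	return true;
}
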
 arch/x86/include/asm/kvm_host.h |  5 +++++
 arch/x86/kvm/svm.c              |  6 ++++++
 arch/x86/kvm/vmx.c              | 20 ++++++++++++++++++++
 arch/x86/kvm/x86.c              |  9 +++++++--
 4 files changed, 38 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 87ac4fb..d2b2d57 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -688,6 +688,9 @@ struct kvm_vcpu_arch {
 
 	/* GPA available (AMD only) */
 	bool gpa_available;
+
+	/* set if the vCPU was preempted while in kernel mode (CPL = 0) */
+	bool preempted_in_kernel;
 };
 
 struct kvm_lpage_info {
@@ -1057,6 +1060,8 @@ struct kvm_x86_ops {
 	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
 
 	void (*setup_mce)(struct kvm_vcpu *vcpu);
+
+	bool (*spin_in_kernel)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_arch_async_pf {
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 4d8141e..552ab4c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5352,6 +5352,11 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
 	vcpu->arch.mcg_cap &= 0x1ff;
 }
 
+static bool svm_spin_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return svm_get_cpl(vcpu) == 0;
+}
+
 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -5464,6 +5469,7 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
 	.deliver_posted_interrupt = svm_deliver_avic_intr,
 	.update_pi_irte = svm_update_pi_irte,
 	.setup_mce = svm_setup_mce,
+	.spin_in_kernel = svm_spin_in_kernel,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 39a6222..d0dfe2e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11547,6 +11547,25 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
 			~FEATURE_CONTROL_LMCE;
 }
 
+static bool vmx_spin_in_kernel(struct kvm_vcpu *vcpu)
+{
+	u32 secondary_exec_ctrl = 0;
+
+	/*
+	 * Intel SDM Vol. 3, Section 25.1.3: the “PAUSE-loop exiting”
+	 * VM-execution control is ignored if CPL > 0, so a PAUSE-loop
+	 * exit can only occur while the vCPU is running at CPL 0.
+	 *
+	 * Checking that control takes fewer cycles than vmx_get_cpl().
+	 */
+	if (cpu_has_secondary_exec_ctrls())
+		secondary_exec_ctrl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+	if (secondary_exec_ctrl & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
+		return true;
+
+	return vmx_get_cpl(vcpu) == 0;
+}
+
 static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = cpu_has_kvm_support,
 	.disabled_by_bios = vmx_disabled_by_bios,
@@ -11674,6 +11693,7 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
 #endif
 
 	.setup_mce = vmx_setup_mce,
+	.spin_in_kernel = vmx_spin_in_kernel,
 };
 
 static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 04c6a1f..fa79a60 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2881,6 +2881,10 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	int idx;
+
+	if (vcpu->preempted)
+		vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu);
+
 	/*
 	 * Disable page faults because we're in atomic context here.
 	 * kvm_write_guest_offset_cached() would call might_fault()
@@ -7988,6 +7992,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	kvm_pmu_init(vcpu);
 
 	vcpu->arch.pending_external_vector = -1;
+	vcpu->arch.preempted_in_kernel = false;
 
 	kvm_hv_vcpu_init(vcpu);
 
@@ -8437,12 +8442,12 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 
 bool kvm_arch_vcpu_spin_in_kernel(struct kvm_vcpu *vcpu)
 {
-	return false;
+	return kvm_x86_ops->spin_in_kernel(vcpu);
 }
 
 bool kvm_arch_vcpu_preempt_in_kernel(struct kvm_vcpu *vcpu)
 {
-	return false;
+	return vcpu->arch.preempted_in_kernel;
 }
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
-- 
1.8.3.1

