Date: Thu, 7 Mar 2024 10:14:26 +0800 (CST)
From: Zheyun Shen <szy0127@...u.edu.cn>
To: Sean Christopherson <seanjc@...gle.com>, 
	Thomas Lendacky <thomas.lendacky@....com>, 
	pbonzini <pbonzini@...hat.com>, tglx <tglx@...utronix.de>
Cc: kvm <kvm@...r.kernel.org>, linux-kernel <linux-kernel@...r.kernel.org>
Subject: [PATCH v2] KVM: SVM: Flush cache only on CPUs running SEV guest

On AMD CPUs that do not enforce cache coherency for encrypted guest 
memory, every memory page reclamation from an SEV guest triggers a call 
to wbinvd_on_all_cpus(), which flushes the caches of every CPU on the 
host and thereby hurts the performance of other programs.

Typically, an AMD server may have 128 cores or more, while the SEV guest 
might only use 8 of those cores. Meanwhile, the host can use a tool such 
as qemu-affinity to bind those 8 vCPUs to specific physical CPUs.
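
For illustration, the same pinning can also be done programmatically 
with sched_setaffinity(2). This is a minimal sketch, not part of the 
patch; vcpu_tid is a placeholder for a QEMU vCPU thread ID (for example 
one listed under /proc/<qemu-pid>/task/):

#define _GNU_SOURCE
#include <sched.h>

/* Pin one vCPU thread to one physical CPU (illustration only). */
static int pin_vcpu_thread(pid_t vcpu_tid, int phys_cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(phys_cpu, &set);

	/* Returns 0 on success, -1 with errno set on failure. */
	return sched_setaffinity(vcpu_tid, sizeof(set), &set);
}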

Therefore, recording which physical CPUs a vCPU has run on allows the 
flush to target only those CPUs instead of flushing the caches of every 
CPU in the host.
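
As a rough userspace analogue of that scheme (hypothetical names; GCC 
atomic builtins stand in for the kernel's cpumask helpers), setting a 
per-CPU bit on each run and test-and-clearing it on flush means each 
dirty CPU is flushed at most once even with concurrent flushers:

#include <limits.h>
#include <stdio.h>

#define NR_CPUS		128
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

/* One bit per CPU: set when the guest runs there, cleared by a flush. */
static unsigned long dirty_mask[(NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG];

/* Analogous to the cpumask_set_cpu() call in pre_sev_run(). */
static void mark_cpu_dirty(int cpu)
{
	__atomic_fetch_or(&dirty_mask[cpu / BITS_PER_LONG],
			  1UL << (cpu % BITS_PER_LONG), __ATOMIC_RELAXED);
}

/* Analogous to sev_do_wbinvd(): flush only CPUs whose bit was set. */
static void flush_dirty_cpus(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		unsigned long bit = 1UL << (cpu % BITS_PER_LONG);
		unsigned long old =
			__atomic_fetch_and(&dirty_mask[cpu / BITS_PER_LONG],
					   ~bit, __ATOMIC_RELAXED);
		if (old & bit)
			printf("WBINVD on CPU %d\n", cpu); /* wbinvd_on_cpu() stand-in */
	}
}

int main(void)
{
	mark_cpu_dirty(3);
	mark_cpu_dirty(42);
	flush_dirty_cpus();	/* prints CPUs 3 and 42 only */
	return 0;
}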

Since sev_flush_asids() is not tied to a single VM, it cannot rely on a 
per-VM mask, so all calls to wbinvd_on_all_cpus() are replaced with 
sev_do_wbinvd() except the one in sev_flush_asids().

Signed-off-by: Zheyun Shen <szy0127@...u.edu.cn>
---
 arch/x86/kvm/svm/sev.c | 27 +++++++++++++++++++++++----
 arch/x86/kvm/svm/svm.h |  3 +++
 2 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index f760106c3..743931e33 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -215,6 +215,24 @@ static void sev_asid_free(struct kvm_sev_info *sev)
 	sev->misc_cg = NULL;
 }
 
+static struct cpumask *sev_get_wbinvd_dirty_mask(struct kvm *kvm)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+	return &sev->wbinvd_dirty_mask;
+}
+
+static void sev_do_wbinvd(struct kvm *kvm)
+{
+	int cpu;
+	struct cpumask *dirty_mask = sev_get_wbinvd_dirty_mask(kvm);
+
+	for_each_possible_cpu(cpu) {
+		if (cpumask_test_and_clear_cpu(cpu, dirty_mask))
+			wbinvd_on_cpu(cpu);
+	}
+}
+
 static void sev_decommission(unsigned int handle)
 {
 	struct sev_data_decommission decommission;
@@ -2048,7 +2066,7 @@ int sev_mem_enc_unregister_region(struct kvm *kvm,
 	 * releasing the pages back to the system for use. CLFLUSH will
 	 * not do this, so issue a WBINVD.
 	 */
-	wbinvd_on_all_cpus();
+	sev_do_wbinvd(kvm);
 
 	__unregister_enc_region_locked(kvm, region);
 
@@ -2152,7 +2170,7 @@ void sev_vm_destroy(struct kvm *kvm)
 	 * releasing the pages back to the system for use. CLFLUSH will
 	 * not do this, so issue a WBINVD.
 	 */
-	wbinvd_on_all_cpus();
+	sev_do_wbinvd(kvm);
 
 	/*
 	 * if userspace was terminated before unregistering the memory regions
@@ -2343,7 +2361,7 @@ static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
 	return;
 
 do_wbinvd:
-	wbinvd_on_all_cpus();
+	sev_do_wbinvd(vcpu->kvm);
 }
 
 void sev_guest_memory_reclaimed(struct kvm *kvm)
@@ -2351,7 +2369,7 @@ void sev_guest_memory_reclaimed(struct kvm *kvm)
 	if (!sev_guest(kvm))
 		return;
 
-	wbinvd_on_all_cpus();
+	sev_do_wbinvd(kvm);
 }
 
 void sev_free_vcpu(struct kvm_vcpu *vcpu)
@@ -2648,6 +2666,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
 	sd->sev_vmcbs[asid] = svm->vmcb;
 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
 	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
+	cpumask_set_cpu(cpu, sev_get_wbinvd_dirty_mask(svm->vcpu.kvm));
 }
 
 #define GHCB_SCRATCH_AREA_LIMIT		(16ULL * PAGE_SIZE)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 8ef95139c..de240a9e9 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -90,6 +90,9 @@ struct kvm_sev_info {
 	struct list_head mirror_entry; /* Use as a list entry of mirrors */
 	struct misc_cg *misc_cg; /* For misc cgroup accounting */
 	atomic_t migration_in_progress;
+
+	/* CPUs that have run this guest; they need WBINVD when guest memory is reclaimed */
+	struct cpumask wbinvd_dirty_mask;
 };
 
 struct kvm_svm {
-- 
2.34.1