Message-ID: <20250227014858.3244505-7-seanjc@google.com>
Date: Wed, 26 Feb 2025 17:48:57 -0800
From: Sean Christopherson <seanjc@...gle.com>
To: Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>
Cc: linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
Zheyun Shen <szy0127@...u.edu.cn>, Tom Lendacky <thomas.lendacky@....com>,
Kevin Loughlin <kevinloughlin@...gle.com>, Mingwei Zhang <mizhang@...gle.com>
Subject: [PATCH 6/7] x86, lib: Add wbinvd and wbnoinvd helpers to target multiple CPUs
From: Zheyun Shen <szy0127@...u.edu.cn>
Extract KVM's open-coded calls for writing back caches on multiple CPUs
into common library helpers for both WBINVD and WBNOINVD (KVM will use
both).

Put the onus on the caller to check for a non-empty mask to simplify the
SMP=n implementation, e.g. so that it doesn't need to check that the one
and only CPU in the system is present in the mask.
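
A minimal sketch of the expected calling pattern (illustrative only:
"mask" is a hypothetical cpumask; KVM's wbinvd_dirty_mask usage below
follows the same shape), where adding the current CPU up front guarantees
the mask is non-empty:

	int cpu = get_cpu();

	cpumask_set_cpu(cpu, mask);	/* mask now has at least one CPU */
	wbinvd_on_many_cpus(mask);
	put_cpu();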
Signed-off-by: Zheyun Shen <szy0127@...u.edu.cn>
Reviewed-by: Tom Lendacky <thomas.lendacky@....com>
Link: https://lore.kernel.org/r/20250128015345.7929-2-szy0127@sjtu.edu.cn
[sean: move to lib, add SMP=n helpers, clarify usage]
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
 arch/x86/include/asm/smp.h | 12 ++++++++++++
 arch/x86/kvm/x86.c         |  8 +-------
 arch/x86/lib/cache-smp.c   | 12 ++++++++++++
 3 files changed, 25 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index d4c50128aa6c..df828b36e33f 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -112,7 +112,9 @@ void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
 void wbinvd_on_all_cpus(void);
+void wbinvd_on_many_cpus(struct cpumask *cpus);
 void wbnoinvd_on_all_cpus(void);
+void wbnoinvd_on_many_cpus(struct cpumask *cpus);
 
 void smp_kick_mwait_play_dead(void);
 
@@ -160,11 +162,21 @@ static inline void wbinvd_on_all_cpus(void)
 	wbinvd();
 }
 
+static inline void wbinvd_on_many_cpus(struct cpumask *cpus)
+{
+	wbinvd();
+}
+
 static inline void wbnoinvd_on_all_cpus(void)
 {
 	wbnoinvd();
 }
 
+static inline void wbnoinvd_on_many_cpus(struct cpumask *cpus)
+{
+	wbnoinvd();
+}
+
 static inline struct cpumask *cpu_llc_shared_mask(int cpu)
 {
 	return (struct cpumask *)cpumask_of(0);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index eab1e64a19a2..8146c3e7eb40 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4957,11 +4957,6 @@ long kvm_arch_dev_ioctl(struct file *filp,
 	return r;
 }
 
-static void wbinvd_ipi(void *garbage)
-{
-	wbinvd();
-}
-
 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
 {
 	return kvm_arch_has_noncoherent_dma(vcpu->kvm);
@@ -8236,8 +8231,7 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
 		int cpu = get_cpu();
 
 		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
-		on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask,
-				 wbinvd_ipi, NULL, 1);
+		wbinvd_on_many_cpus(vcpu->arch.wbinvd_dirty_mask);
 		put_cpu();
 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
 	} else
diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
index 1789db5d8825..ebbc91b3ac67 100644
--- a/arch/x86/lib/cache-smp.c
+++ b/arch/x86/lib/cache-smp.c
@@ -20,6 +20,12 @@ void wbinvd_on_all_cpus(void)
 }
 EXPORT_SYMBOL(wbinvd_on_all_cpus);
 
+void wbinvd_on_many_cpus(struct cpumask *cpus)
+{
+	on_each_cpu_mask(cpus, __wbinvd, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(wbinvd_on_many_cpus);
+
 static void __wbnoinvd(void *dummy)
 {
 	wbnoinvd();
@@ -30,3 +36,9 @@ void wbnoinvd_on_all_cpus(void)
 	on_each_cpu(__wbnoinvd, NULL, 1);
 }
 EXPORT_SYMBOL(wbnoinvd_on_all_cpus);
+
+void wbnoinvd_on_many_cpus(struct cpumask *cpus)
+{
+	on_each_cpu_mask(cpus, __wbnoinvd, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(wbnoinvd_on_many_cpus);
--
2.48.1.711.g2feabab25a-goog