Message-ID: <175215886584.406.10370603448880022710.tip-bot2@tip-bot2>
Date: Thu, 10 Jul 2025 14:47:45 -0000
From: "tip-bot2 for Zheyun Shen" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Zheyun Shen <szy0127@...u.edu.cn>, Sean Christopherson <seanjc@...gle.com>,
"Borislav Petkov (AMD)" <bp@...en8.de>,
Tom Lendacky <thomas.lendacky@....com>, Kai Huang <kai.huang@...el.com>,
x86@...nel.org, linux-kernel@...r.kernel.org
Subject: [tip: x86/core] x86/lib: Add WBINVD and WBNOINVD helpers to target
multiple CPUs

The following commit has been merged into the x86/core branch of tip:

Commit-ID: 4fdc3431e03b9c11803f399f91837fca487029a1
Gitweb: https://git.kernel.org/tip/4fdc3431e03b9c11803f399f91837fca487029a1
Author: Zheyun Shen <szy0127@...u.edu.cn>
AuthorDate: Thu, 22 May 2025 16:37:28 -07:00
Committer: Borislav Petkov (AMD) <bp@...en8.de>
CommitterDate: Thu, 10 Jul 2025 13:30:17 +02:00

x86/lib: Add WBINVD and WBNOINVD helpers to target multiple CPUs

Extract KVM's open-coded calls that write back caches on multiple CPUs into
common library helpers for both WBINVD and WBNOINVD (KVM will use both).
Put the onus on the caller to check for a non-empty mask to simplify the
SMP=n implementation, e.g. so that it doesn't need to check that the one
and only CPU in the system is present in the mask.

[sean: move to lib, add SMP=n helpers, clarify usage]

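As an illustration of that caller contract, here is a minimal sketch of how a
user of the new helper might look. The flush_dirty_cpus() wrapper and its mask
handling are illustrative assumptions, not code from this series:

#include <linux/cpumask.h>
#include <asm/smp.h>

/*
 * Hypothetical caller: write back caches on the CPUs recorded in @dirty.
 * The library helpers do not check the mask themselves, so the caller
 * skips the call when the mask is empty, which is what keeps the SMP=n
 * stubs trivial.
 */
static void flush_dirty_cpus(struct cpumask *dirty)
{
        if (cpumask_empty(dirty))
                return;

        wbinvd_on_cpus_mask(dirty);
        cpumask_clear(dirty);
}
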
Signed-off-by: Zheyun Shen <szy0127@...u.edu.cn>
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
Signed-off-by: Borislav Petkov (AMD) <bp@...en8.de>
Reviewed-by: Tom Lendacky <thomas.lendacky@....com>
Acked-by: Kai Huang <kai.huang@...el.com>
Link: https://lore.kernel.org/r/20250128015345.7929-2-szy0127@sjtu.edu.cn
Link: https://lore.kernel.org/20250522233733.3176144-5-seanjc@google.com
---
arch/x86/include/asm/smp.h | 12 ++++++++++++
arch/x86/kvm/x86.c | 3 +--
arch/x86/lib/cache-smp.c | 12 ++++++++++++
3 files changed, 25 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index e08f1ae..22bfebe 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -113,7 +113,9 @@ void native_play_dead(void);
void play_dead_common(void);
void wbinvd_on_cpu(int cpu);
void wbinvd_on_all_cpus(void);
+void wbinvd_on_cpus_mask(struct cpumask *cpus);
void wbnoinvd_on_all_cpus(void);
+void wbnoinvd_on_cpus_mask(struct cpumask *cpus);
void smp_kick_mwait_play_dead(void);
void __noreturn mwait_play_dead(unsigned int eax_hint);
@@ -154,11 +156,21 @@ static inline void wbinvd_on_all_cpus(void)
wbinvd();
}
+static inline void wbinvd_on_cpus_mask(struct cpumask *cpus)
+{
+ wbinvd();
+}
+
static inline void wbnoinvd_on_all_cpus(void)
{
wbnoinvd();
}
+static inline void wbnoinvd_on_cpus_mask(struct cpumask *cpus)
+{
+ wbnoinvd();
+}
+
static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
return (struct cpumask *)cpumask_of(0);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a9d992d..5a2160f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8289,8 +8289,7 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
int cpu = get_cpu();
cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
- on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask,
- wbinvd_ipi, NULL, 1);
+ wbinvd_on_cpus_mask(vcpu->arch.wbinvd_dirty_mask);
put_cpu();
cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
} else
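
For reference, the open-coded path that this hunk replaces used a private IPI
callback in KVM; an approximate reconstruction (the callback itself is not
shown in this patch, so treat its exact shape as an assumption) is:

/*
 * KVM's previous per-CPU callback, passed to on_each_cpu_mask(); the new
 * wbinvd_on_cpus_mask() helper wraps an equivalent on_each_cpu_mask()
 * call in arch/x86/lib/cache-smp.c instead.
 */
static void wbinvd_ipi(void *ignored)
{
        wbinvd();
}
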
diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
index 74e0d5b..c5c60d0 100644
--- a/arch/x86/lib/cache-smp.c
+++ b/arch/x86/lib/cache-smp.c
@@ -20,6 +20,12 @@ void wbinvd_on_all_cpus(void)
}
EXPORT_SYMBOL(wbinvd_on_all_cpus);
+void wbinvd_on_cpus_mask(struct cpumask *cpus)
+{
+ on_each_cpu_mask(cpus, __wbinvd, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(wbinvd_on_cpus_mask);
+
static void __wbnoinvd(void *dummy)
{
wbnoinvd();
@@ -30,3 +36,9 @@ void wbnoinvd_on_all_cpus(void)
on_each_cpu(__wbnoinvd, NULL, 1);
}
EXPORT_SYMBOL_GPL(wbnoinvd_on_all_cpus);
+
+void wbnoinvd_on_cpus_mask(struct cpumask *cpus)
+{
+ on_each_cpu_mask(cpus, __wbnoinvd, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(wbnoinvd_on_cpus_mask);
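
To show how both new helpers might be used together (per the commit message,
KVM will use both), here is a hedged sketch; the flush_cpus() wrapper and its
invalidate flag are illustrative assumptions only:

#include <linux/cpumask.h>
#include <asm/smp.h>

/*
 * Illustrative caller: write back, and optionally also invalidate, the
 * caches on every CPU in @cpus. Per the caller contract described above,
 * @cpus is assumed to be non-empty.
 */
static void flush_cpus(struct cpumask *cpus, bool invalidate)
{
        if (invalidate)
                wbinvd_on_cpus_mask(cpus);
        else
                wbnoinvd_on_cpus_mask(cpus);
}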