Message-ID: <20230812195729.512632124@linutronix.de>
Date: Sat, 12 Aug 2023 21:59:29 +0200 (CEST)
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: x86@...nel.org, Borislav Petkov <bp@...en8.de>,
Ashok Raj <ashok.raj@...el.com>,
Arjan van de Ven <arjan@...ux.intel.com>,
Nikolay Borisov <nik.borisov@...e.com>
Subject: [patch V2 34/37] x86/apic: Provide apic_force_nmi_on_cpu()
From: Thomas Gleixner <tglx@...utronix.de>
When SMT siblings are soft-offlined and parked in one of the play_dead()
variants they still react to NMIs, which is problematic on affected Intel
CPUs. The default play_dead() variant uses MWAIT on modern CPUs, which is
not guaranteed to be safe when the microcode is updated concurrently.
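For context, a stripped-down sketch of that MWAIT park loop (modeled on
mwait_play_dead() in arch/x86/kernel/smpboot.c; names are illustrative,
not the actual code) shows why a parked CPU still reacts to NMIs:

	/* Simplified shape of the MWAIT park loop, not the actual code */
	for (;;) {
		__monitor(mwait_ptr, 0, 0);	/* Arm the monitored address */
		mb();
		__mwait(eax, 0);		/* Sleep; an NMI wakes the CPU */
		/* Execution resumes here once the NMI handler returns */
	}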
Right now late loading is prevented when not all SMT siblings are online,
but as they still react to NMIs, it is possible to bring them out of their
park position into a trivial rendezvous handler, as sketched below.
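Such a rendezvous handler can be as simple as an NMI callback which counts
the arriving siblings and spins until the control CPU releases them. This
is a minimal sketch with made-up names (ucode_arrived, ucode_release,
ucode_nmi_handler), not the handler added later in this series:

	static atomic_t ucode_arrived;	/* Parked CPUs which took the NMI */
	static atomic_t ucode_release;	/* Set by the control CPU when done */

	/* Hypothetical handler, hooked up via register_nmi_handler() */
	static int ucode_nmi_handler(unsigned int cmd, struct pt_regs *regs)
	{
		atomic_inc(&ucode_arrived);
		while (!atomic_read(&ucode_release))
			cpu_relax();
		return NMI_HANDLED;
	}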
Provide a function which allows doing that. It sanity checks whether the
target is in the cpus_booted_once_mask and whether the APIC driver
supports it.
Mark X2APIC and XAPIC as capable, but exclude 32-bit and the UV and
NUMACHIP variants, as enabling those needs feedback from the relevant
experts.
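As a usage illustration only (not the call site added by this series), a
caller could kick every soft-offlined sibling which has booted at least
once:

	unsigned int cpu;

	/* Hypothetical caller: NMI all parked siblings into the rendezvous */
	for_each_cpu(cpu, &cpus_booted_once_mask) {
		if (!cpu_online(cpu))
			apic_send_nmi_to_offline_cpu(cpu);
	}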
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
arch/x86/include/asm/apic.h | 5 +++++
arch/x86/kernel/apic/apic_flat_64.c | 2 ++
arch/x86/kernel/apic/ipi.c | 9 ++++++++-
arch/x86/kernel/apic/x2apic_cluster.c | 1 +
arch/x86/kernel/apic/x2apic_phys.c | 1 +
5 files changed, 17 insertions(+), 1 deletion(-)
---
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -301,6 +301,9 @@ struct apic {
 	enum apic_delivery_modes delivery_mode;
 	bool	dest_mode_logical;
 
+	/* Allows to send an NMI to an "offline" CPU which hangs in *play_dead() */
+	bool	nmi_to_offline_cpu;
+
 	u32	(*calc_dest_apicid)(unsigned int cpu);
 
 	/* ICR related functions */
@@ -505,6 +508,8 @@ extern void default_ioapic_phys_id_map(p
 extern int default_cpu_present_to_apicid(int mps_cpu);
 extern int default_check_phys_apicid_present(int phys_apicid);
 
+void apic_send_nmi_to_offline_cpu(unsigned int cpu);
+
 #endif /* CONFIG_X86_LOCAL_APIC */
 
 #ifdef CONFIG_SMP
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -138,6 +138,7 @@ static struct apic apic_flat __ro_after_
 	.send_IPI_allbutself	= default_send_IPI_allbutself,
 	.send_IPI_all		= default_send_IPI_all,
 	.send_IPI_self		= default_send_IPI_self,
+	.nmi_to_offline_cpu	= true,
 
 	.inquire_remote_apic	= default_inquire_remote_apic,
@@ -229,6 +230,7 @@ static struct apic apic_physflat __ro_af
 	.send_IPI_allbutself	= default_send_IPI_allbutself,
 	.send_IPI_all		= default_send_IPI_all,
 	.send_IPI_self		= default_send_IPI_self,
+	.nmi_to_offline_cpu	= true,
 
 	.inquire_remote_apic	= default_inquire_remote_apic,
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -95,8 +95,15 @@ void native_send_call_func_ipi(const str
 	apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
 }
 
+void apic_send_nmi_to_offline_cpu(unsigned int cpu)
+{
+	if (WARN_ON_ONCE(!apic->nmi_to_offline_cpu))
+		return;
+	if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, &cpus_booted_once_mask)))
+		return;
+	apic->send_IPI(cpu, NMI_VECTOR);
+}
 #endif /* CONFIG_SMP */
-
 static inline int __prepare_ICR2(unsigned int mask)
 {
 	return SET_XAPIC_DEST_FIELD(mask);
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -264,6 +264,7 @@ static struct apic apic_x2apic_cluster _
 	.send_IPI_allbutself	= x2apic_send_IPI_allbutself,
 	.send_IPI_all		= x2apic_send_IPI_all,
 	.send_IPI_self		= x2apic_send_IPI_self,
+	.nmi_to_offline_cpu	= true,
 
 	.inquire_remote_apic	= NULL,
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -188,6 +188,7 @@ static struct apic apic_x2apic_phys __ro
 	.send_IPI_allbutself	= x2apic_send_IPI_allbutself,
 	.send_IPI_all		= x2apic_send_IPI_all,
 	.send_IPI_self		= x2apic_send_IPI_self,
+	.nmi_to_offline_cpu	= true,
 
 	.inquire_remote_apic	= NULL,