Message-ID: <aQJLpSYz3jdazzdb@localhost.localdomain>
Date: Wed, 29 Oct 2025 18:15:17 +0100
From: Frederic Weisbecker <frederic@...nel.org>
To: Valentin Schneider <vschneid@...hat.com>
Cc: Phil Auld <pauld@...hat.com>, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, rcu@...r.kernel.org, x86@...nel.org,
linux-arm-kernel@...ts.infradead.org, loongarch@...ts.linux.dev,
linux-riscv@...ts.infradead.org, linux-arch@...r.kernel.org,
linux-trace-kernel@...r.kernel.org,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
"H. Peter Anvin" <hpa@...or.com>, Andy Lutomirski <luto@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Josh Poimboeuf <jpoimboe@...nel.org>,
Paolo Bonzini <pbonzini@...hat.com>, Arnd Bergmann <arnd@...db.de>,
"Paul E. McKenney" <paulmck@...nel.org>,
Jason Baron <jbaron@...mai.com>,
Steven Rostedt <rostedt@...dmis.org>,
Ard Biesheuvel <ardb@...nel.org>,
Sami Tolvanen <samitolvanen@...gle.com>,
"David S. Miller" <davem@...emloft.net>,
Neeraj Upadhyay <neeraj.upadhyay@...nel.org>,
Joel Fernandes <joelagnelf@...dia.com>,
Josh Triplett <josh@...htriplett.org>,
Boqun Feng <boqun.feng@...il.com>,
Uladzislau Rezki <urezki@...il.com>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Mel Gorman <mgorman@...e.de>,
Andrew Morton <akpm@...ux-foundation.org>,
Masahiro Yamada <masahiroy@...nel.org>,
Han Shen <shenhan@...gle.com>, Rik van Riel <riel@...riel.com>,
Jann Horn <jannh@...gle.com>,
Dan Carpenter <dan.carpenter@...aro.org>,
Oleg Nesterov <oleg@...hat.com>, Juri Lelli <juri.lelli@...hat.com>,
Clark Williams <williams@...hat.com>,
Yair Podemsky <ypodemsk@...hat.com>,
Marcelo Tosatti <mtosatti@...hat.com>,
Daniel Wagner <dwagner@...e.de>, Petr Tesarik <ptesarik@...e.com>
Subject: Re: [PATCH v6 00/29] context_tracking,x86: Defer some IPIs until a
user->kernel transition
On Wed, Oct 29, 2025 at 11:32:58AM +0100, Valentin Schneider wrote:
> I need to have a think about that one; one pain point I see is the context
> tracking work has to be NMI safe since e.g. an NMI can take us out of
> userspace. Another is that NOHZ-full CPUs need to be special cased in the
> stop machine queueing / completion.
>
> /me goes fetch a new notebook
Something like the below (untested)?
diff --git a/arch/x86/include/asm/context_tracking_work.h b/arch/x86/include/asm/context_tracking_work.h
index 485b32881fde..2940e28ecea6 100644
--- a/arch/x86/include/asm/context_tracking_work.h
+++ b/arch/x86/include/asm/context_tracking_work.h
@@ -3,6 +3,7 @@
#define _ASM_X86_CONTEXT_TRACKING_WORK_H
#include <asm/sync_core.h>
+#include <linux/stop_machine.h>
static __always_inline void arch_context_tracking_work(enum ct_work work)
{
@@ -10,6 +11,9 @@ static __always_inline void arch_context_tracking_work(enum ct_work work)
case CT_WORK_SYNC:
sync_core();
break;
+ case CT_WORK_STOP_MACHINE:
+ stop_machine_poll_wait();
+ break;
case CT_WORK_MAX:
WARN_ON_ONCE(true);
}
diff --git a/include/linux/context_tracking_work.h b/include/linux/context_tracking_work.h
index 2facc621be06..b63200bd73d6 100644
--- a/include/linux/context_tracking_work.h
+++ b/include/linux/context_tracking_work.h
@@ -6,12 +6,14 @@
enum {
CT_WORK_SYNC_OFFSET,
+ CT_WORK_STOP_MACHINE_OFFSET,
CT_WORK_MAX_OFFSET
};
enum ct_work {
- CT_WORK_SYNC = BIT(CT_WORK_SYNC_OFFSET),
- CT_WORK_MAX = BIT(CT_WORK_MAX_OFFSET)
+ CT_WORK_SYNC = BIT(CT_WORK_SYNC_OFFSET),
+ CT_WORK_STOP_MACHINE = BIT(CT_WORK_STOP_MACHINE_OFFSET),
+ CT_WORK_MAX = BIT(CT_WORK_MAX_OFFSET)
};
#include <asm/context_tracking_work.h>
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 72820503514c..0efe88e84b8a 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -36,6 +36,7 @@ bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
void stop_machine_park(int cpu);
void stop_machine_unpark(int cpu);
void stop_machine_yield(const struct cpumask *cpumask);
+void stop_machine_poll_wait(void);
extern void print_stop_info(const char *log_lvl, struct task_struct *task);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 3fe6b0c99f3d..8f0281b0db64 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -22,6 +22,7 @@
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>
+#include <linux/sched/isolation.h>
/*
* Structure to determine completion condition and record errors. May
@@ -176,6 +177,68 @@ struct multi_stop_data {
atomic_t thread_ack;
};
+static DEFINE_PER_CPU(int, stop_machine_poll);
+
+void stop_machine_poll_wait(void)
+{
+ int *poll = this_cpu_ptr(&stop_machine_poll);
+
+ while (*poll)
+ cpu_relax();
+ /* Make sure the work performed in stop machine is visible */
+ smp_mb();
+}
+
+static void stop_machine_poll_start(struct multi_stop_data *msdata)
+{
+ int cpu;
+
+ if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
+ return;
+
+ /* Random target can't be known in advance */
+ if (!msdata->active_cpus)
+ return;
+
+ for_each_cpu_andnot(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE)) {
+ int *poll = per_cpu_ptr(&stop_machine_poll, cpu);
+
+ if (cpumask_test_cpu(cpu, msdata->active_cpus))
+ continue;
+
+ *poll = 1;
+
+ /*
+ * Act as a full barrier so that if the work is queued, polling is
+ * visible.
+ */
+ if (ct_set_cpu_work(cpu, CT_WORK_STOP_MACHINE))
+ msdata->num_threads--;
+ else
+ *poll = 0;
+ }
+}
+
+static void stop_machine_poll_complete(struct multi_stop_data *msdata)
+{
+ int cpu;
+
+ if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
+ return;
+
+ for_each_cpu_andnot(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE)) {
+ int *poll = per_cpu_ptr(&stop_machine_poll, cpu);
+
+ if (cpumask_test_cpu(cpu, msdata->active_cpus))
+ continue;
+ /*
+ * The RmW in ack_state() fully orders the work performed in stop_machine()
+ * with polling.
+ */
+ *poll = 0;
+ }
+}
+
static void set_state(struct multi_stop_data *msdata,
enum multi_stop_state newstate)
{
@@ -186,10 +249,13 @@ static void set_state(struct multi_stop_data *msdata,
}
/* Last one to ack a state moves to the next state. */
-static void ack_state(struct multi_stop_data *msdata)
+static bool ack_state(struct multi_stop_data *msdata)
{
- if (atomic_dec_and_test(&msdata->thread_ack))
+ if (atomic_dec_and_test(&msdata->thread_ack)) {
set_state(msdata, msdata->state + 1);
+ return true;
+ }
+ return false;
}
notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
@@ -240,7 +306,8 @@ static int multi_cpu_stop(void *data)
default:
break;
}
- ack_state(msdata);
+ if (ack_state(msdata) && msdata->state == MULTI_STOP_EXIT)
+ stop_machine_poll_complete(msdata);
} else if (curstate > MULTI_STOP_PREPARE) {
/*
* At this stage all other CPUs we depend on must spin
@@ -615,6 +682,8 @@ int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
return ret;
}
+ stop_machine_poll_start(&msdata);
+
/* Set the initial state and stop all online cpus. */
set_state(&msdata, MULTI_STOP_PREPARE);
return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
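
FWIW, the ordering the two sides rely on boils down to the handshake sketched
below. This is a standalone userspace illustration (C11 atomics + pthreads),
not part of the patch: poll_flag, deferred_work and isolated_cpu() are made-up
stand-ins for the per-CPU stop_machine_poll flag, ct_set_cpu_work() and the
NOHZ-full CPU returning to the kernel.

/* Illustration only, not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int poll_flag;      /* stand-in for the per-CPU stop_machine_poll */
static atomic_int deferred_work;  /* stand-in for the CT_WORK_STOP_MACHINE bit  */
static int protected_data;        /* only written while the remote side polls   */

/* stop_machine_poll_wait() analogue: spin until the initiator is done. */
static void poll_wait(void)
{
	while (atomic_load_explicit(&poll_flag, memory_order_acquire))
		;	/* cpu_relax() equivalent omitted */
}

/* The isolated CPU: "in userspace" until the deferred work shows up. */
static void *isolated_cpu(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&deferred_work, memory_order_acquire))
		usleep(1000);
	poll_wait();
	printf("remote side sees protected_data = %d\n", protected_data);
	return NULL;
}

int main(void)
{
	pthread_t remote;

	pthread_create(&remote, NULL, isolated_cpu, NULL);

	/*
	 * stop_machine_poll_start(): set the poll flag first, then queue
	 * the work, so the remote can only observe the work with polling
	 * already visible (the patch relies on ct_set_cpu_work() being a
	 * full barrier for the same purpose).
	 */
	atomic_store_explicit(&poll_flag, 1, memory_order_relaxed);
	atomic_store_explicit(&deferred_work, 1, memory_order_release);

	/* The "stop machine" critical section. */
	protected_data = 42;

	/*
	 * stop_machine_poll_complete() at MULTI_STOP_EXIT: release the
	 * remote, ordering the update above before the flag clear.
	 */
	atomic_store_explicit(&poll_flag, 0, memory_order_release);

	pthread_join(remote, NULL);
	return 0;
}

In the patch the ordering on the completion side comes from the RmW in
ack_state(), as the comment in stop_machine_poll_complete() notes; the sketch
just makes the pairing explicit with release/acquire on the poll flag.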