Message-ID: <20250424192612.669992559@goodmis.org>
Date: Thu, 24 Apr 2025 15:24:58 -0400
From: Steven Rostedt <rostedt@...dmis.org>
To: linux-kernel@...r.kernel.org,
linux-trace-kernel@...r.kernel.org
Cc: Masami Hiramatsu <mhiramat@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Josh Poimboeuf <jpoimboe@...nel.org>,
x86@...nel.org,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Indu Bhagat <indu.bhagat@...cle.com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Ian Rogers <irogers@...gle.com>,
Adrian Hunter <adrian.hunter@...el.com>,
linux-perf-users@...r.kernel.org,
Mark Brown <broonie@...nel.org>,
linux-toolchains@...r.kernel.org,
Jordan Rome <jordalgo@...a.com>,
Sam James <sam@...too.org>,
Andrii Nakryiko <andrii.nakryiko@...il.com>,
Jens Remus <jremus@...ux.ibm.com>,
Florian Weimer <fweimer@...hat.com>,
Andy Lutomirski <luto@...nel.org>,
Weinan Liu <wnliu@...gle.com>,
Blake Jones <blakejones@...gle.com>,
Beau Belgrave <beaub@...ux.microsoft.com>,
"Jose E. Marchesi" <jemarch@....org>,
Alexander Aring <aahringo@...hat.com>
Subject: [PATCH v5 2/9] unwind_user/deferred: Make unwind deferral requests NMI-safe
From: Josh Poimboeuf <jpoimboe@...nel.org>
Make unwind_deferred_request() NMI-safe so tracers in NMI context can
call it to get the cookie immediately, rather than having to do the fragile
"schedule irq work and then call unwind_deferred_request()" dance.
Signed-off-by: Josh Poimboeuf <jpoimboe@...nel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@...dmis.org>
---
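For reviewers, a rough usage sketch of what this enables (not part of the
patch; the my_tracer_* names and the record helper are made up for
illustration, only unwind_deferred_request() and its signature come from
this series): a tracer can now request the deferred unwind directly from
its NMI handler and tag the sample with the returned cookie, instead of
bouncing through irq_work first.

static struct unwind_work my_tracer_unwind_work;

static void my_tracer_handle_nmi_event(void *event_data)
{
	u64 cookie;

	/*
	 * Safe in NMI context with this patch: the request either claims
	 * the task work directly or is routed through
	 * unwind_deferred_request_nmi() internally.
	 */
	if (unwind_deferred_request(&my_tracer_unwind_work, &cookie) < 0)
		return;

	/*
	 * Tag the sample with the cookie now; the user stack trace is
	 * delivered later from the task work callback with the same
	 * cookie, so the two can be matched up.
	 */
	my_tracer_record_sample(event_data, cookie);
}
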
include/linux/unwind_deferred_types.h | 1 +
kernel/unwind/deferred.c | 100 ++++++++++++++++++++++----
2 files changed, 89 insertions(+), 12 deletions(-)
diff --git a/include/linux/unwind_deferred_types.h b/include/linux/unwind_deferred_types.h
index 33373c32c221..8f47d77ddda0 100644
--- a/include/linux/unwind_deferred_types.h
+++ b/include/linux/unwind_deferred_types.h
@@ -10,6 +10,7 @@ struct unwind_cache {
struct unwind_task_info {
struct unwind_cache cache;
u64 cookie;
+ u64 nmi_cookie;
struct callback_head work;
int pending;
};
diff --git a/kernel/unwind/deferred.c b/kernel/unwind/deferred.c
index dc438c5f6618..2afd197da2ef 100644
--- a/kernel/unwind/deferred.c
+++ b/kernel/unwind/deferred.c
@@ -47,23 +47,47 @@ static u64 ctx_to_cookie(u64 cpu, u64 ctx)
/*
* Read the task context cookie, first initializing it if this is the first
- * call to get_cookie() since the most recent entry from user.
+ * call to get_cookie() since the most recent entry from user. This has to be
+ * done carefully to coordinate with unwind_deferred_request_nmi().
*/
static u64 get_cookie(struct unwind_task_info *info)
{
u64 ctx_ctr;
u64 cookie;
- u64 cpu;
guard(irqsave)();
- cookie = info->cookie;
+ cookie = READ_ONCE(info->cookie);
if (cookie)
return cookie;
- cpu = raw_smp_processor_id();
- ctx_ctr = __this_cpu_inc_return(unwind_ctx_ctr);
- info->cookie = ctx_to_cookie(cpu, ctx_ctr);
+ ctx_ctr = __this_cpu_read(unwind_ctx_ctr);
+
+ /* Read ctx_ctr before info->nmi_cookie */
+ barrier();
+
+ cookie = READ_ONCE(info->nmi_cookie);
+ if (cookie) {
+ /*
+ * This is the first call to get_cookie() since an NMI handler
+ * first wrote it to info->nmi_cookie. Sync it.
+ */
+ WRITE_ONCE(info->cookie, cookie);
+ WRITE_ONCE(info->nmi_cookie, 0);
+ return cookie;
+ }
+
+ /*
+ * Write info->cookie. It's ok to race with an NMI here. The value of
+ * the cookie is based on ctx_ctr from before the NMI could have
+ * incremented it. The result will be the same even if cookie or
+ * ctx_ctr end up getting written twice.
+ */
+ cookie = ctx_to_cookie(raw_smp_processor_id(), ctx_ctr + 1);
+ WRITE_ONCE(info->cookie, cookie);
+ WRITE_ONCE(info->nmi_cookie, 0);
+ barrier();
+ __this_cpu_write(unwind_ctx_ctr, ctx_ctr + 1);
return info->cookie;
}
@@ -140,6 +164,51 @@ static void unwind_deferred_task_work(struct callback_head *head)
WRITE_ONCE(info->cookie, 0);
}
+static int unwind_deferred_request_nmi(struct unwind_work *work, u64 *cookie)
+{
+ struct unwind_task_info *info = &current->unwind_info;
+ bool inited_cookie = false;
+ int ret;
+
+ *cookie = info->cookie;
+ if (!*cookie) {
+ /*
+ * This is the first unwind request since the most recent entry
+ * from user. Initialize the task cookie.
+ *
+ * Don't write to info->cookie directly, otherwise it may get
+ * cleared if the NMI occurred in the kernel during early entry
+ * or late exit before the task work gets to run. Instead, use
+ * info->nmi_cookie which gets synced later by get_cookie().
+ */
+ if (!info->nmi_cookie) {
+ u64 cpu = raw_smp_processor_id();
+ u64 ctx_ctr;
+
+ ctx_ctr = __this_cpu_inc_return(unwind_ctx_ctr);
+ info->nmi_cookie = ctx_to_cookie(cpu, ctx_ctr);
+
+ inited_cookie = true;
+ }
+
+ *cookie = info->nmi_cookie;
+ }
+
+ if (info->pending)
+ return 0;
+
+ ret = task_work_add(current, &info->work, TWA_NMI_CURRENT);
+ if (ret) {
+ if (inited_cookie)
+ info->nmi_cookie = 0;
+ return ret;
+ }
+
+ info->pending = 1;
+
+ return 0;
+}
+
/*
* Schedule a user space unwind to be done in task work before exiting the
* kernel.
@@ -160,30 +229,37 @@ static void unwind_deferred_task_work(struct callback_head *head)
int unwind_deferred_request(struct unwind_work *work, u64 *cookie)
{
struct unwind_task_info *info = &current->unwind_info;
+ int pending;
int ret;
*cookie = 0;
- if (WARN_ON_ONCE(in_nmi()))
- return -EINVAL;
-
if ((current->flags & PF_KTHREAD) || !user_mode(task_pt_regs(current)))
return -EINVAL;
+ if (in_nmi())
+ return unwind_deferred_request_nmi(work, cookie);
+
guard(irqsave)();
*cookie = get_cookie(info);
/* callback already pending? */
- if (info->pending)
+ pending = READ_ONCE(info->pending);
+ if (pending)
+ return 0;
+
+ /* Claim the work unless an NMI just now swooped in to do so. */
+ if (!try_cmpxchg(&info->pending, &pending, 1))
return 0;
/* The work has been claimed, now schedule it. */
ret = task_work_add(current, &info->work, TWA_RESUME);
- if (WARN_ON_ONCE(ret))
+ if (WARN_ON_ONCE(ret)) {
+ WRITE_ONCE(info->pending, 0);
return ret;
+ }
- info->pending = 1;
return 0;
}
--
2.47.2