Message-ID: <20251110163634.3686676-14-wangjinchao600@gmail.com>
Date: Tue, 11 Nov 2025 00:36:08 +0800
From: Jinchao Wang <wangjinchao600@...il.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
"Masami Hiramatsu (Google)" <mhiramat@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Randy Dunlap <rdunlap@...radead.org>,
Marco Elver <elver@...gle.com>,
Mike Rapoport <rppt@...nel.org>,
Alexander Potapenko <glider@...gle.com>,
Adrian Hunter <adrian.hunter@...el.com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Alice Ryhl <aliceryhl@...gle.com>,
Andrey Konovalov <andreyknvl@...il.com>,
Andrey Ryabinin <ryabinin.a.a@...il.com>,
Andrii Nakryiko <andrii@...nel.org>,
Ard Biesheuvel <ardb@...nel.org>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Ben Segall <bsegall@...gle.com>,
Bill Wendling <morbo@...gle.com>,
Borislav Petkov <bp@...en8.de>,
Catalin Marinas <catalin.marinas@....com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
David Hildenbrand <david@...hat.com>,
David Kaplan <david.kaplan@....com>,
"David S. Miller" <davem@...emloft.net>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Dmitry Vyukov <dvyukov@...gle.com>,
"H. Peter Anvin" <hpa@...or.com>,
Ian Rogers <irogers@...gle.com>,
Ingo Molnar <mingo@...hat.com>,
James Clark <james.clark@...aro.org>,
Jinchao Wang <wangjinchao600@...il.com>,
Jinjie Ruan <ruanjinjie@...wei.com>,
Jiri Olsa <jolsa@...nel.org>,
Jonathan Corbet <corbet@....net>,
Juri Lelli <juri.lelli@...hat.com>,
Justin Stitt <justinstitt@...gle.com>,
kasan-dev@...glegroups.com,
Kees Cook <kees@...nel.org>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
"Liang Kan" <kan.liang@...ux.intel.com>,
Linus Walleij <linus.walleij@...aro.org>,
linux-arm-kernel@...ts.infradead.org,
linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
linux-perf-users@...r.kernel.org,
linux-trace-kernel@...r.kernel.org,
llvm@...ts.linux.dev,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Mark Rutland <mark.rutland@....com>,
Masahiro Yamada <masahiroy@...nel.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Mel Gorman <mgorman@...e.de>,
Michal Hocko <mhocko@...e.com>,
Miguel Ojeda <ojeda@...nel.org>,
Nam Cao <namcao@...utronix.de>,
Namhyung Kim <namhyung@...nel.org>,
Nathan Chancellor <nathan@...nel.org>,
Naveen N Rao <naveen@...nel.org>,
Nick Desaulniers <nick.desaulniers+lkml@...il.com>,
Rong Xu <xur@...gle.com>,
Sami Tolvanen <samitolvanen@...gle.com>,
Steven Rostedt <rostedt@...dmis.org>,
Suren Baghdasaryan <surenb@...gle.com>,
Thomas Gleixner <tglx@...utronix.de>,
Thomas Weißschuh <thomas.weissschuh@...utronix.de>,
Valentin Schneider <vschneid@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Vincenzo Frascino <vincenzo.frascino@....com>,
Vlastimil Babka <vbabka@...e.cz>,
Will Deacon <will@...nel.org>,
workflows@...r.kernel.org,
x86@...nel.org
Subject: [PATCH v8 13/27] mm/ksw: add per-task ctx tracking
Each task tracks its own depth, stack pointer, and generation in its
ksw_ctx. The watchpoint is armed only when the configured depth is
reached on entry, and disarmed on function exit.
The per-task context is reset when the probes are disabled, the
generation changes, or the exit depth becomes inconsistent.
Re-arming the same stack frame (e.g. when the probed function is hit
again in a loop) is skipped.
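For illustration, a minimal user-space model of the depth/generation
bookkeeping follows. It is only a sketch: the names (check_ctx,
model_ctx, probe_generation, target_depth) mirror the kernel code
below, but the program is not part of the patch and target_depth is an
arbitrary example value.

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  struct model_ctx {
          uint16_t depth;       /* nesting depth of the probed function */
          uint16_t generation;  /* snapshot of the global generation */
  };

  static uint16_t probe_generation;        /* bumped on enable/disable */
  static const uint16_t target_depth = 2;  /* arm on the 3rd nested entry */

  /* Return true when the caller should arm (entry) or disarm (exit). */
  static bool check_ctx(struct model_ctx *ctx, bool entry)
  {
          uint16_t cur_depth;

          /* Stale context from a previous enable cycle: start over. */
          if (ctx->generation != probe_generation) {
                  ctx->depth = 0;
                  ctx->generation = probe_generation;
          }

          /* Exit without a matching entry: bookkeeping is inconsistent. */
          if (!entry && !ctx->depth)
                  return false;

          cur_depth = entry ? ctx->depth++ : --ctx->depth;
          return cur_depth == target_depth;
  }

  int main(void)
  {
          struct model_ctx ctx = { 0 };
          int i;

          for (i = 0; i < 4; i++)
                  printf("entry %d -> arm=%d\n", i, check_ctx(&ctx, true));
          for (i = 3; i >= 0; i--)
                  printf("exit  %d -> disarm=%d\n", i, check_ctx(&ctx, false));

          /* Bumping the generation invalidates the per-task state. */
          probe_generation++;
          printf("after bump, entry -> arm=%d\n", check_ctx(&ctx, true));
          return 0;
  }

Running the model shows "arm" reported only at the configured nesting
depth, the matching "disarm" on the way back out, and how a generation
bump discards the stale per-task state.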
Signed-off-by: Jinchao Wang <wangjinchao600@...il.com>
---
mm/kstackwatch/stack.c | 67 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 67 insertions(+)
diff --git a/mm/kstackwatch/stack.c b/mm/kstackwatch/stack.c
index 3aa02f8370af..96014eb4cb12 100644
--- a/mm/kstackwatch/stack.c
+++ b/mm/kstackwatch/stack.c
@@ -11,6 +11,53 @@
static struct kprobe entry_probe;
static struct fprobe exit_probe;
+static bool probe_enable;
+static u16 probe_generation;
+
+static void ksw_reset_ctx(void)
+{
+ struct ksw_ctx *ctx = &current->ksw_ctx;
+
+ if (ctx->wp)
+ ksw_watch_off(ctx->wp);
+
+ ctx->wp = NULL;
+ ctx->sp = 0;
+ ctx->depth = 0;
+ ctx->generation = READ_ONCE(probe_generation);
+}
+
+static bool ksw_stack_check_ctx(bool entry)
+{
+ struct ksw_ctx *ctx = &current->ksw_ctx;
+ bool cur_enable = READ_ONCE(probe_enable);
+ u16 cur_generation = READ_ONCE(probe_generation);
+ u16 cur_depth, target_depth = ksw_get_config()->depth;
+
+ if (!cur_enable) {
+ ksw_reset_ctx();
+ return false;
+ }
+
+ if (ctx->generation != cur_generation)
+ ksw_reset_ctx();
+
+ if (!entry && !ctx->depth) {
+ ksw_reset_ctx();
+ return false;
+ }
+
+ if (entry)
+ cur_depth = ctx->depth++;
+ else
+ cur_depth = --ctx->depth;
+
+ if (cur_depth == target_depth)
+ return true;
+ else
+ return false;
+}
+
static int ksw_stack_prepare_watch(struct pt_regs *regs,
const struct ksw_config *config,
ulong *watch_addr, u16 *watch_len)
@@ -25,10 +72,22 @@ static void ksw_stack_entry_handler(struct kprobe *p, struct pt_regs *regs,
unsigned long flags)
{
struct ksw_ctx *ctx = &current->ksw_ctx;
+ ulong stack_pointer;
ulong watch_addr;
u16 watch_len;
int ret;
+ stack_pointer = kernel_stack_pointer(regs);
+
+ /*
+ * already armed for this stack frame; probe hit again, e.g. in a loop
+ */
+ if (ctx->wp && ctx->sp == stack_pointer)
+ return;
+
+ if (!ksw_stack_check_ctx(true))
+ return;
+
ret = ksw_watch_get(&ctx->wp);
if (ret)
return;
@@ -49,6 +108,7 @@ static void ksw_stack_entry_handler(struct kprobe *p, struct pt_regs *regs,
return;
}
+ ctx->sp = stack_pointer;
}
static void ksw_stack_exit_handler(struct fprobe *fp, unsigned long ip,
@@ -57,6 +117,8 @@ static void ksw_stack_exit_handler(struct fprobe *fp, unsigned long ip,
{
struct ksw_ctx *ctx = &current->ksw_ctx;
+ if (!ksw_stack_check_ctx(false))
+ return;
if (ctx->wp) {
ksw_watch_off(ctx->wp);
@@ -91,11 +153,16 @@ int ksw_stack_init(void)
return ret;
}
+ WRITE_ONCE(probe_generation, READ_ONCE(probe_generation) + 1);
+ WRITE_ONCE(probe_enable, true);
+
return 0;
}
void ksw_stack_exit(void)
{
+ WRITE_ONCE(probe_enable, false);
+ WRITE_ONCE(probe_generation, READ_ONCE(probe_generation) + 1);
unregister_fprobe(&exit_probe);
unregister_kprobe(&entry_probe);
}
--
2.43.0