Message-ID: <20250924115124.194940-14-wangjinchao600@gmail.com>
Date: Wed, 24 Sep 2025 19:50:56 +0800
From: Jinchao Wang <wangjinchao600@...il.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Masami Hiramatsu <mhiramat@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Mike Rapoport <rppt@...nel.org>,
Alexander Potapenko <glider@...gle.com>,
Randy Dunlap <rdunlap@...radead.org>,
Jonathan Corbet <corbet@....net>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Valentin Schneider <vschneid@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>,
Ian Rogers <irogers@...gle.com>,
Adrian Hunter <adrian.hunter@...el.com>,
"Liang, Kan" <kan.liang@...ux.intel.com>,
David Hildenbrand <david@...hat.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Vlastimil Babka <vbabka@...e.cz>,
Suren Baghdasaryan <surenb@...gle.com>,
Michal Hocko <mhocko@...e.com>,
Nathan Chancellor <nathan@...nel.org>,
Nick Desaulniers <nick.desaulniers+lkml@...il.com>,
Bill Wendling <morbo@...gle.com>,
Justin Stitt <justinstitt@...gle.com>,
Kees Cook <kees@...nel.org>,
Alice Ryhl <aliceryhl@...gle.com>,
Sami Tolvanen <samitolvanen@...gle.com>,
Miguel Ojeda <ojeda@...nel.org>,
Masahiro Yamada <masahiroy@...nel.org>,
Rong Xu <xur@...gle.com>,
Naveen N Rao <naveen@...nel.org>,
David Kaplan <david.kaplan@....com>,
Andrii Nakryiko <andrii@...nel.org>,
Jinjie Ruan <ruanjinjie@...wei.com>,
Nam Cao <namcao@...utronix.de>,
workflows@...r.kernel.org,
linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-perf-users@...r.kernel.org,
linux-mm@...ck.org,
llvm@...ts.linux.dev,
Andrey Ryabinin <ryabinin.a.a@...il.com>,
Andrey Konovalov <andreyknvl@...il.com>,
Dmitry Vyukov <dvyukov@...gle.com>,
Vincenzo Frascino <vincenzo.frascino@....com>,
kasan-dev@...glegroups.com,
"David S. Miller" <davem@...emloft.net>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
linux-trace-kernel@...r.kernel.org
Cc: Jinchao Wang <wangjinchao600@...il.com>
Subject: [PATCH v5 13/23] mm/ksw: add per-task ctx tracking

Each task tracks its probe depth, stack pointer, and generation in a
per-task context. A watchpoint is armed only when the configured depth
is reached on function entry and is disarmed on function exit.

The context is reset whenever probes are disabled, the generation
changes, or the exit depth becomes inconsistent. Duplicate arming on
the same stack frame is skipped.

Signed-off-by: Jinchao Wang <wangjinchao600@...il.com>
---
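
Note for illustration (not part of the diff): based on the fields used in
the patch below, the per-task context is assumed to look roughly like the
sketch here. The exact layout, the type of the watchpoint handle 'wp', and
how the context is embedded in struct task_struct are defined by earlier
patches in the series and may differ from this sketch.

/*
 * Hypothetical sketch only; field names mirror the diff below, and the
 * 'struct perf_event *' type for the watchpoint handle is an assumption.
 */
struct ksw_ctx {
	struct perf_event *wp;	/* armed watchpoint, NULL while idle */
	unsigned long sp;	/* stack pointer of the frame that armed it */
	u16 depth;		/* current nesting depth of the traced function */
	u16 generation;		/* probe generation this context was reset for */
};

/* assumed to live in struct task_struct as: struct ksw_ctx ksw_ctx; */
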
mm/kstackwatch/stack.c | 67 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 67 insertions(+)
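
Also for illustration: the depth gating implemented by ksw_stack_check_ctx()
in this patch can be modeled in plain user-space C. The sketch below covers
the counting rule only (arming is reduced to a boolean return) and assumes
the configured depth is 0-based, i.e. depth N corresponds to the (N+1)-th
nested entry:

#include <stdbool.h>
#include <stdio.h>

/* User-space model of the depth check; not kernel code. */
static unsigned int depth;            /* stands in for ctx->depth */
static const unsigned int target = 1; /* stands in for the configured depth */

/* true means "arm" on entry / "disarm" on exit at the target depth */
static bool check_ctx(bool entry)
{
	unsigned int cur;

	if (!entry && !depth)	/* exit without a matching entry */
		return false;

	if (entry)
		cur = depth++;	/* depth before this entry */
	else
		cur = --depth;	/* depth after this exit */

	return cur == target;
}

int main(void)
{
	/* a function that recurses once: outer entry, inner entry/exit, outer exit */
	printf("outer entry : arm? %d\n", check_ctx(true));     /* cur=0 -> 0 */
	printf("inner entry : arm? %d\n", check_ctx(true));     /* cur=1 -> 1 */
	printf("inner exit  : disarm? %d\n", check_ctx(false)); /* cur=1 -> 1 */
	printf("outer exit  : disarm? %d\n", check_ctx(false)); /* cur=0 -> 0 */
	return 0;
}

With target = 1 the model arms on the second (nested) entry and disarms on
the matching exit, which mirrors how the entry and exit handlers below arm
and disarm the watchpoint at the configured depth.
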
diff --git a/mm/kstackwatch/stack.c b/mm/kstackwatch/stack.c
index 9f59f41d954c..e596ef97222d 100644
--- a/mm/kstackwatch/stack.c
+++ b/mm/kstackwatch/stack.c
@@ -12,6 +12,53 @@
 static struct kprobe entry_probe;
 static struct fprobe exit_probe;
 
+static bool probe_enable;
+static u16 probe_generation;
+
+static void ksw_reset_ctx(void)
+{
+	struct ksw_ctx *ctx = &current->ksw_ctx;
+
+	if (ctx->wp)
+		ksw_watch_off(ctx->wp);
+
+	ctx->wp = NULL;
+	ctx->sp = 0;
+	ctx->depth = 0;
+	ctx->generation = READ_ONCE(probe_generation);
+}
+
+static bool ksw_stack_check_ctx(bool entry)
+{
+	struct ksw_ctx *ctx = &current->ksw_ctx;
+	u16 cur_enable = READ_ONCE(probe_enable);
+	u16 cur_generation = READ_ONCE(probe_generation);
+	u16 cur_depth, target_depth = ksw_get_config()->depth;
+
+	if (!cur_enable) {
+		ksw_reset_ctx();
+		return false;
+	}
+
+	if (ctx->generation != cur_generation)
+		ksw_reset_ctx();
+
+	if (!entry && !ctx->depth) {
+		ksw_reset_ctx();
+		return false;
+	}
+
+	if (entry)
+		cur_depth = ctx->depth++;
+	else
+		cur_depth = --ctx->depth;
+
+	if (cur_depth == target_depth)
+		return true;
+	else
+		return false;
+}
+
 static int ksw_stack_prepare_watch(struct pt_regs *regs,
 				    const struct ksw_config *config,
 				    ulong *watch_addr, u16 *watch_len)
@@ -26,10 +73,22 @@ static void ksw_stack_entry_handler(struct kprobe *p, struct pt_regs *regs,
 				    unsigned long flags)
 {
 	struct ksw_ctx *ctx = &current->ksw_ctx;
+	ulong stack_pointer;
 	ulong watch_addr;
 	u16 watch_len;
 	int ret;
 
+	stack_pointer = kernel_stack_pointer(regs);
+
+	/*
+	 * Armed for this frame already (same SP), e.g. a loop; skip re-arming.
+	 */
+	if (ctx->wp && ctx->sp == stack_pointer)
+		return;
+
+	if (!ksw_stack_check_ctx(true))
+		return;
+
 	ret = ksw_watch_get(&ctx->wp);
 	if (ret)
 		return;
@@ -50,6 +109,7 @@ static void ksw_stack_entry_handler(struct kprobe *p, struct pt_regs *regs,
 		return;
 	}
+	ctx->sp = stack_pointer;
 }
 
 static void ksw_stack_exit_handler(struct fprobe *fp, unsigned long ip,
@@ -58,6 +118,8 @@ static void ksw_stack_exit_handler(struct fprobe *fp, unsigned long ip,
 {
 	struct ksw_ctx *ctx = &current->ksw_ctx;
 
+	if (!ksw_stack_check_ctx(false))
+		return;
 	if (ctx->wp) {
 		ksw_watch_off(ctx->wp);
@@ -92,11 +154,16 @@ int ksw_stack_init(void)
 		return ret;
 	}
 
+	WRITE_ONCE(probe_generation, READ_ONCE(probe_generation) + 1);
+	WRITE_ONCE(probe_enable, true);
+
 	return 0;
 }
 
 void ksw_stack_exit(void)
 {
+	WRITE_ONCE(probe_enable, false);
+	WRITE_ONCE(probe_generation, READ_ONCE(probe_generation) + 1);
 	unregister_fprobe(&exit_probe);
 	unregister_kprobe(&entry_probe);
 }
--
2.43.0