Message-ID: <20251110163634.3686676-18-wangjinchao600@gmail.com>
Date: Tue, 11 Nov 2025 00:36:12 +0800
From: Jinchao Wang <wangjinchao600@...il.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
"Masami Hiramatsu (Google)" <mhiramat@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Randy Dunlap <rdunlap@...radead.org>,
Marco Elver <elver@...gle.com>,
Mike Rapoport <rppt@...nel.org>,
Alexander Potapenko <glider@...gle.com>,
Adrian Hunter <adrian.hunter@...el.com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Alice Ryhl <aliceryhl@...gle.com>,
Andrey Konovalov <andreyknvl@...il.com>,
Andrey Ryabinin <ryabinin.a.a@...il.com>,
Andrii Nakryiko <andrii@...nel.org>,
Ard Biesheuvel <ardb@...nel.org>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Ben Segall <bsegall@...gle.com>,
Bill Wendling <morbo@...gle.com>,
Borislav Petkov <bp@...en8.de>,
Catalin Marinas <catalin.marinas@....com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
David Hildenbrand <david@...hat.com>,
David Kaplan <david.kaplan@....com>,
"David S. Miller" <davem@...emloft.net>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Dmitry Vyukov <dvyukov@...gle.com>,
"H. Peter Anvin" <hpa@...or.com>,
Ian Rogers <irogers@...gle.com>,
Ingo Molnar <mingo@...hat.com>,
James Clark <james.clark@...aro.org>,
Jinchao Wang <wangjinchao600@...il.com>,
Jinjie Ruan <ruanjinjie@...wei.com>,
Jiri Olsa <jolsa@...nel.org>,
Jonathan Corbet <corbet@....net>,
Juri Lelli <juri.lelli@...hat.com>,
Justin Stitt <justinstitt@...gle.com>,
kasan-dev@...glegroups.com,
Kees Cook <kees@...nel.org>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
"Liang Kan" <kan.liang@...ux.intel.com>,
Linus Walleij <linus.walleij@...aro.org>,
linux-arm-kernel@...ts.infradead.org,
linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
linux-perf-users@...r.kernel.org,
linux-trace-kernel@...r.kernel.org,
llvm@...ts.linux.dev,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Mark Rutland <mark.rutland@....com>,
Masahiro Yamada <masahiroy@...nel.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Mel Gorman <mgorman@...e.de>,
Michal Hocko <mhocko@...e.com>,
Miguel Ojeda <ojeda@...nel.org>,
Nam Cao <namcao@...utronix.de>,
Namhyung Kim <namhyung@...nel.org>,
Nathan Chancellor <nathan@...nel.org>,
Naveen N Rao <naveen@...nel.org>,
Nick Desaulniers <nick.desaulniers+lkml@...il.com>,
Rong Xu <xur@...gle.com>,
Sami Tolvanen <samitolvanen@...gle.com>,
Steven Rostedt <rostedt@...dmis.org>,
Suren Baghdasaryan <surenb@...gle.com>,
Thomas Gleixner <tglx@...utronix.de>,
Thomas Weißschuh <thomas.weissschuh@...utronix.de>,
Valentin Schneider <vschneid@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Vincenzo Frascino <vincenzo.frascino@....com>,
Vlastimil Babka <vbabka@...e.cz>,
Will Deacon <will@...nel.org>,
workflows@...r.kernel.org,
x86@...nel.org
Subject: [PATCH v8 17/27] mm/ksw: add KSTACKWATCH_PROFILING to measure probe cost

CONFIG_KSTACKWATCH_PROFILING enables runtime measurement of KStackWatch
probe latencies. With profiling enabled, KStackWatch records the latency
of each entry/exit probe callback, in both nanoseconds and cycles,
keeping separate per-CPU totals for invocations that actually arm or
disarm a watch and for those that return early. When KStackWatch is
disabled by clearing its config file, the collected statistics are
printed.
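
For reference, the measurement pattern is a plain start/end timestamp
pair accumulated into per-CPU counters. The snippet below is a minimal
userspace sketch of the same idea and is not part of the patch: it
assumes an x86 TSC, using clock_gettime() in place of ktime_get_ns()
and the __rdtsc() intrinsic in place of get_cycles(); all names here
are illustrative.

/*
 * Illustrative userspace analog of the profiling pattern; not part of
 * the patch. Build (x86 only): gcc -O2 -o measure measure.c
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <x86intrin.h>

struct measure_ctx {
	uint64_t ns_start;
	uint64_t cycles_start;
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

static void measure_start(struct measure_ctx *ctx)
{
	ctx->ns_start = now_ns();
	ctx->cycles_start = __rdtsc();
}

static void measure_end(struct measure_ctx *ctx, uint64_t *total_ns,
			uint64_t *total_cycles, uint64_t *count)
{
	*total_ns += now_ns() - ctx->ns_start;
	*total_cycles += __rdtsc() - ctx->cycles_start;
	(*count)++;
}

int main(void)
{
	uint64_t total_ns = 0, total_cycles = 0, count = 0;
	struct measure_ctx m;
	volatile unsigned int sink = 0;
	int i, j;

	for (i = 0; i < 1000; i++) {
		measure_start(&m);
		for (j = 0; j < 100; j++)	/* stands in for the probe body */
			sink += j;
		measure_end(&m, &total_ns, &total_cycles, &count);
	}

	/* Same average-with-zero-guard as the AVG() macro in the patch. */
	printf("avg: %llu ns, %llu cycles (%llu samples)\n",
	       (unsigned long long)(count ? total_ns / count : 0),
	       (unsigned long long)(count ? total_cycles / count : 0),
	       (unsigned long long)count);
	return 0;
}
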
Signed-off-by: Jinchao Wang <wangjinchao600@...il.com>
---
mm/kstackwatch/Kconfig | 10 +++
mm/kstackwatch/stack.c | 185 ++++++++++++++++++++++++++++++++++++++---
2 files changed, 183 insertions(+), 12 deletions(-)
diff --git a/mm/kstackwatch/Kconfig b/mm/kstackwatch/Kconfig
index 496caf264f35..3c9385a15c33 100644
--- a/mm/kstackwatch/Kconfig
+++ b/mm/kstackwatch/Kconfig
@@ -12,3 +12,13 @@ config KSTACKWATCH
introduce minor overhead during runtime monitoring.
If unsure, say N.
+
+config KSTACKWATCH_PROFILING
+ bool "KStackWatch profiling"
+ depends on KSTACKWATCH
+ help
+ Measure probe latency and overhead in KStackWatch. Entry/exit probe
+ times are recorded (in ns and cycles); the statistics are printed
+ when monitoring stops. Useful for tuning, not for production use.
+
+ If unsure, say N.
diff --git a/mm/kstackwatch/stack.c b/mm/kstackwatch/stack.c
index 3455d1e70db9..72ae2d3adeec 100644
--- a/mm/kstackwatch/stack.c
+++ b/mm/kstackwatch/stack.c
@@ -6,7 +6,10 @@
#include <linux/kprobes.h>
#include <linux/kstackwatch.h>
#include <linux/kstackwatch_types.h>
+#include <linux/ktime.h>
+#include <linux/percpu.h>
#include <linux/printk.h>
+#include <linux/timex.h>
#define MAX_CANARY_SEARCH_STEPS 128
static struct kprobe entry_probe;
@@ -15,6 +18,120 @@ static struct fprobe exit_probe;
static bool probe_enable;
static u16 probe_generation;
+#ifdef CONFIG_KSTACKWATCH_PROFILING
+struct measure_data {
+ u64 total_entry_with_watch_ns;
+ u64 total_entry_with_watch_cycles;
+ u64 total_entry_without_watch_ns;
+ u64 total_entry_without_watch_cycles;
+ u64 total_exit_with_watch_ns;
+ u64 total_exit_with_watch_cycles;
+ u64 total_exit_without_watch_ns;
+ u64 total_exit_without_watch_cycles;
+ u64 entry_with_watch_count;
+ u64 entry_without_watch_count;
+ u64 exit_with_watch_count;
+ u64 exit_without_watch_count;
+};
+
+static DEFINE_PER_CPU(struct measure_data, measure_stats);
+
+struct measure_ctx {
+ u64 ns_start;
+ u64 cycles_start;
+};
+
+static __always_inline void measure_start(struct measure_ctx *ctx)
+{
+ ctx->ns_start = ktime_get_ns();
+ ctx->cycles_start = get_cycles();
+}
+
+static __always_inline void measure_end(struct measure_ctx *ctx, u64 *total_ns,
+ u64 *total_cycles, u64 *count)
+{
+ u64 ns_end = ktime_get_ns();
+ u64 c_end = get_cycles();
+
+ *total_ns += ns_end - ctx->ns_start;
+ *total_cycles += c_end - ctx->cycles_start;
+ (*count)++;
+}
+
+static void show_measure_stats(void)
+{
+ int cpu;
+ struct measure_data sum = {};
+
+ for_each_possible_cpu(cpu) {
+ struct measure_data *md = per_cpu_ptr(&measure_stats, cpu);
+
+ sum.total_entry_with_watch_ns += md->total_entry_with_watch_ns;
+ sum.total_entry_with_watch_cycles +=
+ md->total_entry_with_watch_cycles;
+ sum.total_entry_without_watch_ns +=
+ md->total_entry_without_watch_ns;
+ sum.total_entry_without_watch_cycles +=
+ md->total_entry_without_watch_cycles;
+
+ sum.total_exit_with_watch_ns += md->total_exit_with_watch_ns;
+ sum.total_exit_with_watch_cycles +=
+ md->total_exit_with_watch_cycles;
+ sum.total_exit_without_watch_ns +=
+ md->total_exit_without_watch_ns;
+ sum.total_exit_without_watch_cycles +=
+ md->total_exit_without_watch_cycles;
+
+ sum.entry_with_watch_count += md->entry_with_watch_count;
+ sum.entry_without_watch_count += md->entry_without_watch_count;
+ sum.exit_with_watch_count += md->exit_with_watch_count;
+ sum.exit_without_watch_count += md->exit_without_watch_count;
+ }
+
+#define AVG(ns, cnt) ((cnt) ? ((ns) / (cnt)) : 0ULL)
+
+ pr_info("entry (with watch): %llu ns, %llu cycles (%llu samples)\n",
+ AVG(sum.total_entry_with_watch_ns, sum.entry_with_watch_count),
+ AVG(sum.total_entry_with_watch_cycles,
+ sum.entry_with_watch_count),
+ sum.entry_with_watch_count);
+
+ pr_info("entry (without watch): %llu ns, %llu cycles (%llu samples)\n",
+ AVG(sum.total_entry_without_watch_ns,
+ sum.entry_without_watch_count),
+ AVG(sum.total_entry_without_watch_cycles,
+ sum.entry_without_watch_count),
+ sum.entry_without_watch_count);
+
+ pr_info("exit (with watch): %llu ns, %llu cycles (%llu samples)\n",
+ AVG(sum.total_exit_with_watch_ns, sum.exit_with_watch_count),
+ AVG(sum.total_exit_with_watch_cycles,
+ sum.exit_with_watch_count),
+ sum.exit_with_watch_count);
+
+ pr_info("exit (without watch): %llu ns, %llu cycles (%llu samples)\n",
+ AVG(sum.total_exit_without_watch_ns,
+ sum.exit_without_watch_count),
+ AVG(sum.total_exit_without_watch_cycles,
+ sum.exit_without_watch_count),
+ sum.exit_without_watch_count);
+}
+
+static void reset_measure_stats(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct measure_data *md = per_cpu_ptr(&measure_stats, cpu);
+
+ memset(md, 0, sizeof(*md));
+ }
+
+ pr_info("measure stats reset.\n");
+}
+
+#endif
+
static void ksw_reset_ctx(void)
{
struct ksw_ctx *ctx = &current->ksw_ctx;
@@ -159,25 +276,28 @@ static void ksw_stack_entry_handler(struct kprobe *p, struct pt_regs *regs,
unsigned long flags)
{
struct ksw_ctx *ctx = &current->ksw_ctx;
- ulong stack_pointer;
- ulong watch_addr;
+ ulong stack_pointer, watch_addr;
u16 watch_len;
int ret;
+#ifdef CONFIG_KSTACKWATCH_PROFILING
+ struct measure_ctx m;
+ struct measure_data *md = this_cpu_ptr(&measure_stats);
+ bool watched = false;
+
+ measure_start(&m);
+#endif
stack_pointer = kernel_stack_pointer(regs);
- /*
- * triggered more than once, may be in a loop
- */
if (ctx->wp && ctx->sp == stack_pointer)
- return;
+ goto out;
if (!ksw_stack_check_ctx(true))
- return;
+ goto out;
ret = ksw_watch_get(&ctx->wp);
if (ret)
- return;
+ goto out;
ret = ksw_stack_prepare_watch(regs, ksw_get_config(), &watch_addr,
&watch_len);
@@ -185,17 +305,32 @@ static void ksw_stack_entry_handler(struct kprobe *p, struct pt_regs *regs,
ksw_watch_off(ctx->wp);
ctx->wp = NULL;
pr_err("failed to prepare watch target: %d\n", ret);
- return;
+ goto out;
}
ret = ksw_watch_on(ctx->wp, watch_addr, watch_len);
if (ret) {
pr_err("failed to watch on depth:%d addr:0x%lx len:%u %d\n",
ksw_get_config()->depth, watch_addr, watch_len, ret);
- return;
+ goto out;
}
ctx->sp = stack_pointer;
+#ifdef CONFIG_KSTACKWATCH_PROFILING
+ watched = true;
+#endif
+
+out:
+#ifdef CONFIG_KSTACKWATCH_PROFILING
+ if (watched)
+ measure_end(&m, &md->total_entry_with_watch_ns,
+ &md->total_entry_with_watch_cycles,
+ &md->entry_with_watch_count);
+ else
+ measure_end(&m, &md->total_entry_without_watch_ns,
+ &md->total_entry_without_watch_cycles,
+ &md->entry_without_watch_count);
+#endif
}
static void ksw_stack_exit_handler(struct fprobe *fp, unsigned long ip,
@@ -203,15 +338,36 @@ static void ksw_stack_exit_handler(struct fprobe *fp, unsigned long ip,
struct ftrace_regs *regs, void *data)
{
struct ksw_ctx *ctx = &current->ksw_ctx;
+#ifdef CONFIG_KSTACKWATCH_PROFILING
+ struct measure_ctx m;
+ struct measure_data *md = this_cpu_ptr(&measure_stats);
+ bool watched = false;
+ measure_start(&m);
+#endif
if (!ksw_stack_check_ctx(false))
- return;
+ goto out;
if (ctx->wp) {
ksw_watch_off(ctx->wp);
ctx->wp = NULL;
ctx->sp = 0;
+#ifdef CONFIG_KSTACKWATCH_PROFILING
+ watched = true;
+#endif
}
+
+out:
+#ifdef CONFIG_KSTACKWATCH_PROFILING
+ if (watched)
+ measure_end(&m, &md->total_exit_with_watch_ns,
+ &md->total_exit_with_watch_cycles,
+ &md->exit_with_watch_count);
+ else
+ measure_end(&m, &md->total_exit_without_watch_ns,
+ &md->total_exit_without_watch_cycles,
+ &md->exit_without_watch_count);
+#endif
}
int ksw_stack_init(void)
@@ -239,7 +395,9 @@ int ksw_stack_init(void)
unregister_kprobe(&entry_probe);
return ret;
}
-
+#ifdef CONFIG_KSTACKWATCH_PROFILING
+ reset_measure_stats();
+#endif
WRITE_ONCE(probe_generation, READ_ONCE(probe_generation) + 1);
WRITE_ONCE(probe_enable, true);
@@ -252,4 +410,7 @@ void ksw_stack_exit(void)
WRITE_ONCE(probe_generation, READ_ONCE(probe_generation) + 1);
unregister_fprobe(&exit_probe);
unregister_kprobe(&entry_probe);
+#ifdef CONFIG_KSTACKWATCH_PROFILING
+ show_measure_stats();
+#endif
}
--
2.43.0