[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251109163559.4102849-2-chen.dylane@linux.dev>
Date: Mon, 10 Nov 2025 00:35:57 +0800
From: Tao Chen <chen.dylane@...ux.dev>
To: peterz@...radead.org,
mingo@...hat.com,
acme@...nel.org,
namhyung@...nel.org,
mark.rutland@....com,
alexander.shishkin@...ux.intel.com,
jolsa@...nel.org,
irogers@...gle.com,
adrian.hunter@...el.com,
kan.liang@...ux.intel.com
Cc: linux-perf-users@...r.kernel.org,
linux-kernel@...r.kernel.org,
bpf@...r.kernel.org,
Tao Chen <chen.dylane@...ux.dev>
Subject: [PATCH bpf-next v5 1/3] perf: Refactor get_perf_callchain
From the BPF stack map, we want to ensure that the callchain buffer
will not be overwritten by other preemptive tasks. Peter
suggested providing more flexible stack-sampling APIs, which
can be used in BPF, and we can still use the perf callchain
entry with the help of these APIs. The next patch will modify
the BPF part.
In the future, these APIs will also make it convenient for us to
add stack-sampling kfuncs in the eBPF subsystem, just as Andrii and
Alexei discussed earlier.
Signed-off-by: Peter Zijlstra <peterz@...radead.org>
Signed-off-by: Tao Chen <chen.dylane@...ux.dev>
---
include/linux/perf_event.h | 9 +++++
kernel/events/callchain.c | 73 ++++++++++++++++++++++++--------------
2 files changed, 56 insertions(+), 26 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index fd1d91017b9..edd3058e4d8 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -67,6 +67,7 @@ struct perf_callchain_entry_ctx {
u32 nr;
short contexts;
bool contexts_maxed;
+ bool add_mark;
};
typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
@@ -1718,6 +1719,14 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
+
+extern void __init_perf_callchain_ctx(struct perf_callchain_entry_ctx *ctx,
+ struct perf_callchain_entry *entry,
+ u32 max_stack, bool add_mark);
+
+extern void __get_perf_callchain_kernel(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs);
+extern void __get_perf_callchain_user(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs);
+
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
u32 max_stack, bool crosstask, bool add_mark);
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 808c0d7a31f..fb1f26be297 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -216,13 +216,54 @@ static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entr
#endif
}
+void __init_perf_callchain_ctx(struct perf_callchain_entry_ctx *ctx,
+ struct perf_callchain_entry *entry,
+ u32 max_stack, bool add_mark)
+
+{
+ ctx->entry = entry;
+ ctx->max_stack = max_stack;
+ ctx->nr = entry->nr = 0;
+ ctx->contexts = 0;
+ ctx->contexts_maxed = false;
+ ctx->add_mark = add_mark;
+}
+
+void __get_perf_callchain_kernel(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs)
+{
+ if (user_mode(regs))
+ return;
+
+ if (ctx->add_mark)
+ perf_callchain_store_context(ctx, PERF_CONTEXT_KERNEL);
+ perf_callchain_kernel(ctx, regs);
+}
+
+void __get_perf_callchain_user(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs)
+{
+ int start_entry_idx;
+
+ if (!user_mode(regs)) {
+ if (current->flags & (PF_KTHREAD | PF_USER_WORKER))
+ return;
+ regs = task_pt_regs(current);
+ }
+
+ if (ctx->add_mark)
+ perf_callchain_store_context(ctx, PERF_CONTEXT_USER);
+
+ start_entry_idx = ctx->nr;
+ perf_callchain_user(ctx, regs);
+ fixup_uretprobe_trampoline_entries(ctx->entry, start_entry_idx);
+}
+
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
u32 max_stack, bool crosstask, bool add_mark)
{
struct perf_callchain_entry *entry;
struct perf_callchain_entry_ctx ctx;
- int rctx, start_entry_idx;
+ int rctx;
/* crosstask is not supported for user stacks */
if (crosstask && user && !kernel)
@@ -232,34 +273,14 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
if (!entry)
return NULL;
- ctx.entry = entry;
- ctx.max_stack = max_stack;
- ctx.nr = entry->nr = 0;
- ctx.contexts = 0;
- ctx.contexts_maxed = false;
+ __init_perf_callchain_ctx(&ctx, entry, max_stack, add_mark);
- if (kernel && !user_mode(regs)) {
- if (add_mark)
- perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
- perf_callchain_kernel(&ctx, regs);
- }
-
- if (user && !crosstask) {
- if (!user_mode(regs)) {
- if (current->flags & (PF_KTHREAD | PF_USER_WORKER))
- goto exit_put;
- regs = task_pt_regs(current);
- }
+ if (kernel)
+ __get_perf_callchain_kernel(&ctx, regs);
- if (add_mark)
- perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
-
- start_entry_idx = entry->nr;
- perf_callchain_user(&ctx, regs);
- fixup_uretprobe_trampoline_entries(entry, start_entry_idx);
- }
+ if (user && !crosstask)
+ __get_perf_callchain_user(&ctx, regs);
-exit_put:
put_callchain_entry(rctx);
return entry;
--
2.48.1
Powered by blists - more mailing lists