Message-ID: <20251020114040.GT3419281@noisy.programming.kicks-ass.net>
Date: Mon, 20 Oct 2025 13:40:40 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Tao Chen <chen.dylane@...ux.dev>
Cc: mingo@...hat.com, acme@...nel.org, namhyung@...nel.org,
mark.rutland@....com, alexander.shishkin@...ux.intel.com,
jolsa@...nel.org, irogers@...gle.com, adrian.hunter@...el.com,
kan.liang@...ux.intel.com, song@...nel.org, ast@...nel.org,
daniel@...earbox.net, andrii@...nel.org, martin.lau@...ux.dev,
eddyz87@...il.com, yonghong.song@...ux.dev,
john.fastabend@...il.com, kpsingh@...nel.org, sdf@...ichev.me,
haoluo@...gle.com, linux-perf-users@...r.kernel.org,
linux-kernel@...r.kernel.org, bpf@...r.kernel.org
Subject: Re: [PATCH bpf-next v3 1/2] perf: Use extern perf_callchain_entry
for get_perf_callchain
On Mon, Oct 20, 2025 at 01:01:17AM +0800, Tao Chen wrote:
> From the BPF stack map side, we want to use our own buffer to avoid an
> unnecessary copy, so let us pass it in directly. BPF will use this in
> the next patch.
>
> Signed-off-by: Tao Chen <chen.dylane@...ux.dev>
> ---
> diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
> index 808c0d7a31f..851e8f9d026 100644
> --- a/kernel/events/callchain.c
> +++ b/kernel/events/callchain.c
> @@ -217,8 +217,8 @@ static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entr
> }
>
>  struct perf_callchain_entry *
> -get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
> -		   u32 max_stack, bool crosstask, bool add_mark)
> +get_perf_callchain(struct pt_regs *regs, struct perf_callchain_entry *external_entry,
> +		   bool kernel, bool user, u32 max_stack, bool crosstask, bool add_mark)
>  {
>  	struct perf_callchain_entry *entry;
>  	struct perf_callchain_entry_ctx ctx;
> @@ -228,7 +228,11 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
>  	if (crosstask && user && !kernel)
>  		return NULL;
> 
> -	entry = get_callchain_entry(&rctx);
> +	if (external_entry)
> +		entry = external_entry;
> +	else
> +		entry = get_callchain_entry(&rctx);
> +
>  	if (!entry)
>  		return NULL;
>
> @@ -260,7 +264,8 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
> }
>
>  exit_put:
> -	put_callchain_entry(rctx);
> +	if (!external_entry)
> +		put_callchain_entry(rctx);
> 
>  	return entry;
> }
Urgh.. How about something like the below, and then you fix up
bpf_get_stackid() a little like this:
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 4d53cdd1374c..8b85b49cecf7 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -303,8 +303,8 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 	u32 max_depth = map->value_size / stack_map_data_size(map);
 	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
 	bool user = flags & BPF_F_USER_STACK;
+	struct perf_callchain_entry_ctx ctx;
 	struct perf_callchain_entry *trace;
-	bool kernel = !user;
 
 	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
 			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
@@ -314,8 +314,13 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 	if (max_depth > sysctl_perf_event_max_stack)
 		max_depth = sysctl_perf_event_max_stack;
-	trace = get_perf_callchain(regs, kernel, user, max_depth,
-				   false, false);
+	trace = your-stuff;
+
+	if (trace) {
+		__init_perf_callchain_ctx(&ctx, trace, max_depth, false);
+		if (!user)
+			__get_perf_callchain_kernel(&ctx, regs);
+		else
+			__get_perf_callchain_user(&ctx, regs);
+	}
 
 	if (unlikely(!trace))
 		/* couldn't fetch the stack trace */
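
Purely as illustration of what "your-stuff" could be (a sketch only, not
compile-tested): a BPF-owned per-CPU buffer plus a small wrapper. The
bpf_callchain_buf buffer and the bpf_get_callchain() name below are made
up, and the recursion/preemption protection BPF would want around the
per-CPU slot is left out:

/* room for ->nr plus the ip[] slots; the entry is nothing but u64s */
struct bpf_callchain_buf {
	u64 buf[1 + PERF_MAX_STACK_DEPTH];
};
static DEFINE_PER_CPU(struct bpf_callchain_buf, bpf_callchain_buf);

static struct perf_callchain_entry *
bpf_get_callchain(struct pt_regs *regs, u32 max_depth, bool user)
{
	struct perf_callchain_entry_ctx ctx;
	struct perf_callchain_entry *trace;

	/* caller keeps preemption disabled while the buffer is in use */
	trace = (void *)this_cpu_ptr(&bpf_callchain_buf);

	/* add_mark=false: raw IPs only, no PERF_CONTEXT_* markers */
	__init_perf_callchain_ctx(&ctx, trace, max_depth, false);
	if (user)
		__get_perf_callchain_user(&ctx, regs);
	else
		__get_perf_callchain_kernel(&ctx, regs);

	return trace;
}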
---
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index fd1d91017b99..14a382cad1dd 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -67,6 +67,7 @@ struct perf_callchain_entry_ctx {
 	u32				nr;
 	short				contexts;
 	bool				contexts_maxed;
+	bool				add_mark;
 };
typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
@@ -1718,9 +1719,17 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
+
+extern void __init_perf_callchain_ctx(struct perf_callchain_entry_ctx *ctx,
+ struct perf_callchain_entry *entry,
+ u32 max_stack, bool add_mark);
+
+extern void __get_perf_callchain_kernel(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs);
+extern void __get_perf_callchain_user(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs);
+
extern struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
-		   u32 max_stack, bool crosstask, bool add_mark);
+		   u32 max_stack, bool crosstask);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 808c0d7a31fa..edd76e3bb139 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -216,50 +216,70 @@ static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entr
#endif
}
+void __init_perf_callchain_ctx(struct perf_callchain_entry_ctx *ctx,
+			       struct perf_callchain_entry *entry,
+			       u32 max_stack, bool add_mark)
+{
+	ctx->entry = entry;
+	ctx->max_stack = max_stack;
+	ctx->nr = entry->nr = 0;
+	ctx->contexts = 0;
+	ctx->contexts_maxed = false;
+	ctx->add_mark = add_mark;
+}
+
+void __get_perf_callchain_kernel(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs)
+{
+	if (user_mode(regs))
+		return;
+
+	if (ctx->add_mark)
+		perf_callchain_store_context(ctx, PERF_CONTEXT_KERNEL);
+	perf_callchain_kernel(ctx, regs);
+}
+
+void __get_perf_callchain_user(struct perf_callchain_entry_ctx *ctx, struct pt_regs *regs)
+{
+	int start_entry_idx;
+
+	if (!user_mode(regs)) {
+		if (current->flags & (PF_KTHREAD | PF_USER_WORKER))
+			return;
+		regs = task_pt_regs(current);
+	}
+
+	if (ctx->add_mark)
+		perf_callchain_store_context(ctx, PERF_CONTEXT_USER);
+
+	start_entry_idx = ctx->entry->nr;
+	perf_callchain_user(ctx, regs);
+	fixup_uretprobe_trampoline_entries(ctx->entry, start_entry_idx);
+}
+
struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
-		   u32 max_stack, bool crosstask, bool add_mark)
+		   u32 max_stack, bool crosstask)
 {
-	struct perf_callchain_entry *entry;
 	struct perf_callchain_entry_ctx ctx;
-	int rctx, start_entry_idx;
+	struct perf_callchain_entry *entry;
+	int rctx;
 
 	/* crosstask is not supported for user stacks */
 	if (crosstask && user && !kernel)
 		return NULL;
 
 	entry = get_callchain_entry(&rctx);
 	if (!entry)
 		return NULL;
-	ctx.entry = entry;
-	ctx.max_stack = max_stack;
-	ctx.nr = entry->nr = 0;
-	ctx.contexts = 0;
-	ctx.contexts_maxed = false;
+	__init_perf_callchain_ctx(&ctx, entry, max_stack, true);
 
-	if (kernel && !user_mode(regs)) {
-		if (add_mark)
-			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
-		perf_callchain_kernel(&ctx, regs);
-	}
+	if (kernel)
+		__get_perf_callchain_kernel(&ctx, regs);
+	if (user && !crosstask)
+		__get_perf_callchain_user(&ctx, regs);
-	if (user && !crosstask) {
-		if (!user_mode(regs)) {
-			if (current->flags & (PF_KTHREAD | PF_USER_WORKER))
-				goto exit_put;
-			regs = task_pt_regs(current);
-		}
-
-		if (add_mark)
-			perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
-
-		start_entry_idx = entry->nr;
-		perf_callchain_user(&ctx, regs);
-		fixup_uretprobe_trampoline_entries(entry, start_entry_idx);
-	}
-
-exit_put:
 	put_callchain_entry(rctx);
 
 	return entry;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 177e57c1a362..cbe073d761a8 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8218,7 +8218,7 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 		return &__empty_callchain;
 
 	callchain = get_perf_callchain(regs, kernel, user,
-				       max_stack, crosstask, true);
+				       max_stack, crosstask);
 
 	return callchain ?: &__empty_callchain;
}
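
FWIW, with this the add_mark distinction moves entirely into the
callers: get_perf_callchain() hardwires it to true for perf, while
direct users of the helpers decide per call site. A kernel-only
consumer would then look something like this (a sketch only, not
compile-tested; collect_kernel_ips() is a made-up example, not part
of the patch):

static u64 collect_kernel_ips(struct pt_regs *regs,
			      struct perf_callchain_entry *entry,
			      u32 max_stack)
{
	struct perf_callchain_entry_ctx ctx;

	/* add_mark=false: entry->ip[] gets raw kernel IPs, no marker */
	__init_perf_callchain_ctx(&ctx, entry, max_stack, false);
	__get_perf_callchain_kernel(&ctx, regs);

	return entry->nr;
}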