Message-ID: <6dbaff1f-09d3-421a-9813-c9c1cda8e7d8@linux.dev>
Date: Fri, 16 Jan 2026 12:35:14 +0800
From: Tao Chen <chen.dylane@...ux.dev>
To: Andrii Nakryiko <andrii.nakryiko@...il.com>, peterz@...radead.org
Cc: mingo@...hat.com, acme@...nel.org, namhyung@...nel.org,
mark.rutland@....com, alexander.shishkin@...ux.intel.com, jolsa@...nel.org,
irogers@...gle.com, adrian.hunter@...el.com, kan.liang@...ux.intel.com,
song@...nel.org, ast@...nel.org, daniel@...earbox.net, andrii@...nel.org,
martin.lau@...ux.dev, eddyz87@...il.com, yonghong.song@...ux.dev,
john.fastabend@...il.com, kpsingh@...nel.org, sdf@...ichev.me,
haoluo@...gle.com, linux-perf-users@...r.kernel.org,
linux-kernel@...r.kernel.org, bpf@...r.kernel.org
Subject: Re: [PATCH bpf-next v7 2/2] bpf: Hold the perf callchain entry until
used completely
On 2026/1/10 07:47, Andrii Nakryiko wrote:
> On Tue, Jan 6, 2026 at 8:00 AM Tao Chen <chen.dylane@...ux.dev> wrote:
>>
>> On 2025/12/23 14:29, Tao Chen wrote:
>>> On 2025/12/17 17:33, Tao Chen wrote:
>>>> As Alexei noted, the entry returned by get_perf_callchain() may be
>>>> reused if a task is preempted after the BPF program enters the
>>>> migrate-disabled section. The perf callchain pool only has a small
>>>> stack of entries, so hold each entry until it is used completely:
>>>>
>>>> 1. get the perf callchain entry
>>>> 2. BPF use...
>>>> 3. put the perf callchain entry
>>>>
>>>> And Peter suggested that get_recursion_context() be called with
>>>> preemption disabled, so disable preemption on the BPF side as well.
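>>>>
>>>> A minimal sketch of the pattern (illustrative only; assumes a caller
>>>> like bpf_get_stackid() with map/flags in scope, and the existing
>>>> helpers from kernel/events/callchain.c):
>>>>
>>>> 	int rctx, ret;
>>>> 	struct perf_callchain_entry *entry;
>>>>
>>>> 	/* 1. get: reserve a per-CPU entry; get_recursion_context()
>>>> 	 * touches per-CPU recursion counters, so it must run with
>>>> 	 * preemption disabled.
>>>> 	 */
>>>> 	preempt_disable();
>>>> 	entry = get_callchain_entry(&rctx);
>>>> 	preempt_enable();
>>>> 	if (!entry)
>>>> 		return -EFAULT;
>>>>
>>>> 	/* 2. use: the entry stays reserved even across preemption,
>>>> 	 * since it is only recycled by put_callchain_entry().
>>>> 	 */
>>>> 	ret = __bpf_get_stackid(map, entry, flags);
>>>>
>>>> 	/* 3. put: release the slot so the entry can be reused */
>>>> 	put_callchain_entry(rctx);
>>>> 	return ret;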
>>>>
>>>> Acked-by: Yonghong Song <yonghong.song@...ux.dev>
>>>> Signed-off-by: Tao Chen <chen.dylane@...ux.dev>
>>>> ---
>>>> kernel/bpf/stackmap.c | 68 +++++++++++++++++++++++++++++++++++--------
>>>> 1 file changed, 56 insertions(+), 12 deletions(-)
>>>>
>>>> diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
>>>> index da3d328f5c1..3bdd99a630d 100644
>>>> --- a/kernel/bpf/stackmap.c
>>>> +++ b/kernel/bpf/stackmap.c
>>>> @@ -210,13 +210,14 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
>>>>  }
>>>>  
>>>>  static struct perf_callchain_entry *
>>>> -get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
>>>> +get_callchain_entry_for_task(int *rctx, struct task_struct *task, u32 max_depth)
>>>>  {
>>>>  #ifdef CONFIG_STACKTRACE
>>>>  	struct perf_callchain_entry *entry;
>>>> -	int rctx;
>>>>  
>>>> -	entry = get_callchain_entry(&rctx);
>>>> +	preempt_disable();
>>>> +	entry = get_callchain_entry(rctx);
>>>> +	preempt_enable();
>>>>  	if (!entry)
>>>>  		return NULL;
>>>>  
>>>> @@ -238,8 +239,6 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
>>>>  		to[i] = (u64)(from[i]);
>>>>  	}
>>>>  
>>>> -	put_callchain_entry(rctx);
>>>> -
>>>>  	return entry;
>>>>  #else /* CONFIG_STACKTRACE */
>>>>  	return NULL;
>>>> @@ -320,6 +319,34 @@ static long __bpf_get_stackid(struct bpf_map *map,
>>>>  	return id;
>>>>  }
>>>>  
>>>> +static struct perf_callchain_entry *
>>>> +bpf_get_perf_callchain(int *rctx, struct pt_regs *regs, bool kernel, bool user,
>>>> +		       int max_stack, bool crosstask)
>>>> +{
>>>> +	struct perf_callchain_entry_ctx ctx;
>>>> +	struct perf_callchain_entry *entry;
>>>> +
>>>> +	preempt_disable();
>>>> +	entry = get_callchain_entry(rctx);
>>>> +	preempt_enable();
>>>> +
>>>> +	if (unlikely(!entry))
>>>> +		return NULL;
>>>> +
>>>> +	__init_perf_callchain_ctx(&ctx, entry, max_stack, false);
>>>> +	if (kernel)
>>>> +		__get_perf_callchain_kernel(&ctx, regs);
>>>> +	if (user && !crosstask)
>>>> +		__get_perf_callchain_user(&ctx, regs, 0);
>>>> +
>>>> +	return entry;
>>>> +}
>>>> +
>>>> +static void bpf_put_perf_callchain(int rctx)
>>>> +{
>>>> +	put_callchain_entry(rctx);
>>>> +}
>>>> +
>>>>  BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
>>>>  	   u64, flags)
>>>>  {
>>>> @@ -328,20 +355,25 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
>>>>  	struct perf_callchain_entry *trace;
>>>>  	bool kernel = !user;
>>>>  	u32 max_depth;
>>>> +	int rctx, ret;
>>>>  
>>>>  	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
>>>>  			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
>>>>  		return -EINVAL;
>>>>  
>>>>  	max_depth = stack_map_calculate_max_depth(map->value_size, elem_size, flags);
>>>> -	trace = get_perf_callchain(regs, kernel, user, max_depth,
>>>> -				   false, false, 0);
>>>> +
>>>> +	trace = bpf_get_perf_callchain(&rctx, regs, kernel, user, max_depth,
>>>> +				       false);
>>>>  	if (unlikely(!trace))
>>>>  		/* couldn't fetch the stack trace */
>>>>  		return -EFAULT;
>>>>  
>>>> -	return __bpf_get_stackid(map, trace, flags);
>>>> +	ret = __bpf_get_stackid(map, trace, flags);
>>>> +	bpf_put_perf_callchain(rctx);
>>>> +
>>>> +	return ret;
>>>>  }
>>>>  
>>>>  const struct bpf_func_proto bpf_get_stackid_proto = {
>>>> @@ -435,6 +467,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
>>>>  	bool kernel = !user;
>>>>  	int err = -EINVAL;
>>>>  	u64 *ips;
>>>> +	int rctx;
>>>>  
>>>>  	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
>>>>  			       BPF_F_USER_BUILD_ID)))
>>>> @@ -467,18 +500,26 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
>>>>  		trace = trace_in;
>>>>  		trace->nr = min_t(u32, trace->nr, max_depth);
>>>>  	} else if (kernel && task) {
>>>> -		trace = get_callchain_entry_for_task(task, max_depth);
>>>> +		trace = get_callchain_entry_for_task(&rctx, task, max_depth);
>>>>  	} else {
>>>> -		trace = get_perf_callchain(regs, kernel, user, max_depth,
>>>> -					   crosstask, false, 0);
>>>> +		trace = bpf_get_perf_callchain(&rctx, regs, kernel, user, max_depth,
>>>> +					       crosstask);
>>>>  	}
>>>>  
>>>> -	if (unlikely(!trace) || trace->nr < skip) {
>>>> +	if (unlikely(!trace)) {
>>>>  		if (may_fault)
>>>>  			rcu_read_unlock();
>>>>  		goto err_fault;
>>>>  	}
>>>>  
>>>> +	if (trace->nr < skip) {
>>>> +		if (may_fault)
>>>> +			rcu_read_unlock();
>>>> +		if (!trace_in)
>>>> +			bpf_put_perf_callchain(rctx);
>>>> +		goto err_fault;
>>>> +	}
>>>> +
>>>>  	trace_nr = trace->nr - skip;
>>>>  	copy_len = trace_nr * elem_size;
>>>>  
>>>> @@ -497,6 +538,9 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
>>>>  	if (may_fault)
>>>>  		rcu_read_unlock();
>>>>  
>>>> +	if (!trace_in)
>>>> +		bpf_put_perf_callchain(rctx);
>>>> +
>>>>  	if (user_build_id)
>>>>  		stack_map_get_build_id_offset(buf, trace_nr, user, may_fault);
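>>>>
>>>> A condensed view of the ownership rule in __bpf_get_stack() above
>>>> (sketch only, other branches and error paths elided): the put is
>>>> guarded by !trace_in because a caller-supplied entry never came
>>>> from the callchain pool and must not be released here.
>>>>
>>>> 	if (trace_in) {
>>>> 		/* caller-owned entry: nothing was taken from the pool */
>>>> 		trace = trace_in;
>>>> 	} else {
>>>> 		/* pool entry: pair with bpf_put_perf_callchain(rctx) */
>>>> 		trace = bpf_get_perf_callchain(&rctx, regs, kernel, user,
>>>> 					       max_depth, crosstask);
>>>> 	}
>>>> 	/* ... copy the trace out ... */
>>>> 	if (!trace_in)
>>>> 		bpf_put_perf_callchain(rctx);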
>>>
>>> Hi Peter,
>>>
>>> As Alexei said, the patch needs your ack; please review it again, thanks.
>>>
>>
>> ping...
>
> Peter, if I understand correctly, this will go through the bpf-next
> tree, but it would be great if you could take a look and confirm that
> nothing here is broken overall. Thanks!
>
>>
>> --
>> Best Regards
>> Tao Chen
Hi Andrii, Peter,

It appears that the code does not require a rebase, and the latest CI
run passed. Looking forward to your response. Thanks.
CI has tested the following submission:
Status: SUCCESS
Name: [RESEND,bpf-next,v7,0/2] Pass external callchain entry to get_perf_callchain
Patchwork:
https://patchwork.kernel.org/project/netdevbpf/list/?series=1034091&state=*
Matrix: https://github.com/kernel-patches/bpf/actions/runs/21051611369
--
Best Regards
Tao Chen