Message-ID: <3aae1f51-b1d5-44c2-89d4-242887df34a7@linux.dev>
Date: Wed, 17 Dec 2025 17:11:59 +0800
From: Tao Chen <chen.dylane@...ux.dev>
To: peterz@...radead.org, mingo@...hat.com, acme@...nel.org,
 namhyung@...nel.org, mark.rutland@....com,
 alexander.shishkin@...ux.intel.com, jolsa@...nel.org, irogers@...gle.com,
 adrian.hunter@...el.com, kan.liang@...ux.intel.com, song@...nel.org,
 ast@...nel.org, daniel@...earbox.net, andrii@...nel.org,
 martin.lau@...ux.dev, eddyz87@...il.com, yonghong.song@...ux.dev,
 john.fastabend@...il.com, kpsingh@...nel.org, sdf@...ichev.me,
 haoluo@...gle.com
Cc: linux-perf-users@...r.kernel.org, linux-kernel@...r.kernel.org,
 bpf@...r.kernel.org
Subject: Re: [PATCH bpf-next v7 2/2] bpf: Hold the perf callchain entry until
 used completely

On 2025/12/17 13:22, Tao Chen wrote:
> On 2025/12/17 13:12, Tao Chen wrote:
>> As Alexei noted, the values returned by get_perf_callchain() may be
>> reused if a task is preempted after the BPF program enters the
>> migrate-disabled mode. perf_callchain_entries holds only a small
>> stack of entries, so hold the entry until it is used completely:
>>
>> 1. get the perf callchain entry
>> 2. the BPF program uses the entry
>> 3. put the perf callchain entry
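>>
>> As a minimal sketch (mirroring the bpf_get_stackid() hunk below,
>> with the flags validation elided), a caller now looks like:
>>
>>     int rctx, ret;
>>     struct perf_callchain_entry *trace;
>>
>>     /* 1. get: reserve a per-CPU callchain entry */
>>     trace = bpf_get_perf_callchain(&rctx, regs, kernel, user,
>>                        max_depth, false);
>>     if (unlikely(!trace))
>>         return -EFAULT;
>>     /* 2. use: the entry stays reserved while BPF consumes it */
>>     ret = __bpf_get_stackid(map, trace, flags);
>>     /* 3. put: release the entry only after it is used completely */
>>     bpf_put_perf_callchain(rctx);
>>     return ret;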
>>
>> And Peter suggested that get_recursion_context() be used with
>> preemption disabled, so we disable preemption on the BPF side.
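>>
>> Concretely (as in the hunks below), the reservation itself is done
>> with preemption disabled, since get_recursion_context() relies on
>> per-CPU state:
>>
>>     preempt_disable();
>>     entry = get_callchain_entry(rctx);
>>     preempt_enable();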
>>
>> Acked-by: Yonghong Song <yonghong.song@...ux.dev>
>> Signed-off-by: Tao Chen <chen.dylane@...ux.dev>
>> ---
>>   kernel/bpf/stackmap.c | 67 +++++++++++++++++++++++++++++++++++--------
>>   1 file changed, 55 insertions(+), 12 deletions(-)
>>
>> diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
>> index 2365541c81d..64ace4ed50e 100644
>> --- a/kernel/bpf/stackmap.c
>> +++ b/kernel/bpf/stackmap.c
>> @@ -210,13 +210,14 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
>>   }
>>   static struct perf_callchain_entry *
>> -get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
>> +get_callchain_entry_for_task(int *rctx, struct task_struct *task, u32 max_depth)
>>   {
>>   #ifdef CONFIG_STACKTRACE
>>       struct perf_callchain_entry *entry;
>> -    int rctx;
>> -    entry = get_callchain_entry(&rctx);
>> +    preempt_disable();
>> +    entry = get_callchain_entry(rctx);
>> +    preempt_enable();
>>       if (!entry)
>>           return NULL;
>> @@ -238,8 +239,6 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
>>               to[i] = (u64)(from[i]);
>>       }
>> -    put_callchain_entry(rctx);
>> -
>>       return entry;
>>   #else /* CONFIG_STACKTRACE */
>>       return NULL;
>> @@ -320,6 +319,34 @@ static long __bpf_get_stackid(struct bpf_map *map,
>>       return id;
>>   }
>> +static struct perf_callchain_entry *
>> +bpf_get_perf_callchain(int *rctx, struct pt_regs *regs, bool kernel, bool user,
>> +               int max_stack, bool crosstask)
>> +{
>> +    struct perf_callchain_entry_ctx ctx;
>> +    struct perf_callchain_entry *entry;
>> +
>> +    preempt_disable();
>> +    entry = get_callchain_entry(rctx);
>> +    preempt_enable();
>> +
>> +    if (unlikely(!entry))
>> +        return NULL;
>> +
>> +    __init_perf_callchain_ctx(&ctx, entry, max_stack, false);
>> +    if (kernel)
>> +        __get_perf_callchain_kernel(&ctx, regs);
>> +    if (user && !crosstask)
>> +        __get_perf_callchain_user(&ctx, regs);
>> +
>> +    return entry;
>> +}
>> +
>> +static void bpf_put_perf_callchain(int rctx)
>> +{
>> +    put_callchain_entry(rctx);
>> +}
>> +
>>   BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
>>          u64, flags)
>>   {
>> @@ -328,20 +355,24 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
>>       struct perf_callchain_entry *trace;
>>       bool kernel = !user;
>>       u32 max_depth;
>> +    int rctx, ret;
>>       if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
>>                      BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
>>           return -EINVAL;
>>       max_depth = stack_map_calculate_max_depth(map->value_size, elem_size, flags);
>> -    trace = get_perf_callchain(regs, kernel, user, max_depth,
>> -                   false, false);
>> +    trace = bpf_get_perf_callchain(&rctx, regs, kernel, user, max_depth,
>> +                       false);
>>       if (unlikely(!trace))
>>           /* couldn't fetch the stack trace */
>>           return -EFAULT;
>> -    return __bpf_get_stackid(map, trace, flags);
>> +    ret = __bpf_get_stackid(map, trace, flags);
>> +    bpf_put_perf_callchain(rctx);
>> +
>> +    return ret;
>>   }
>>   const struct bpf_func_proto bpf_get_stackid_proto = {
>> @@ -435,6 +466,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
>>       bool kernel = !user;
>>       int err = -EINVAL;
>>       u64 *ips;
>> +    int rctx;
>>       if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
>>                      BPF_F_USER_BUILD_ID)))
>> @@ -467,18 +499,26 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
>>           trace = trace_in;
>>           trace->nr = min_t(u32, trace->nr, max_depth);
>>       } else if (kernel && task) {
>> -        trace = get_callchain_entry_for_task(task, max_depth);
>> +        trace = get_callchain_entry_for_task(&rctx, task, max_depth);
>>       } else {
>> -        trace = get_perf_callchain(regs, kernel, user, max_depth,
>> -                       crosstask, false);
>> +        trace = bpf_get_perf_callchain(&rctx, regs, kernel, user, max_depth,
>> +                           crosstask);
>>       }
>> -    if (unlikely(!trace) || trace->nr < skip) {
>> +    if (unlikely(!trace)) {
>>           if (may_fault)
>>               rcu_read_unlock();
>>           goto err_fault;
>>       }
>> +    if (trace->nr < skip) {
>> +        if (may_fault)
>> +            rcu_read_unlock();
>> +        if (!trace_in)
>> +            bpf_put_perf_callchain(rctx);
>> +        goto err_fault;
>> +    }
>> +
>>       trace_nr = trace->nr - skip;
>>       copy_len = trace_nr * elem_size;
>> @@ -497,6 +537,9 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
>>       if (may_fault)
>>           rcu_read_unlock();
>> +    if (!trace_in)
>> +        bpf_put_perf_callchain(rctx);
>> +
>>       if (user_build_id)
>>           stack_map_get_build_id_offset(buf, trace_nr, user, may_fault);
> 
> Hi Peter,
> 
> As requested by Alexei, I have re-sent the v7 version. Compared with 
> v6, the only change is the added Acked-by tag in patch 2. Following 
> your previous suggestions, patch 1 has been modified based on your 
> earlier patch, and patch 2 adds preempt_disable() on the eBPF side; 
> this does not affect the original perf logic. Please review it again, 
> thank you.
> 

Sorry, there are code conflicts. I will resend it.

-- 
Best Regards
Tao Chen
