Message-ID: <fbabac62-4bc1-4c11-9316-ed51ae9dbb0d@linux.dev>
Date: Thu, 7 Aug 2025 12:01:57 -0700
From: Yonghong Song <yonghong.song@...ux.dev>
To: Arnaud Lecomte <contact@...aud-lcm.com>
Cc: andrii@...nel.org, ast@...nel.org, bpf@...r.kernel.org,
daniel@...earbox.net, eddyz87@...il.com, haoluo@...gle.com,
john.fastabend@...il.com, jolsa@...nel.org, kpsingh@...nel.org,
linux-kernel@...r.kernel.org, martin.lau@...ux.dev, sdf@...ichev.me,
song@...nel.org, syzbot+c9b724fbb41cf2538b7b@...kaller.appspotmail.com,
syzkaller-bugs@...glegroups.com
Subject: Re: [PATCH 1/2] bpf: refactor max_depth computation in
bpf_get_stack()
On 8/7/25 10:50 AM, Arnaud Lecomte wrote:
> Add a new helper function stack_map_calculate_max_depth() that
> computes the max depth for a stackmap.
>
> Signed-off-by: Arnaud Lecomte <contact@...aud-lcm.com>
> ---
> kernel/bpf/stackmap.c | 38 ++++++++++++++++++++++++++++++--------
> 1 file changed, 30 insertions(+), 8 deletions(-)
>
> diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
> index 3615c06b7dfa..14e034045310 100644
> --- a/kernel/bpf/stackmap.c
> +++ b/kernel/bpf/stackmap.c
> @@ -42,6 +42,31 @@ static inline int stack_map_data_size(struct bpf_map *map)
> sizeof(struct bpf_stack_build_id) : sizeof(u64);
> }
>
> +/**
> + * stack_map_calculate_max_depth - Calculate maximum allowed stack trace depth
> + * @map_size: Size of the buffer/map value in bytes
> + * @elem_size: Size of each stack trace element
> + * @map_flags: BPF stack trace flags (BPF_F_USER_STACK, BPF_F_USER_BUILD_ID, ...)
> + *
> + * Return: Maximum number of stack trace entries that can be safely stored,
> + * or -EINVAL if size is not a multiple of elem_size
-EINVAL is not needed here. See below.
> + */
> +static u32 stack_map_calculate_max_depth(u32 map_size, u32 map_elem_size, u64 map_flags)
map_elem_size -> elem_size
> +{
> + u32 max_depth;
> + u32 skip = map_flags & BPF_F_SKIP_FIELD_MASK;
reverse Christmas tree?
> +
> + if (unlikely(map_size%map_elem_size))
> + return -EINVAL;
The above should not be here. The 'map_size % map_elem_size' check is only
needed for bpf_get_stack(); it is not applicable to bpf_get_stackid().
> +
> + max_depth = map_size / map_elem_size;
> + max_depth += skip;
> + if (max_depth > sysctl_perf_event_max_stack)
> + return sysctl_perf_event_max_stack;
> +
> + return max_depth;
> +}
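
To illustrate the points above (elem_size naming, reverse Christmas tree,
no modulo check and hence no -EINVAL), the helper could end up looking
roughly like below. Just a sketch of what I mean, not a tested patch:

static u32 stack_map_calculate_max_depth(u32 map_size, u32 elem_size, u64 map_flags)
{
	u32 skip = map_flags & BPF_F_SKIP_FIELD_MASK;
	u32 max_depth;

	/* the size % elem_size check stays in __bpf_get_stack() */
	max_depth = map_size / elem_size;
	max_depth += skip;
	if (max_depth > sysctl_perf_event_max_stack)
		return sysctl_perf_event_max_stack;

	return max_depth;
}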
> +
> static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
> {
> u64 elem_size = sizeof(struct stack_map_bucket) +
> @@ -406,7 +431,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
> struct perf_callchain_entry *trace_in,
> void *buf, u32 size, u64 flags, bool may_fault)
> {
> - u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
> + u32 trace_nr, copy_len, elem_size, max_depth;
> bool user_build_id = flags & BPF_F_USER_BUILD_ID;
> bool crosstask = task && task != current;
> u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
> @@ -423,8 +448,6 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
> goto clear;
>
> elem_size = user_build_id ? sizeof(struct bpf_stack_build_id) : sizeof(u64);
> - if (unlikely(size % elem_size))
> - goto clear;
Please keep this one.
>
> /* cannot get valid user stack for task without user_mode regs */
> if (task && user && !user_mode(regs))
> @@ -438,10 +461,9 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
> goto clear;
> }
>
> - num_elem = size / elem_size;
> - max_depth = num_elem + skip;
> - if (sysctl_perf_event_max_stack < max_depth)
> - max_depth = sysctl_perf_event_max_stack;
> + max_depth = stack_map_calculate_max_depth(size, elem_size, flags);
> + if (max_depth < 0)
> + goto err_fault;
max_depth is a u32, so it is never less than 0 and the above check can
never trigger.
>
> if (may_fault)
> rcu_read_lock(); /* need RCU for perf's callchain below */
> @@ -461,7 +483,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
> }
>
> trace_nr = trace->nr - skip;
> - trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
> + trace_nr = min(trace_nr, max_depth - skip);
> copy_len = trace_nr * elem_size;
>
> ips = trace->ip + skip;
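
With that, the caller side in __bpf_get_stack() would be roughly like below,
keeping the size % elem_size check here and dropping the signed comparison on
max_depth. Again only a sketch (fragment) of what I have in mind:

	elem_size = user_build_id ? sizeof(struct bpf_stack_build_id) : sizeof(u64);
	/* keep this check in the caller; it is not needed for bpf_get_stackid() */
	if (unlikely(size % elem_size))
		goto clear;

	...

	/* returns u32 and is already clamped, so no error check is needed */
	max_depth = stack_map_calculate_max_depth(size, elem_size, flags);

	if (may_fault)
		rcu_read_lock(); /* need RCU for perf's callchain below */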