Message-ID: <bfffe2d9-1d2a-4376-abb6-a8746a8a3a69@linux.dev>
Date: Thu, 7 Aug 2025 12:05:23 -0700
From: Yonghong Song <yonghong.song@...ux.dev>
To: Arnaud Lecomte <contact@...aud-lcm.com>
Cc: andrii@...nel.org, ast@...nel.org, bpf@...r.kernel.org,
 daniel@...earbox.net, eddyz87@...il.com, haoluo@...gle.com,
 john.fastabend@...il.com, jolsa@...nel.org, kpsingh@...nel.org,
 linux-kernel@...r.kernel.org, martin.lau@...ux.dev, sdf@...ichev.me,
 song@...nel.org, syzbot+c9b724fbb41cf2538b7b@...kaller.appspotmail.com,
 syzkaller-bugs@...glegroups.com
Subject: Re: [PATCH 2/2] bpf: fix stackmap overflow check in
 __bpf_get_stackid()



On 8/7/25 10:52 AM, Arnaud Lecomte wrote:
> Syzkaller reported a KASAN slab-out-of-bounds write in __bpf_get_stackid()
> when copying stack trace data. The issue occurs when the perf trace
> contains more stack entries than the stack map bucket can hold,
> leading to an out-of-bounds write in the bucket's data array.
>
> Reported-by: syzbot+c9b724fbb41cf2538b7b@...kaller.appspotmail.com
> Closes: https://syzkaller.appspot.com/bug?extid=c9b724fbb41cf2538b7b
> Signed-off-by: Arnaud Lecomte <contact@...aud-lcm.com>
> ---
>   kernel/bpf/stackmap.c | 26 +++++++++++++++-----------
>   1 file changed, 15 insertions(+), 11 deletions(-)
>
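
[For readers following along: a stack map bucket stores at most
map->value_size / stack_map_data_size(map) entries, but before this fix
__bpf_get_stackid() copied trace->nr - skip entries unconditionally. A
minimal userspace sketch of the failure arithmetic; the values below are
illustrative, not taken from the syzkaller report:

/* Illustrative sketch only; values are made up to show the overflow. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t value_size = 8 * sizeof(uint64_t);          /* bucket data: 64 bytes */
	uint32_t max_depth  = value_size / sizeof(uint64_t); /* bucket holds 8 ips */
	uint32_t skip       = 0;
	uint32_t trace_nr   = 32;                            /* perf captured 32 frames */

	/* Pre-fix copy size: 32 * 8 = 256 bytes into a 64-byte array,
	 * hence the KASAN slab-out-of-bounds write.
	 */
	printf("unclamped copy: %u bytes into %u-byte bucket\n",
	       trace_nr * (uint32_t)sizeof(uint64_t), value_size);

	/* The fix clamps trace_nr before the copy, as in the hunk below. */
	if (trace_nr > max_depth - skip)
		trace_nr = max_depth - skip;
	printf("clamped copy:   %u bytes\n",
	       trace_nr * (uint32_t)sizeof(uint64_t));
	return 0;
}
]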
> diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
> index 14e034045310..d7ef840971f0 100644
> --- a/kernel/bpf/stackmap.c
> +++ b/kernel/bpf/stackmap.c
> @@ -250,7 +250,7 @@ get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
>   }
>   
>   static long __bpf_get_stackid(struct bpf_map *map,
> -			      struct perf_callchain_entry *trace, u64 flags)
> +			      struct perf_callchain_entry *trace, u64 flags, u32 max_depth)
>   {
>   	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
>   	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
> @@ -266,6 +266,8 @@ static long __bpf_get_stackid(struct bpf_map *map,
>   
>   	trace_nr = trace->nr - skip;
>   	trace_len = trace_nr * sizeof(u64);
> +	trace_nr = min(trace_nr, max_depth - skip);
> +
>   	ips = trace->ip + skip;
>   	hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
>   	id = hash & (smap->n_buckets - 1);
> @@ -325,19 +327,19 @@ static long __bpf_get_stackid(struct bpf_map *map,
>   BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
>   	   u64, flags)
>   {
> -	u32 max_depth = map->value_size / stack_map_data_size(map);
> -	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
> +	u32 elem_size = stack_map_data_size(map);
>   	bool user = flags & BPF_F_USER_STACK;
>   	struct perf_callchain_entry *trace;
>   	bool kernel = !user;
> +	u32 max_depth;
>   
>   	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
>   			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
>   		return -EINVAL;
>   
> -	max_depth += skip;
> -	if (max_depth > sysctl_perf_event_max_stack)
> -		max_depth = sysctl_perf_event_max_stack;
> +	max_depth = stack_map_calculate_max_depth(map->value_size, elem_size, flags);
> +	if (max_depth < 0)
> +		return -EFAULT;

the above condition is not needed: max_depth is a u32, so
max_depth < 0 is always false (see the short demo below).
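
[A minimal standalone demo of why the check is dead code, assuming only
that max_depth has an unsigned 32-bit type; gcc flags this pattern with
-Wtype-limits:

/* Demo: an unsigned value compared with '< 0' is tautologically false.
 * Build with "gcc -Wtype-limits demo.c" to see the compiler warn.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t max_depth = (uint32_t)-1; /* "negative" wraps to 4294967295 */

	if (max_depth < 0)                 /* always false for unsigned types */
		puts("unreachable");
	else
		printf("max_depth = %u\n", max_depth);
	return 0;
}
]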

>   
>   	trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
>   				   false, false);
> @@ -346,7 +348,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
>   		/* couldn't fetch the stack trace */
>   		return -EFAULT;
>   
> -	return __bpf_get_stackid(map, trace, flags);
> +	return __bpf_get_stackid(map, trace, flags, max_depth);
>   }
>   
>   const struct bpf_func_proto bpf_get_stackid_proto = {
> @@ -378,6 +380,7 @@ BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
>   	bool kernel, user;
>   	__u64 nr_kernel;
>   	int ret;
> +	u32 elem_size, pe_max_depth;

pe_max_depth -> max_depth.

>   
>   	/* perf_sample_data doesn't have callchain, use bpf_get_stackid */
>   	if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN))
> @@ -396,24 +399,25 @@ BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
>   		return -EFAULT;
>   
>   	nr_kernel = count_kernel_ip(trace);
> -
> +	elem_size = stack_map_data_size(map);
>   	if (kernel) {
>   		__u64 nr = trace->nr;
>   
>   		trace->nr = nr_kernel;
> -		ret = __bpf_get_stackid(map, trace, flags);
> +		pe_max_depth = stack_map_calculate_max_depth(map->value_size, elem_size, flags);
> +		ret = __bpf_get_stackid(map, trace, flags, pe_max_depth);
>   
>   		/* restore nr */
>   		trace->nr = nr;
>   	} else { /* user */
>   		u64 skip = flags & BPF_F_SKIP_FIELD_MASK;
> -

please keep an empty line here.

>   		skip += nr_kernel;
>   		if (skip > BPF_F_SKIP_FIELD_MASK)
>   			return -EFAULT;
>   
>   		flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
> -		ret = __bpf_get_stackid(map, trace, flags);
> +		pe_max_depth = stack_map_calculate_max_depth(map->value_size, elem_size, flags);
> +		ret = __bpf_get_stackid(map, trace, flags, pe_max_depth);
>   	}
>   	return ret;
>   }
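
[For readers without patch 1/2 in front of them: a sketch of what
stack_map_calculate_max_depth() presumably does, reconstructed from the
open-coded logic this patch removes from bpf_get_stackid(). The real
helper is defined in the companion patch and may differ:

/* Hypothetical reconstruction from the removed lines above; the
 * actual helper is introduced in patch 1/2 of this series.
 */
static u32 stack_map_calculate_max_depth(u32 size, u32 elem_size, u64 flags)
{
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	u32 max_depth = size / elem_size;

	max_depth += skip;
	if (max_depth > sysctl_perf_event_max_stack)
		max_depth = sysctl_perf_event_max_stack;

	return max_depth;
}
]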

