Message-ID: <20251012135649.59492-1-contact@arnaud-lcm.com>
Date: Sun, 12 Oct 2025 14:56:49 +0100
From: Arnaud Lecomte <contact@arnaud-lcm.com>
To: syzbot+c9b724fbb41cf2538b7b@syzkaller.appspotmail.com
Cc: bpf@vger.kernel.org,
	linux-kernel@vger.kernel.org,
	netdev@vger.kernel.org,
	syzkaller-bugs@googlegroups.com,
	contact@arnaud-lcm.com
Subject: test

#syz test
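
Note: both hunks below replace the open-coded computation and clamping
of max_depth with a call to stack_map_calculate_max_depth(). The hunk
that introduces this helper is not included in this message; judging
only from its call sites and the code it replaces, it presumably
reduces to something like the following sketch (an assumption, not
necessarily the exact definition from the patch):

static u32 stack_map_calculate_max_depth(u32 size, u32 elem_size, u64 flags)
{
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	u32 max_depth;

	/* number of entries the buffer can hold, plus the skipped frames */
	max_depth = size / elem_size;
	max_depth += skip;

	/* never request more frames than the perf sysctl allows */
	if (max_depth > sysctl_perf_event_max_stack)
		return sysctl_perf_event_max_stack;

	return max_depth;
}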

diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 3615c06b7dfa..c0ee51db8eed 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
 BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
           u64, flags)
 {
-       u32 max_depth = map->value_size / stack_map_data_size(map);
-       u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
+       u32 elem_size = stack_map_data_size(map);
        bool user = flags & BPF_F_USER_STACK;
        struct perf_callchain_entry *trace;
        bool kernel = !user;
+       u32 max_depth;

        if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                               BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
                return -EINVAL;

-       max_depth += skip;
-       if (max_depth > sysctl_perf_event_max_stack)
-               max_depth = sysctl_perf_event_max_stack;
-
+       max_depth = stack_map_calculate_max_depth(map->value_size, elem_size, flags);
        trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
                                   false, false);

@@ -371,15 +391,11 @@ BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
                return -EFAULT;

        nr_kernel = count_kernel_ip(trace);
+       __u64 nr = trace->nr; /* save original */

        if (kernel) {
-               __u64 nr = trace->nr;
-
                trace->nr = nr_kernel;
                ret = __bpf_get_stackid(map, trace, flags);
-
-               /* restore nr */
-               trace->nr = nr;
        } else { /* user */
                u64 skip = flags & BPF_F_SKIP_FIELD_MASK;

@@ -390,6 +406,10 @@ BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
                flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
                ret = __bpf_get_stackid(map, trace, flags);
        }
+
+       /* restore nr */
+       trace->nr = nr;
+
        return ret;
 }

@@ -406,7 +426,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
                            struct perf_callchain_entry *trace_in,
                            void *buf, u32 size, u64 flags, bool may_fault)
 {
-       u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
+       u32 trace_nr, copy_len, elem_size, max_depth;
        bool user_build_id = flags & BPF_F_USER_BUILD_ID;
        bool crosstask = task && task != current;
        u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
@@ -438,21 +458,20 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
                goto clear;
        }

-       num_elem = size / elem_size;
-       max_depth = num_elem + skip;
-       if (sysctl_perf_event_max_stack < max_depth)
-               max_depth = sysctl_perf_event_max_stack;
+       max_depth = stack_map_calculate_max_depth(size, elem_size, flags);

        if (may_fault)
                rcu_read_lock(); /* need RCU for perf's callchain below */

-       if (trace_in)
+       if (trace_in) {
                trace = trace_in;
-       else if (kernel && task)
+               trace->nr = min_t(u32, trace->nr, max_depth);
+       } else if (kernel && task) {
                trace = get_callchain_entry_for_task(task, max_depth);
-       else
+       } else {
                trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
                                           crosstask, false);
+       }

        if (unlikely(!trace) || trace->nr < skip) {
                if (may_fault)
@@ -461,7 +480,6 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
        }

        trace_nr = trace->nr - skip;
-       trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
        copy_len = trace_nr * elem_size;

        ips = trace->ip + skip;
--
2.47.3
