Message-ID: <CAEf4BzbLJtMGaZoFAaAgnNXe8GCStsw+kZ_3hWoGfySWZ6B5mg@mail.gmail.com>
Date: Fri, 26 Sep 2025 11:52:53 -0700
From: Andrii Nakryiko <andrii.nakryiko@...il.com>
To: Tao Chen <chen.dylane@...ux.dev>
Cc: song@...nel.org, jolsa@...nel.org, ast@...nel.org, daniel@...earbox.net, 
	andrii@...nel.org, martin.lau@...ux.dev, eddyz87@...il.com, 
	yonghong.song@...ux.dev, john.fastabend@...il.com, kpsingh@...nel.org, 
	sdf@...ichev.me, haoluo@...gle.com, bpf@...r.kernel.org, 
	linux-kernel@...r.kernel.org
Subject: Re: [PATCH bpf-next v2] bpf: Add preempt_disable to protect get_perf_callchain

On Fri, Sep 26, 2025 at 8:40 AM Tao Chen <chen.dylane@...ux.dev> wrote:
>
> As Alexei noted, the perf_callchain_entry returned by
> get_perf_callchain() may be reused if the task is preempted after the
> BPF program enters the migrate-disabled state. Therefore, use per-CPU
> bpf_perf_callchain_entries, similarly to bpf_try_get_buffers(), to
> preserve the current task's callchain and prevent it from being
> overwritten by preempting tasks, and also add preempt_disable() to
> protect get_perf_callchain().
>
> Reported-by: Alexei Starovoitov <ast@...nel.org>
> Closes: https://lore.kernel.org/bpf/CAADnVQ+s8B7-fvR1TNO-bniSyKv57cH_ihRszmZV7pQDyV=VDQ@mail.gmail.com
> Signed-off-by: Tao Chen <chen.dylane@...ux.dev>
> ---
>  kernel/bpf/stackmap.c | 76 ++++++++++++++++++++++++++++++++++---------
>  1 file changed, 61 insertions(+), 15 deletions(-)
>
> Change list:
>  v1 -> v2:
>   From Alexei
>   - create per-CPU entries to preserve the current task's callchain,
>     similarly to bpf_try_get_buffers().
>   v1: https://lore.kernel.org/bpf/20250922075333.1452803-1-chen.dylane@linux.dev
>
> diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
> index 2e182a3ac4c..8788c219926 100644
> --- a/kernel/bpf/stackmap.c
> +++ b/kernel/bpf/stackmap.c
> @@ -31,6 +31,55 @@ struct bpf_stack_map {
>         struct stack_map_bucket *buckets[] __counted_by(n_buckets);
>  };
>
> +struct bpf_perf_callchain_entry {
> +       u64 nr;
> +       u64 ip[PERF_MAX_STACK_DEPTH];
> +};
> +
> +#define MAX_PERF_CALLCHAIN_PREEMPT 3
> +static DEFINE_PER_CPU(struct bpf_perf_callchain_entry[MAX_PERF_CALLCHAIN_PREEMPT],
> +                     bpf_perf_callchain_entries);
> +static DEFINE_PER_CPU(int, bpf_perf_callchain_preempt_cnt);
> +
> +static int bpf_get_perf_callchain(struct bpf_perf_callchain_entry **entry,
> +                                 struct pt_regs *regs, u32 init_nr, bool kernel,
> +                                 bool user, u32 max_stack, bool crosstack,
> +                                 bool add_mark)
> +{
> +       struct bpf_perf_callchain_entry *bpf_entry;
> +       struct perf_callchain_entry *perf_entry;
> +       int preempt_cnt;
> +
> +       preempt_cnt = this_cpu_inc_return(bpf_perf_callchain_preempt_cnt);
> +       if (WARN_ON_ONCE(preempt_cnt > MAX_PERF_CALLCHAIN_PREEMPT)) {
> +               this_cpu_dec(bpf_perf_callchain_preempt_cnt);
> +               return -EBUSY;
> +       }
> +
> +       bpf_entry = this_cpu_ptr(&bpf_perf_callchain_entries[preempt_cnt - 1]);
> +
> +       preempt_disable();
> +       perf_entry = get_perf_callchain(regs, init_nr, kernel, user, max_stack,
> +                                       crosstack, add_mark);
> +       if (unlikely(!perf_entry)) {
> +               preempt_enable();
> +               this_cpu_dec(bpf_perf_callchain_preempt_cnt);
> +               return -EFAULT;
> +       }
> +       memcpy(bpf_entry, perf_entry, sizeof(u64) * (perf_entry->nr + 1));

N copies of a stack trace are not good enough, so let's have N + 1 now :)

If we are going with our own buffers, we need to teach
get_perf_callchain() to accept that buffer directly, so we can avoid
this unnecessary copy.
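
Something along these lines, maybe (completely untested sketch;
get_perf_callchain_into() is a made-up name, and the real change would
also have to thread the buffer through the perf_callchain_kernel() /
perf_callchain_user() internals):

        /* hypothetical kernel/events/callchain.c variant that fills a
         * caller-provided entry instead of grabbing one of perf's
         * per-cpu callchain buffers
         */
        struct perf_callchain_entry *
        get_perf_callchain_into(struct perf_callchain_entry *entry,
                                struct pt_regs *regs, u32 init_nr,
                                bool kernel, bool user, u32 max_stack,
                                bool crosstask, bool add_mark);

Then bpf_get_perf_callchain() could pass
(struct perf_callchain_entry *)bpf_entry straight in and the memcpy()
goes away.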

Also, I know it's about 1KB, but it would be so simple and efficient to
just have this bpf_perf_callchain_entry on the stack. The kernel has a
16KB stack, right? It feels like, for something like this, using 1KB of
the stack to simplify and speed up stack trace capture is a good enough
reason.
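
Roughly like this (untested sketch; flag validation and max_depth
clamping stay as in the current code, and the memcpy() of nr + 1 u64s
mirrors this patch):

        BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
                   u64, flags)
        {
                u32 max_depth = map->value_size / stack_map_data_size(map);
                bool user = flags & BPF_F_USER_STACK;
                bool kernel = !user;
                struct bpf_perf_callchain_entry entry; /* ~1KB of stack */
                struct perf_callchain_entry *trace;

                /* ... flag validation and max_depth clamping as before ... */

                preempt_disable();
                trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
                                           false, false);
                /* snapshot perf's per-cpu entry before preemption is
                 * re-enabled, so a preempting task can't clobber it
                 */
                if (trace)
                        memcpy(&entry, trace, sizeof(u64) * (trace->nr + 1));
                preempt_enable();

                if (unlikely(!trace))
                        return -EFAULT;

                return __bpf_get_stackid(map, (struct perf_callchain_entry *)&entry,
                                         flags);
        }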

> +       *entry = bpf_entry;
> +       preempt_enable();
> +
> +       return 0;
> +}
> +
> +static void bpf_put_perf_callchain(void)
> +{
> +       if (WARN_ON_ONCE(this_cpu_read(bpf_perf_callchain_preempt_cnt) == 0))
> +               return;
> +       this_cpu_dec(bpf_perf_callchain_preempt_cnt);
> +}
> +
>  static inline bool stack_map_use_build_id(struct bpf_map *map)
>  {
>         return (map->map_flags & BPF_F_STACK_BUILD_ID);
> @@ -303,8 +352,9 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
>         u32 max_depth = map->value_size / stack_map_data_size(map);
>         u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
>         bool user = flags & BPF_F_USER_STACK;
> -       struct perf_callchain_entry *trace;
> +       struct bpf_perf_callchain_entry *trace;
>         bool kernel = !user;
> +       int err;
>
>         if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
>                                BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
> @@ -314,14 +364,15 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
>         if (max_depth > sysctl_perf_event_max_stack)
>                 max_depth = sysctl_perf_event_max_stack;
>
> -       trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
> -                                  false, false);
> +       err = bpf_get_perf_callchain(&trace, regs, 0, kernel, user, max_depth,
> +                                    false, false);
> +       if (err)
> +               return err;
>
> -       if (unlikely(!trace))
> -               /* couldn't fetch the stack trace */
> -               return -EFAULT;
> +       err = __bpf_get_stackid(map, (struct perf_callchain_entry *)trace, flags);
> +       bpf_put_perf_callchain();
>
> -       return __bpf_get_stackid(map, trace, flags);
> +       return err;
>  }
>
>  const struct bpf_func_proto bpf_get_stackid_proto = {
> @@ -443,8 +494,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
>         if (sysctl_perf_event_max_stack < max_depth)
>                 max_depth = sysctl_perf_event_max_stack;
>
> -       if (may_fault)
> -               rcu_read_lock(); /* need RCU for perf's callchain below */
> +       preempt_disable();
>
>         if (trace_in)
>                 trace = trace_in;
> @@ -455,8 +505,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
>                                            crosstask, false);
>
>         if (unlikely(!trace) || trace->nr < skip) {
> -               if (may_fault)
> -                       rcu_read_unlock();
> +               preempt_enable();
>                 goto err_fault;
>         }
>
> @@ -474,10 +523,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
>         } else {
>                 memcpy(buf, ips, copy_len);
>         }
> -
> -       /* trace/ips should not be dereferenced after this point */
> -       if (may_fault)
> -               rcu_read_unlock();
> +       preempt_enable();
>
>         if (user_build_id)
>                 stack_map_get_build_id_offset(buf, trace_nr, user, may_fault);

Really it's just the build_id resolution that can take a while, which
is why we are trying to avoid keeping preemption disabled around it.
But for the non-build_id case, can we avoid the extra copying?
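
I.e., if I'm reading the intent right, something like this shape (rough
sketch using __bpf_get_stack()'s identifiers; the bpf_stack_build_id
element layout handling is glossed over):

        preempt_disable();
        trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
                                   crosstask, false);
        if (unlikely(!trace || trace->nr < skip)) {
                preempt_enable();
                goto err_fault;
        }
        trace_nr = min_t(u32, trace->nr - skip, num_elem);
        copy_len = trace_nr * elem_size;
        /* single copy, straight into the destination buffer
         * (non-build_id case: elem_size == sizeof(u64))
         */
        memcpy(buf, trace->ip + skip, copy_len);
        preempt_enable();

        /* only the sleepable build_id pass runs preemptible, and it
         * works on buf, not on perf's (possibly reused) entry
         */
        if (user_build_id)
                stack_map_get_build_id_offset(buf, trace_nr, user, may_fault);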

> --
> 2.48.1
>
