Message-ID: <CACkPEmne--T_3bdwJEy8-GEYm4EJF60xSoqbrQ-m_LuZhJhCuQ@mail.gmail.com>
Date: Tue, 27 Jan 2026 16:29:00 -0500
From: Henry Zhang <zeri@...ch.edu>
To: Qing Wang <wangqing7171@...il.com>
Cc: henryzhangjcle@...il.com, acme@...nel.org, linux-kernel@...r.kernel.org,
linux-perf-users@...r.kernel.org, mingo@...hat.com, peterz@...radead.org,
syzbot+2a077cb788749964cf68@...kaller.appspotmail.com,
syzkaller-bugs@...glegroups.com
Subject: Re: [PATCH] perf: Fix data race in perf_event_set_bpf_handler()

Thanks, this looks good.
--
Henry
On Tue, Jan 27, 2026 at 5:36 AM Qing Wang <wangqing7171@...il.com> wrote:
>
> On Tue, 27 Jan 2026 at 16:37, Qing Wang <wangqing7171@...il.com> wrote:
> > On Tue, 27 Jan 2026 at 10:36, Henry Zhang <henryzhangjcle@...il.com> wrote:
> > > diff --git a/kernel/events/core.c b/kernel/events/core.c
> > > index a0fa488bce84..1f3ed9e87507 100644
> > > --- a/kernel/events/core.c
> > > +++ b/kernel/events/core.c
> > > @@ -10349,7 +10349,7 @@ static inline int perf_event_set_bpf_handler(struct perf_event *event,
> > >  		return -EPROTO;
> > >  	}
> > >
> > > -	event->prog = prog;
> > > +	WRITE_ONCE(event->prog, prog);
> > >  	event->bpf_cookie = bpf_cookie;
> > >  	return 0;
> > >  }
> > > @@ -10407,7 +10407,9 @@ static int __perf_event_overflow(struct perf_event *event,
> > >  	if (event->attr.aux_pause)
> > >  		perf_event_aux_pause(event->aux_event, true);
> > >
> > > -	if (event->prog && event->prog->type == BPF_PROG_TYPE_PERF_EVENT &&
> > > +	struct bpf_prog *prog = READ_ONCE(event->prog);
> > > +
> > > +	if (prog && prog->type == BPF_PROG_TYPE_PERF_EVENT &&
> > >  	    !bpf_overflow_handler(event, data, regs))
> > >  		goto out;
> >
> > Looking at this code, I think there may be a more serious issue: a
> > potential use-after-free (UAF) when accessing event->prog in
> > __perf_event_overflow:
> >
> > CPU 0 (interrupt context)             CPU 1 (process context)
> > read event->prog
> >                                       perf_event_free_bpf_handler()
> >                                         put(prog)
> >                                           free(prog)
> > access memory pointed to by prog
> >
> > This scenario needs more analysis.
> >
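> > To make the window concrete, here is a minimal userspace analogue
> > (C11 atomics + pthreads; struct prog, reader() and writer() are made-up
> > stand-ins, not kernel code). The atomic accesses play the role of
> > READ_ONCE()/WRITE_ONCE(): they fix the data race on the pointer itself,
> > but nothing stops the writer from freeing the object while the reader
> > still holds its snapshot:
> >
> > #include <pthread.h>
> > #include <stdatomic.h>
> > #include <stdio.h>
> > #include <stdlib.h>
> >
> > struct prog { int type; };
> >
> > static _Atomic(struct prog *) shared_prog;
> >
> > /* Like __perf_event_overflow(): snapshot the pointer, then use it. */
> > static void *reader(void *arg)
> > {
> > 	struct prog *p = atomic_load(&shared_prog);	/* ~READ_ONCE() */
> >
> > 	(void)arg;
> > 	if (p)
> > 		printf("type=%d\n", p->type);	/* may run after free() below */
> > 	return NULL;
> > }
> >
> > /* Like perf_event_free_bpf_handler(): unpublish, then free at once. */
> > static void *writer(void *arg)
> > {
> > 	struct prog *p = atomic_exchange(&shared_prog, NULL);
> >
> > 	(void)arg;
> > 	free(p);	/* nothing waits for the reader: UAF window */
> > 	return NULL;
> > }
> >
> > int main(void)
> > {
> > 	struct prog *p = malloc(sizeof(*p));
> > 	pthread_t r, w;
> >
> > 	p->type = 1;
> > 	atomic_store(&shared_prog, p);
> > 	pthread_create(&r, NULL, reader, NULL);
> > 	pthread_create(&w, NULL, writer, NULL);
> > 	pthread_join(r, NULL);
> > 	pthread_join(w, NULL);
> > 	return 0;
> > }
> >
> > With the interleaving in the diagram above, the reader's printf() touches
> > freed memory; READ_ONCE() alone cannot close this window.
> >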
> > --
> > Qing
>
> Here is my idea for solving both the data race and the potential UAF.
>
> diff --git a/kernel/events/core.c b/kernel/events/core.c
> index a0fa488bce84..3abf3689157d 100644
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -10291,7 +10291,12 @@ static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *r
>  }
>
>  #ifdef CONFIG_BPF_SYSCALL
> +/*
> + * Execute the attached BPF program. Caller must ensure prog is non-NULL
> + * and of type BPF_PROG_TYPE_PERF_EVENT under RCU protection.
> + */
>  static int bpf_overflow_handler(struct perf_event *event,
> +				struct bpf_prog *prog,
>  				struct perf_sample_data *data,
>  				struct pt_regs *regs)
>  {
> @@ -10299,22 +10304,17 @@ static int bpf_overflow_handler(struct perf_event *event,
>  		.data = data,
>  		.event = event,
>  	};
> -	struct bpf_prog *prog;
>  	int ret = 0;
>
>  	ctx.regs = perf_arch_bpf_user_pt_regs(regs);
>  	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
>  		goto out;
> -	rcu_read_lock();
> -	prog = READ_ONCE(event->prog);
> -	if (prog) {
> -		perf_prepare_sample(data, event, regs);
> -		ret = bpf_prog_run(prog, &ctx);
> -	}
> -	rcu_read_unlock();
> +
> +	perf_prepare_sample(data, event, regs);
> +	ret = bpf_prog_run(prog, &ctx);
> +
>  out:
>  	__this_cpu_dec(bpf_prog_active);
> -
>  	return ret;
>  }
>
> @@ -10349,7 +10349,7 @@ static inline int perf_event_set_bpf_handler(struct perf_event *event,
>  		return -EPROTO;
>  	}
>
> -	event->prog = prog;
> +	WRITE_ONCE(event->prog, prog);
>  	event->bpf_cookie = bpf_cookie;
>  	return 0;
>  }
> @@ -10361,13 +10361,14 @@ static inline void perf_event_free_bpf_handler(struct perf_event *event)
>  	if (!prog)
>  		return;
>
> -	event->prog = NULL;
> +	WRITE_ONCE(event->prog, NULL);
>  	bpf_prog_put(prog);
>  }
>  #else
>  static inline int bpf_overflow_handler(struct perf_event *event,
> -				       struct perf_sample_data *data,
> -				       struct pt_regs *regs)
> +				       struct bpf_prog *prog,
> +				       struct perf_sample_data *data,
> +				       struct pt_regs *regs)
>  {
>  	return 1;
>  }
> @@ -10407,9 +10408,19 @@ static int __perf_event_overflow(struct perf_event *event,
>  	if (event->attr.aux_pause)
>  		perf_event_aux_pause(event->aux_event, true);
>
> -	if (event->prog && event->prog->type == BPF_PROG_TYPE_PERF_EVENT &&
> -	    !bpf_overflow_handler(event, data, regs))
> +	/*
> +	 * For BPF-based overflow handling. If a BPF_PROG_TYPE_PERF_EVENT
> +	 * program is attached, execute it and skip default overflow handling.
> +	 */
> +	rcu_read_lock();
> +	struct bpf_prog *prog = rcu_dereference(event->prog);
> +
> +	if (prog && prog->type == BPF_PROG_TYPE_PERF_EVENT &&
> +	    !bpf_overflow_handler(event, prog, data, regs)) {
> +		rcu_read_unlock();
>  		goto out;
> +	}
> +	rcu_read_unlock();
>
>  	/*
>  	 * XXX event_limit might not quite work as expected on inherited
>
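> A note on why I expect this to be UAF-safe, plus a minimal userspace
> sketch (liburcu; struct prog and the function names are illustrative,
> not kernel code) of the publish/read/retire pattern the patch relies on.
> The rcu_read_lock() on the reader side only helps if the free side does
> not reclaim the program before a grace period has elapsed; my assumption
> is that bpf_prog_put() already defers the final free that way, which is
> worth double-checking. If it did not, perf_event_free_bpf_handler()
> would need an explicit synchronize_rcu()/call_rcu() step like the
> retire() below:
>
> #include <urcu.h>	/* userspace RCU; link with -lurcu */
> #include <stdio.h>
> #include <stdlib.h>
>
> struct prog { int type; };
>
> static struct prog *shared_prog;
>
> /* Read side, as in __perf_event_overflow(): dereference under the lock. */
> static void reader(void)
> {
> 	struct prog *p;
>
> 	rcu_read_lock();
> 	p = rcu_dereference(shared_prog);
> 	if (p)
> 		printf("type=%d\n", p->type);	/* safe: retire() waits for us */
> 	rcu_read_unlock();
> }
>
> /* Free side: unpublish, wait out all readers, then reclaim. */
> static void retire(void)
> {
> 	struct prog *p = shared_prog;
>
> 	rcu_assign_pointer(shared_prog, NULL);
> 	synchronize_rcu();	/* no reader can still hold p after this */
> 	free(p);
> }
>
> int main(void)
> {
> 	rcu_register_thread();
> 	shared_prog = malloc(sizeof(*shared_prog));
> 	shared_prog->type = 1;
> 	reader();
> 	retire();
> 	rcu_unregister_thread();
> 	return 0;
> }
>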
> What do you think about this solution? Looking forward to your review.
> --
> Qing