Message-ID: <CAADnVQLNdDn1jfyEAeKO17vXQiN+VKAvq+VFkY2G_pvSbaPjFA@mail.gmail.com>
Date: Thu, 5 Nov 2020 20:19:47 -0800
From: Alexei Starovoitov <alexei.starovoitov@...il.com>
To: Kevin Sheldrake <Kevin.Sheldrake@...rosoft.com>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Network Development <netdev@...r.kernel.org>
Cc: "bpf@...r.kernel.org" <bpf@...r.kernel.org>,
Andrii Nakryiko <andrii.nakryiko@...il.com>,
KP Singh <kpsingh@...gle.com>
Subject: Re: [PATCH bpf-next v2] Update perf ring buffer to prevent corruption
On Thu, Nov 5, 2020 at 7:18 AM Kevin Sheldrake
<Kevin.Sheldrake@...rosoft.com> wrote:
>
> Resent due to some failure at my end. Apologies if it arrives twice.
>
> From 63e34d4106b4dd767f9bfce951f8a35f14b52072 Mon Sep 17 00:00:00 2001
> From: Kevin Sheldrake <kevin.sheldrake@...rosoft.com>
> Date: Thu, 5 Nov 2020 12:18:53 +0000
> Subject: [PATCH] Update perf ring buffer to prevent corruption from
> bpf_perf_event_output()
>
> The bpf_perf_event_output() helper takes a sample size parameter of u64, but
> the underlying perf ring buffer stores each record's size in a u16
> (header::size). This 64KB maximum must also accommodate a variable-sized
> record header. Failure to observe this restriction can corrupt the perf ring
> buffer as samples overlap.
>
> Track the sample size while preparing the sample and return -E2BIG if it is
> too big to fit into the u16 header::size field.
>
> Signed-off-by: Kevin Sheldrake <kevin.sheldrake@...rosoft.com>
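For reference, the 16-bit limit comes from the size field of the record
header that every entry in the perf ring buffer carries
(include/uapi/linux/perf_event.h); header->size describes the total record
length, including the header itself:

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;	/* total record size, including this header */
};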
The fix makes sense to me.
Peter, Ingo,
should I take it through the bpf tree, or do you want to route it via tip?
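For anyone who wants to reproduce the failure mode, a minimal (untested)
libbpf-style sketch along these lines should now get -E2BIG back from the
helper instead of silently corrupting the buffer; the section, map and
payload names below are only illustrative, not taken from Kevin's report:

/* Illustrative only: ask the helper to emit a raw sample larger than
 * the 16-bit header::size can ever describe.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} events SEC(".maps");

/* 70000 bytes cannot fit in a u16 together with the record header */
char big_payload[70000];

SEC("tracepoint/syscalls/sys_enter_getpid")
int oversized_sample(void *ctx)
{
	long err;

	err = bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
				    big_payload, sizeof(big_payload));
	/* with the fix applied this is expected to be -E2BIG */
	return err ? 1 : 0;
}

char LICENSE[] SEC("license") = "GPL";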
> ---
> include/linux/perf_event.h | 2 +-
> kernel/events/core.c | 40 ++++++++++++++++++++++++++--------------
> 2 files changed, 27 insertions(+), 15 deletions(-)
>
> diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
> index 0c19d27..b9802e5 100644
> --- a/include/linux/perf_event.h
> +++ b/include/linux/perf_event.h
> @@ -1060,7 +1060,7 @@ extern void perf_output_sample(struct perf_output_handle *handle,
> struct perf_event_header *header,
> struct perf_sample_data *data,
> struct perf_event *event);
> -extern void perf_prepare_sample(struct perf_event_header *header,
> +extern int perf_prepare_sample(struct perf_event_header *header,
> struct perf_sample_data *data,
> struct perf_event *event,
> struct pt_regs *regs);
> diff --git a/kernel/events/core.c b/kernel/events/core.c
> index da467e1..c6c4a3c 100644
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -7016,15 +7016,17 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
> return callchain ?: &__empty_callchain;
> }
>
> -void perf_prepare_sample(struct perf_event_header *header,
> +int perf_prepare_sample(struct perf_event_header *header,
> struct perf_sample_data *data,
> struct perf_event *event,
> struct pt_regs *regs)
> {
> u64 sample_type = event->attr.sample_type;
> + u32 header_size = header->size;
> +
>
> header->type = PERF_RECORD_SAMPLE;
> - header->size = sizeof(*header) + event->header_size;
> + header_size = sizeof(*header) + event->header_size;
>
> header->misc = 0;
> header->misc |= perf_misc_flags(regs);
> @@ -7042,7 +7044,7 @@ void perf_prepare_sample(struct perf_event_header *header,
>
> size += data->callchain->nr;
>
> - header->size += size * sizeof(u64);
> + header_size += size * sizeof(u64);
> }
>
> if (sample_type & PERF_SAMPLE_RAW) {
> @@ -7067,7 +7069,7 @@ void perf_prepare_sample(struct perf_event_header *header,
> size = sizeof(u64);
> }
>
> - header->size += size;
> + header_size += size;
> }
>
> if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
> @@ -7079,7 +7081,7 @@ void perf_prepare_sample(struct perf_event_header *header,
> size += data->br_stack->nr
> * sizeof(struct perf_branch_entry);
> }
> - header->size += size;
> + header_size += size;
> }
>
> if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
> @@ -7095,7 +7097,7 @@ void perf_prepare_sample(struct perf_event_header *header,
> size += hweight64(mask) * sizeof(u64);
> }
>
> - header->size += size;
> + header_size += size;
> }
>
> if (sample_type & PERF_SAMPLE_STACK_USER) {
> @@ -7108,7 +7110,7 @@ void perf_prepare_sample(struct perf_event_header *header,
> u16 stack_size = event->attr.sample_stack_user;
> u16 size = sizeof(u64);
>
> - stack_size = perf_sample_ustack_size(stack_size, header->size,
> + stack_size = perf_sample_ustack_size(stack_size, header_size,
> data->regs_user.regs);
>
> /*
> @@ -7120,7 +7122,7 @@ void perf_prepare_sample(struct perf_event_header *header,
> size += sizeof(u64) + stack_size;
>
> data->stack_user_size = stack_size;
> - header->size += size;
> + header_size += size;
> }
>
> if (sample_type & PERF_SAMPLE_REGS_INTR) {
> @@ -7135,7 +7137,7 @@ void perf_prepare_sample(struct perf_event_header *header,
> size += hweight64(mask) * sizeof(u64);
> }
>
> - header->size += size;
> + header_size += size;
> }
>
> if (sample_type & PERF_SAMPLE_PHYS_ADDR)
> @@ -7154,7 +7156,7 @@ void perf_prepare_sample(struct perf_event_header *header,
> if (sample_type & PERF_SAMPLE_AUX) {
> u64 size;
>
> - header->size += sizeof(u64); /* size */
> + header_size += sizeof(u64); /* size */
>
> /*
> * Given the 16bit nature of header::size, an AUX sample can
> @@ -7162,14 +7164,20 @@ void perf_prepare_sample(struct perf_event_header *header,
> * Make sure this doesn't happen by using up to U16_MAX bytes
> * per sample in total (rounded down to 8 byte boundary).
> */
> - size = min_t(size_t, U16_MAX - header->size,
> + size = min_t(size_t, U16_MAX - header_size,
> event->attr.aux_sample_size);
> size = rounddown(size, 8);
> size = perf_prepare_sample_aux(event, data, size);
>
> - WARN_ON_ONCE(size + header->size > U16_MAX);
> - header->size += size;
> + WARN_ON_ONCE(size + header_size > U16_MAX);
> + header_size += size;
> }
> +
> + if (header_size > U16_MAX)
> + return -E2BIG;
> +
> + header->size = header_size;
> +
> /*
> * If you're adding more sample types here, you likely need to do
> * something about the overflowing header::size, like repurpose the
> @@ -7179,6 +7187,8 @@ void perf_prepare_sample(struct perf_event_header *header,
> * do here next.
> */
> WARN_ON_ONCE(header->size & 7);
> +
> + return 0;
> }
>
> static __always_inline int
> @@ -7196,7 +7206,9 @@ __perf_event_output(struct perf_event *event,
> /* protect the callchain buffers */
> rcu_read_lock();
>
> - perf_prepare_sample(&header, data, event, regs);
> + err = perf_prepare_sample(&header, data, event, regs);
> + if (err)
> + goto exit;
>
> err = output_begin(&handle, event, header.size);
> if (err)
> --
> 2.7.4
>
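As a side note on the shape of the fix: accumulating straight into the
16-bit header->size would wrap silently, which is why the patch builds the
size up in a u32 and only commits it to header->size after the U16_MAX
check. A standalone userspace sketch (plain C, not kernel code) of the
difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t size16 = 64;		/* header + fixed sample fields */
	uint32_t size32 = 64;
	uint32_t raw = 70000;		/* oversized RAW payload */

	size16 += raw;			/* wraps modulo 65536, overflow is lost */
	size32 += raw;			/* keeps the real total */

	printf("u16 accumulator: %u (wrapped)\n", (unsigned)size16);
	printf("u32 accumulator: %u%s\n", size32,
	       size32 > UINT16_MAX ? " -> reject with -E2BIG" : "");
	return 0;
}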