Message-ID: <CAPhsuW6rmx=R0op=wa0y3VVJvg6JNZHm2EVSJV9qGAaFVeryGQ@mail.gmail.com>
Date: Wed, 30 May 2018 09:46:56 -0700
From: Song Liu <liu.song.a23@...il.com>
To: Daniel Borkmann <daniel@...earbox.net>
Cc: Alexei Starovoitov <ast@...nel.org>,
        Networking <netdev@...r.kernel.org>
Subject: Re: [PATCH bpf-next 09/11] bpf: fix context access in tracing progs
        on 32 bit archs

On Sun, May 27, 2018 at 5:43 PM, Daniel Borkmann <daniel@...earbox.net> wrote:
> Wang reported that all the testcases for BPF_PROG_TYPE_PERF_EVENT
> program type in test_verifier report the following errors on x86_32:
>
> 172/p unpriv: spill/fill of different pointers ldx FAIL
> Unexpected error message!
> 0: (bf) r6 = r10
> 1: (07) r6 += -8
> 2: (15) if r1 == 0x0 goto pc+3
> R1=ctx(id=0,off=0,imm=0) R6=fp-8,call_-1 R10=fp0,call_-1
> 3: (bf) r2 = r10
> 4: (07) r2 += -76
> 5: (7b) *(u64 *)(r6 +0) = r2
> 6: (55) if r1 != 0x0 goto pc+1
> R1=ctx(id=0,off=0,imm=0) R2=fp-76,call_-1 R6=fp-8,call_-1 R10=fp0,call_-1 fp-8=fp
> 7: (7b) *(u64 *)(r6 +0) = r1
> 8: (79) r1 = *(u64 *)(r6 +0)
> 9: (79) r1 = *(u64 *)(r1 +68)
> invalid bpf_context access off=68 size=8
>
> 378/p check bpf_perf_event_data->sample_period byte load permitted FAIL
> Failed to load prog 'Permission denied'!
> 0: (b7) r0 = 0
> 1: (71) r0 = *(u8 *)(r1 +68)
> invalid bpf_context access off=68 size=1
>
> 379/p check bpf_perf_event_data->sample_period half load permitted FAIL
> Failed to load prog 'Permission denied'!
> 0: (b7) r0 = 0
> 1: (69) r0 = *(u16 *)(r1 +68)
> invalid bpf_context access off=68 size=2
>
> 380/p check bpf_perf_event_data->sample_period word load permitted FAIL
> Failed to load prog 'Permission denied'!
> 0: (b7) r0 = 0
> 1: (61) r0 = *(u32 *)(r1 +68)
> invalid bpf_context access off=68 size=4
>
> 381/p check bpf_perf_event_data->sample_period dword load permitted FAIL
> Failed to load prog 'Permission denied'!
> 0: (b7) r0 = 0
> 1: (79) r0 = *(u64 *)(r1 +68)
> invalid bpf_context access off=68 size=8
>
> Reason is that struct pt_regs on x86_32 doesn't fully align to an 8 byte
> boundary due to its size of 68 bytes, so the sample_period member that
> follows it sits at offset 68.
>
> bpf_ctx_narrow_access_ok() therefore bails out, since off & (size_default - 1),
> that is 68 & 7, doesn't align cleanly for the sample_period access from
> struct bpf_perf_event_data, and the verifier wrongly thinks we might be
> doing an unaligned access here. Adjust the default size down to the
> machine word size and check the offset for narrow access on that basis.
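To make the alignment arithmetic concrete, here is a minimal user-space
sketch of that check (the helper names are just illustrative, not the
kernel functions); built 32 bit little-endian it shows why offset 68 fails
against size_default 8 but passes once the default is clamped to the
machine word size:

#include <stdbool.h>
#include <stdio.h>

/* Illustration only: mirrors the check from the patch in user space. */
static unsigned int off_adjust_machine(unsigned int size)
{
        const unsigned int size_machine = sizeof(unsigned long);

        if (size > size_machine && size % size_machine == 0)
                size = size_machine;
        return size;
}

static bool narrow_align_ok(unsigned int off, unsigned int size_default)
{
        /* Little-endian branch of the check. */
        size_default = off_adjust_machine(size_default);
        return (off & (size_default - 1)) == 0;
}

int main(void)
{
        /* sample_period sits at offset 68 on x86_32. */
        printf("old check: 68 & 7 = %d -> %s\n", 68 & 7,
               (68 & 7) == 0 ? "ok" : "rejected");
        printf("new check: %s (with sizeof(long) == %zu)\n",
               narrow_align_ok(68, 8) ? "ok" : "rejected",
               sizeof(unsigned long));
        return 0;
}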
>
> We also need to fix pe_prog_is_valid_access(), since we hit the check
> for off % size != 0 (e.g. 68 % 8 -> 4) in the first and last test.
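As a quick cross-check of which of the loads at offset 68 actually hit that
off % size check (plain arithmetic, nothing kernel-specific):

#include <stdio.h>

int main(void)
{
        /* byte/half/word/dword loads of sample_period at offset 68 */
        const unsigned int sizes[] = { 1, 2, 4, 8 };

        for (unsigned int i = 0; i < 4; i++)
                printf("68 %% %u = %u\n", sizes[i], 68 % sizes[i]);
        /* Only the 8 byte load has a non-zero remainder (4), matching the
         * first (172/p) and last (381/p) failing tests above. */
        return 0;
}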
>
> Reported-by: Wang YanQing <udknight@...il.com>
> Signed-off-by: Daniel Borkmann <daniel@...earbox.net>
> Acked-by: Alexei Starovoitov <ast@...nel.org>
> ---
>  include/linux/filter.h   | 30 ++++++++++++++++++++++++------
>  kernel/trace/bpf_trace.c | 10 ++++++++--
>  2 files changed, 32 insertions(+), 8 deletions(-)
>
> diff --git a/include/linux/filter.h b/include/linux/filter.h
> index d407ede..89903d2 100644
> --- a/include/linux/filter.h
> +++ b/include/linux/filter.h
> @@ -639,16 +639,34 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
>          return prog->type == BPF_PROG_TYPE_UNSPEC;
>  }
>
> -static inline bool
> -bpf_ctx_narrow_access_ok(u32 off, u32 size, const u32 size_default)
> +static inline u32 bpf_ctx_off_adjust_machine(u32 size)
> +{
> +        const u32 size_machine = sizeof(unsigned long);
> +
> +        if (size > size_machine && size % size_machine == 0)
> +                size = size_machine;
Not sure whether I understand this correctly. I guess we only need:

        if (size % size_machine == 0)
                size = size_machine;

Or is this function equivalent to:

        if (size == 8 && size_machine == 4)
                size = 4;

If that is the case, maybe we can make bpf_ctx_narrow_align_ok()
simpler?
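A quick enumeration of the cases (assuming size is limited to the
power-of-two ctx access sizes 1/2/4/8 and size_machine is either 4 or 8)
seems to confirm that only size == 8 with size_machine == 4 is affected:

#include <stdio.h>

int main(void)
{
        const unsigned int sizes[] = { 1, 2, 4, 8 };
        const unsigned int machines[] = { 4, 8 };

        for (unsigned int m = 0; m < 2; m++)
                for (unsigned int s = 0; s < 4; s++) {
                        unsigned int size = sizes[s];
                        unsigned int size_machine = machines[m];

                        if (size > size_machine && size % size_machine == 0)
                                printf("clamped: size=%u size_machine=%u\n",
                                       size, size_machine);
                }
        /* Prints only: clamped: size=8 size_machine=4 */
        return 0;
}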
Thanks,
Song
> +
> +        return size;
> +}
> +
> +static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access,
> +                                            u32 size_default)
>  {
> -        bool off_ok;
> +        size_default = bpf_ctx_off_adjust_machine(size_default);
> +        size_access = bpf_ctx_off_adjust_machine(size_access);
> +
>  #ifdef __LITTLE_ENDIAN
> -        off_ok = (off & (size_default - 1)) == 0;
> +        return (off & (size_default - 1)) == 0;
>  #else
> -        off_ok = (off & (size_default - 1)) + size == size_default;
> +        return (off & (size_default - 1)) + size_access == size_default;
>  #endif
> -        return off_ok && size <= size_default && (size & (size - 1)) == 0;
> +}
> +
> +static inline bool
> +bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
> +{
> +        return bpf_ctx_narrow_align_ok(off, size, size_default) &&
> +               size <= size_default && (size & (size - 1)) == 0;
>  }
>
>  #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index 81fdf2f..7269530 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -880,8 +880,14 @@ static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type
>                  return false;
>          if (type != BPF_READ)
>                  return false;
> -        if (off % size != 0)
> -                return false;
> +        if (off % size != 0) {
> +                if (sizeof(unsigned long) != 4)
> +                        return false;
> +                if (size != 8)
> +                        return false;
> +                if (off % size != 4)
> +                        return false;
> +        }
>
>          switch (off) {
>          case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
> --
> 2.9.5
>