Message-ID: <c97b9ffd2042790ae6eb55ba01eae94282014006.1734045451.git.dxu@dxuuu.xyz>
Date: Thu, 12 Dec 2024 16:22:09 -0700
From: Daniel Xu <dxu@...uu.xyz>
To: andrii@...nel.org,
ast@...nel.org,
eddyz87@...il.com,
shuah@...nel.org,
daniel@...earbox.net
Cc: mykolal@...com,
martin.lau@...ux.dev,
song@...nel.org,
yonghong.song@...ux.dev,
john.fastabend@...il.com,
kpsingh@...nel.org,
sdf@...ichev.me,
haoluo@...gle.com,
jolsa@...nel.org,
bpf@...r.kernel.org,
linux-kselftest@...r.kernel.org,
linux-kernel@...r.kernel.org,
netdev@...r.kernel.org
Subject: [PATCH bpf-next v5 5/5] bpf: selftests: verifier: Add nullness elision tests
Test that nullness elision works for common use cases. For example, we
want to check that both full and subreg stack slots are recognized,
that lookups behave correctly when both const and non-const values of
R2 can reach the lookup on different paths, and that out-of-bounds
constant keys are still rejected.
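To illustrate the pattern under test, an elided lookup looks roughly
like the following in C. This is only a sketch for reviewers; the map
and function names here are made up and do not appear in this patch:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  struct {
          __uint(type, BPF_MAP_TYPE_ARRAY);
          __uint(max_entries, 2);
          __type(key, int);
          __type(value, __u64);
  } arr SEC(".maps");

  SEC("socket")
  int lookup_const_key(void *ctx)
  {
          int key = 1;    /* constant and within max_entries */
          __u64 *val;

          val = bpf_map_lookup_elem(&arr, &key);
          /* With nullness elision, the verifier can prove that an
           * array map lookup with a constant, in-bounds key never
           * fails, so this unchecked dereference is accepted.
           */
          return *val;
  }

  char _license[] SEC("license") = "GPL";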
Signed-off-by: Daniel Xu <dxu@...uu.xyz>
---
.../bpf/progs/verifier_array_access.c | 214 ++++++++++++++++++
1 file changed, 214 insertions(+)
diff --git a/tools/testing/selftests/bpf/progs/verifier_array_access.c b/tools/testing/selftests/bpf/progs/verifier_array_access.c
index 4195aa824ba5..8ed8865fc6f6 100644
--- a/tools/testing/selftests/bpf/progs/verifier_array_access.c
+++ b/tools/testing/selftests/bpf/progs/verifier_array_access.c
@@ -28,6 +28,20 @@ struct {
__uint(map_flags, BPF_F_WRONLY_PROG);
} map_array_wo SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 2);
+ __type(key, int);
+ __type(value, struct test_val);
+} map_array_pcpu SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 2);
+ __type(key, int);
+ __type(value, struct test_val);
+} map_array SEC(".maps");
+
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
@@ -525,4 +539,204 @@ l0_%=: exit; \
: __clobber_all);
}
+SEC("socket")
+__description("valid map access into an array using constant without nullness")
+__success __retval(4)
+__naked void an_array_with_a_constant_no_nullness(void)
+{
+ asm volatile (" \
+ r1 = 1; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r1 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r1; \
+ r0 = *(u64*)(r0 + 0); \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("valid multiple map access into an array using constant without nullness")
+__success __retval(8)
+__naked void multiple_array_with_a_constant_no_nullness(void)
+{
+ asm volatile (" \
+ r1 = 1; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r6 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r6; \
+ r7 = *(u64*)(r0 + 0); \
+ r1 = 0; \
+ *(u64*)(r10 - 16) = r1; \
+ r2 = r10; \
+ r2 += -16; \
+ r1 = %[map_array] ll; \
+ call %[bpf_map_lookup_elem]; \
+ *(u64*)(r0 + 0) = r6; \
+ r1 = *(u64*)(r0 + 0); \
+ r7 += r1; \
+ r0 = r7; \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("valid map access into an array using 32-bit constant without nullness")
+__success __retval(4)
+__naked void an_array_with_a_32bit_constant_no_nullness(void)
+{
+ /* 32-bit write must be to stack address aligned to BPF_REG_SIZE
+ * so that the spill is tracked. Unaligned subreg writes are less
+ * precisely tracked.
+ */
+ asm volatile (" \
+ r1 = 1; \
+ *(u32*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r1 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r1; \
+ r0 = *(u64*)(r0 + 0); \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("valid map access into an array using 32-bit constant 0 without nullness")
+__success __retval(4)
+__naked void an_array_with_a_32bit_constant_0_no_nullness(void)
+{
+ /* Unlike the above test, 32-bit zeroing is precisely tracked even
+ * if writes are not aligned to BPF_REG_SIZE. This tests that our
+ * STACK_ZERO handling works.
+ */
+ asm volatile (" \
+ r1 = 0; \
+ *(u32*)(r10 - 4) = r1; \
+ r2 = r10; \
+ r2 += -4; \
+ r1 = %[map_array] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r1 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r1; \
+ r0 = *(u64*)(r0 + 0); \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("valid map access into a pcpu array using constant without nullness")
+__success __retval(4)
+__naked void a_pcpu_array_with_a_constant_no_nullness(void)
+{
+ asm volatile (" \
+ r1 = 1; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array_pcpu] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r1 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r1; \
+ r0 = *(u64*)(r0 + 0); \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array_pcpu),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("invalid map access into an array using constant without nullness")
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+__naked void an_array_with_a_constant_no_nullness_out_of_bounds(void)
+{
+ asm volatile (" \
+ r1 = 3; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+ r1 = %[map_array] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r1 = %[test_val_foo]; \
+ *(u64*)(r0 + 0) = r1; \
+ r0 = *(u64*)(r0 + 0); \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array),
+ __imm_const(test_val_foo, offsetof(struct test_val, foo))
+ : __clobber_all);
+}
+
+SEC("socket")
+__description("invalid elided lookup using const and non-const key")
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+__naked void mixed_const_and_non_const_key_lookup(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ if r0 > 42 goto l1_%=; \
+ *(u64*)(r10 - 8) = r0; \
+ r2 = r10; \
+ r2 += -8; \
+ goto l0_%=; \
+l1_%=: r1 = 1; \
+ *(u64*)(r10 - 8) = r1; \
+ r2 = r10; \
+ r2 += -8; \
+l0_%=: r1 = %[map_array] ll; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = *(u64*)(r0 + 0); \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32),
+ __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array)
+ : __clobber_all);
+}
+
+SEC("socket")
+__failure __msg("invalid read from stack R2 off=4096 size=4")
+__naked void key_lookup_at_invalid_fp(void)
+{
+ asm volatile (" \
+ r1 = %[map_array] ll; \
+ r2 = r10; \
+ r2 += 4096; \
+ call %[bpf_map_lookup_elem]; \
+ r0 = *(u64*)(r0 + 0); \
+ exit; \
+" :
+ : __imm(bpf_map_lookup_elem),
+ __imm_addr(map_array)
+ : __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
--
2.46.0