Message-Id: <4c2d2a3a44ea34ea21939f2f345687195f9e9fad.1653600578.git.lorenzo@kernel.org>
Date: Thu, 26 May 2022 23:34:58 +0200
From: Lorenzo Bianconi <lorenzo@...nel.org>
To: bpf@...r.kernel.org
Cc: netdev@...r.kernel.org, ast@...nel.org, daniel@...earbox.net,
andrii@...nel.org, davem@...emloft.net, kuba@...nel.org,
edumazet@...gle.com, pabeni@...hat.com, pablo@...filter.org,
fw@...len.de, netfilter-devel@...r.kernel.org,
lorenzo.bianconi@...hat.com, brouer@...hat.com, toke@...hat.com,
memxor@...il.com, yhs@...com
Subject: [PATCH v4 bpf-next 10/14] selftests/bpf: Add verifier tests for rdonly PTR_TO_BTF_ID
From: Kumar Kartikeya Dwivedi <memxor@...il.com>
Add matching verifier tests which ensure the read-only flag is set and
handled appropriately for const kptr fields.
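
For reference, a rough C-level view of the map value layout that the
updated btf_raw_types[] encoding corresponds to (a sketch only; the
selftest builds this BTF by hand, and the member names below are
illustrative rather than the ones used in the raw string section):

	struct btf_ptr {
		struct prog_test_ref_kfunc __kptr *unref_ptr;
		struct prog_test_ref_kfunc __kptr_ref *ref_ptr;
		struct prog_test_member __kptr_ref *ref_memb_ptr;
		/* new in this patch: read-only (const) kptr fields */
		const struct prog_test_ref_kfunc __kptr *rdonly_unref_ptr;
		const struct prog_test_ref_kfunc __kptr_ref *rdonly_ref_ptr;
	};
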
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@...il.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@...nel.org>
---
tools/testing/selftests/bpf/test_verifier.c | 17 +-
.../testing/selftests/bpf/verifier/map_kptr.c | 156 ++++++++++++++++++
2 files changed, 169 insertions(+), 4 deletions(-)
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 372579c9f45e..9728f87e5a40 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -626,6 +626,8 @@ static int create_cgroup_storage(bool percpu)
* struct prog_test_ref_kfunc __kptr *ptr;
* struct prog_test_ref_kfunc __kptr_ref *ptr;
* struct prog_test_member __kptr_ref *ptr;
+ * const struct prog_test_ref_kfunc __kptr *ptr;
+ * const struct prog_test_ref_kfunc __kptr_ref *ptr;
* }
*/
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t"
@@ -657,11 +659,18 @@ static __u32 btf_raw_types[] = {
BTF_PTR_ENC(8), /* [11] */
BTF_PTR_ENC(9), /* [12] */
BTF_PTR_ENC(10), /* [13] */
- /* struct btf_ptr */ /* [14] */
- BTF_STRUCT_ENC(43, 3, 24),
+ BTF_CONST_ENC(6), /* [14] */
+ BTF_TYPE_TAG_ENC(75, 14), /* [15] */
+ BTF_TYPE_TAG_ENC(80, 14), /* [16] */
+ BTF_PTR_ENC(15), /* [17] */
+ BTF_PTR_ENC(16), /* [18] */
+ /* struct btf_ptr */ /* [19] */
+ BTF_STRUCT_ENC(43, 5, 40),
BTF_MEMBER_ENC(71, 11, 0), /* struct prog_test_ref_kfunc __kptr *ptr; */
BTF_MEMBER_ENC(71, 12, 64), /* struct prog_test_ref_kfunc __kptr_ref *ptr; */
BTF_MEMBER_ENC(71, 13, 128), /* struct prog_test_member __kptr_ref *ptr; */
+ BTF_MEMBER_ENC(71, 17, 192), /* const struct prog_test_ref_kfunc __kptr *ptr; */
+ BTF_MEMBER_ENC(71, 18, 256), /* const struct prog_test_ref_kfunc __kptr_ref *ptr; */
};
static int load_btf(void)
@@ -755,7 +764,7 @@ static int create_map_kptr(void)
{
LIBBPF_OPTS(bpf_map_create_opts, opts,
.btf_key_type_id = 1,
- .btf_value_type_id = 14,
+ .btf_value_type_id = 19,
);
int fd, btf_fd;
@@ -764,7 +773,7 @@ static int create_map_kptr(void)
return -1;
opts.btf_fd = btf_fd;
- fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 24, 1, &opts);
+ fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 40, 1, &opts);
if (fd < 0)
printf("Failed to create map with btf_id pointer\n");
return fd;
diff --git a/tools/testing/selftests/bpf/verifier/map_kptr.c b/tools/testing/selftests/bpf/verifier/map_kptr.c
index 6914904344c0..666c78969478 100644
--- a/tools/testing/selftests/bpf/verifier/map_kptr.c
+++ b/tools/testing/selftests/bpf/verifier/map_kptr.c
@@ -197,6 +197,27 @@
.result = REJECT,
.errstr = "R0 invalid mem access 'untrusted_ptr_or_null_'",
},
+{
+ "map_kptr: unref: loaded pointer to const marked as rdonly_untrusted",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 24),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'rdonly_untrusted_ptr_or_null_'",
+},
{
"map_kptr: unref: correct in kernel type size",
.insns = {
@@ -315,6 +336,32 @@
{ "bpf_kfunc_call_test_kptr_get", 13 },
}
},
+{
+ "map_kptr: unref: store const to non-const",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "invalid kptr access, R0 type=rdonly_ptr_or_null_ expected=ptr_prog_test",
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_test_acquire_const", 11 },
+ }
+},
/* Tests for referenced PTR_TO_BTF_ID */
{
"map_kptr: ref: loaded pointer marked as untrusted",
@@ -338,6 +385,27 @@
.result = REJECT,
.errstr = "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_",
},
+{
+ "map_kptr: ref: loaded pointer to const marked as rdonly_untrusted",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 32),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'rdonly_untrusted_ptr_or_null_'",
+},
{
"map_kptr: ref: reject off != 0",
.insns = {
@@ -403,6 +471,34 @@
{ "bpf_kfunc_call_test_acquire", 15 },
}
},
+{
+ "map_kptr: const ref: bpf_kfunc_call_test_kptr_get rejected",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 32),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "arg#0 cannot raise reference for pointer to const",
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_test_kptr_get", 14 },
+ }
+},
{
"map_kptr: ref: reject STX",
.insns = {
@@ -467,3 +563,63 @@
.result = REJECT,
.errstr = "kptr cannot be accessed indirectly by helper",
},
+{
+ "map_kptr: ref: xchg or_null const to non-const",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "invalid kptr access, R2 type=rdonly_ptr_or_null_ expected=ptr_prog_test_ref_kfunc",
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_test_acquire_const", 11 },
+ }
+},
+{
+ "map_kptr: ref: xchg const to non-const",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "invalid kptr access, R2 type=rdonly_ptr_ expected=ptr_prog_test_ref_kfunc",
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_test_acquire_const", 11 },
+ }
+},
--
2.35.3