[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20220711083220.2175036-5-asavkov@redhat.com>
Date: Mon, 11 Jul 2022 10:32:20 +0200
From: Artem Savkov <asavkov@...hat.com>
To: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andrii@...nel.org>, bpf@...r.kernel.org,
netdev@...r.kernel.org
Cc: linux-kernel@...r.kernel.org,
Andrea Arcangeli <aarcange@...hat.com>,
Artem Savkov <asavkov@...hat.com>
Subject: [RFC PATCH bpf-next 4/4] selftests/bpf: bpf_panic selftest
Add a selftest for bpf_panic() checking that the program will only load
if all the prerequisites are met.
Signed-off-by: Artem Savkov <asavkov@...hat.com>
---
.../selftests/bpf/prog_tests/bpf_panic.c | 144 ++++++++++++++++++
1 file changed, 144 insertions(+)
create mode 100644 tools/testing/selftests/bpf/prog_tests/bpf_panic.c
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_panic.c b/tools/testing/selftests/bpf/prog_tests/bpf_panic.c
new file mode 100644
index 000000000000..9d008c0a5140
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_panic.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Red Hat, Inc. */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "cap_helpers.h"
+
/*
 * Read the current value of a sysctl file into old_val.
 *
 * old_val must point to a buffer of at least 32 bytes (matching
 * destructive_bpf_enabled_orig in test_bpf_panic); the conversion is
 * bounded to 31 characters plus the NUL terminator so an unexpectedly
 * long sysctl value cannot overflow the caller's buffer.
 *
 * Returns 0 on success, -errno if the file cannot be opened, or
 * -ENOENT if no value could be scanned.
 */
static int sysctl_get(const char *sysctl_path, char *old_val)
{
	int ret = 0;
	FILE *fp;

	fp = fopen(sysctl_path, "r");
	if (!fp)
		return -errno;

	/* Bounded %31s: an unbounded %s could overflow old_val. */
	if (fscanf(fp, "%31s", old_val) <= 0)
		ret = -ENOENT;

	fclose(fp);

	return ret;
}
+
/*
 * Write new_val to the sysctl file at sysctl_path.
 *
 * Returns 0 on success or a negative errno (from fopen or the write)
 * on failure.
 */
static int sysctl_set(const char *sysctl_path, const char *new_val)
{
	FILE *fp = fopen(sysctl_path, "w");
	int err = 0;

	if (!fp)
		return -errno;

	if (fputs(new_val, fp) < 0)
		err = -errno;

	fclose(fp);
	return err;
}
+
/* Shared verifier log buffer (~16 MiB); reused by every bpf_prog_load() below. */
static char bpf_vlog[UINT_MAX >> 8];
+
+static void test_bpf_panic_conditions(void)
+{
+ int fd_prog;
+ int map_fd;
+ struct bpf_insn prog_insns[] = {
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_panic),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn);
+ LIBBPF_OPTS(bpf_prog_load_opts, load_opts);
+ LIBBPF_OPTS(bpf_map_create_opts, map_create_opts);
+ int attach_btf_id;
+ __u64 save_caps = 0;
+
+ if (!ASSERT_OK(sysctl_set("/proc/sys/kernel/destructive_bpf_enabled",
+ "1"), "set destructive_bpf_enabled"))
+ return;
+
+ load_opts.log_level = 1;
+ load_opts.log_buf = bpf_vlog;
+ load_opts.log_size = sizeof(bpf_vlog);
+ load_opts.expected_attach_type = BPF_TRACE_FENTRY;
+
+ attach_btf_id = libbpf_find_vmlinux_btf_id("dentry_open", load_opts.expected_attach_type);
+ if (!ASSERT_GE(attach_btf_id, 0, "attach_btf_id"))
+ return;
+
+ load_opts.attach_btf_id = attach_btf_id;
+
+ map_create_opts.map_flags = BPF_F_RDONLY_PROG;
+ map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, &map_create_opts);
+ if (!ASSERT_GE(map_fd, 0, "bpf_map_create"))
+ return;
+ ASSERT_OK(bpf_map_freeze(map_fd), "bpf_map_freeze");
+
+ prog_insns[0].imm = map_fd;
+
+ fd_prog = bpf_prog_load(BPF_PROG_TYPE_TRACING, "bpf_panic", "GPL", prog_insns, prog_insn_cnt, &load_opts);
+
+ if (ASSERT_EQ(fd_prog, -EACCES, "BPF_F_DESTRUCTIVE required")) {
+ if (!ASSERT_OK_PTR(
+ strstr(bpf_vlog, "require BPF_F_DESTRUCTIVE"),
+ "BPF_F_DESTRUCTIVE verifier log")) {
+ printf("verifier log:\n%s\n", bpf_vlog);
+ }
+ }
+
+ load_opts.prog_flags = BPF_F_DESTRUCTIVE;
+ fd_prog = bpf_prog_load(BPF_PROG_TYPE_TRACING, "bpf_panic", "GPL", prog_insns, prog_insn_cnt, &load_opts);
+
+ if (ASSERT_GE(fd_prog, 0, "successful load")) {
+ close(fd_prog);
+ } else {
+ printf("verifier log:\n%s\n", bpf_vlog);
+ }
+
+
+ if (ASSERT_OK(cap_disable_effective(1ULL << CAP_SYS_BOOT, &save_caps), "disable caps")) {
+ fd_prog = bpf_prog_load(BPF_PROG_TYPE_TRACING, "bpf_panic", "GPL", prog_insns, prog_insn_cnt, &load_opts);
+ ASSERT_EQ(fd_prog, -EINVAL, "CAP_SYS_BOOT required");
+ if (!ASSERT_OK_PTR(
+ strstr(bpf_vlog, "unknown func bpf_panic"),
+ "CAP_SYS_BOOT verifier log")) {
+ printf("verifier log:\n%s\n", bpf_vlog);
+ }
+ cap_enable_effective(save_caps, NULL);
+ }
+
+ if (ASSERT_OK(sysctl_set("/proc/sys/kernel/destructive_bpf_enabled",
+ "0"), "unset destructive_bpf_enabled")) {
+ fd_prog = bpf_prog_load(BPF_PROG_TYPE_TRACING, "bpf_panic", "GPL", prog_insns, prog_insn_cnt, &load_opts);
+ ASSERT_EQ(fd_prog, -EINVAL, "!destructive_bpf_enabled");
+ if (!ASSERT_OK_PTR(
+ strstr(bpf_vlog, "unknown func bpf_panic"),
+ "!destructive_bpf_enabled verifier log")) {
+ printf("verifier log:\n%s\n", bpf_vlog);
+ }
+ }
+ close(map_fd);
+}
+
+void test_bpf_panic(void)
+{
+ char destructive_bpf_enabled_orig[32] = {};
+
+ if (!ASSERT_OK(sysctl_get("/proc/sys/kernel/destructive_bpf_enabled",
+ destructive_bpf_enabled_orig), "read destructive_bpf_enabled"))
+ goto cleanup;
+
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+ if (test__start_subtest("bpf_panic_conditions"))
+ test_bpf_panic_conditions();
+
+cleanup:
+ if (strlen(destructive_bpf_enabled_orig) > 0)
+ sysctl_set("/proc/sys/kernel/destructive_bpf_enabled",
+ destructive_bpf_enabled_orig);
+}
--
2.35.3
Powered by blists - more mailing lists