lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20210929235910.1765396-13-jevburton.kernel@gmail.com>
Date:   Wed, 29 Sep 2021 23:59:09 +0000
From:   Joe Burton <jevburton.kernel@...il.com>
To:     Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>,
        Andrii Nakryiko <andrii@...nel.org>,
        Martin KaFai Lau <kafai@...com>
Cc:     Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
        John Fastabend <john.fastabend@...il.com>,
        KP Singh <kpsingh@...nel.org>,
        Petar Penkov <ppenkov@...gle.com>,
        Stanislav Fomichev <sdf@...gle.com>,
        Hao Luo <haoluo@...gle.com>, netdev@...r.kernel.org,
        bpf@...r.kernel.org, Joe Burton <jevburton@...gle.com>
Subject: [RFC PATCH v2 12/13] bpf: Add selftests for map tracing

From: Joe Burton <jevburton@...gle.com>

Add selftests for intended usage and infinite loop detection.

Signed-off-by: Joe Burton <jevburton@...gle.com>
---
 .../selftests/bpf/prog_tests/bpf_map_trace.c  | 144 ++++++++++++++++++
 .../bpf/progs/bpf_map_trace_delete_elem.c     |  49 ++++++
 .../selftests/bpf/progs/bpf_map_trace_loop0.c |  26 ++++
 .../selftests/bpf/progs/bpf_map_trace_loop1.c |  43 ++++++
 .../bpf/progs/bpf_map_trace_update_elem.c     |  51 +++++++
 .../selftests/bpf/verifier/map_trace.c        |  40 +++++
 6 files changed, 353 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/bpf_map_trace.c
 create mode 100644 tools/testing/selftests/bpf/progs/bpf_map_trace_delete_elem.c
 create mode 100644 tools/testing/selftests/bpf/progs/bpf_map_trace_loop0.c
 create mode 100644 tools/testing/selftests/bpf/progs/bpf_map_trace_loop1.c
 create mode 100644 tools/testing/selftests/bpf/progs/bpf_map_trace_update_elem.c
 create mode 100644 tools/testing/selftests/bpf/verifier/map_trace.c

diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_map_trace.c b/tools/testing/selftests/bpf/prog_tests/bpf_map_trace.c
new file mode 100644
index 000000000000..89bae9a83339
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_map_trace.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Google */
+#include <test_progs.h>
+
+#include <assert.h>
+#include <asm/unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <linux/bpf.h>
+#include <sys/stat.h>
+#include <sys/mount.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "bpf_map_trace_delete_elem.skel.h"
+#include "bpf_map_trace_loop0.skel.h"
+#include "bpf_map_trace_loop1.skel.h"
+#include "bpf_map_trace_update_elem.skel.h"
+
+/* One Collatz step (3x+1 if odd, x/2 if even). Mirrors the transformation
+ * the BPF tracer programs apply, so userspace can compute the value it
+ * expects to find in the tracer's map.
+ */
+uint32_t collatz(uint32_t x)
+{
+	return x % 2 ? x * 3 + 1 : x / 2;
+}
+
+/* Basic UPDATE_ELEM tracing test: load a kprobe on __sys_bpf plus a
+ * map_trace program on traced_map, trigger both via a bpf() syscall, and
+ * verify the tracer wrote (key, collatz(value)) into its own map.
+ */
+void update_elem__basic(void)
+{
+	const uint32_t tracer_value = collatz(0xdeadbeef);
+	struct bpf_map_trace_update_elem *skel;
+	const uint32_t tracer_key = 0x5;
+	uint32_t value;
+	int rc;
+
+	skel = bpf_map_trace_update_elem__open_and_load();
+	if (!ASSERT_NEQ(skel, NULL, "open/load skeleton failure"))
+		return;
+	rc = bpf_map_trace_update_elem__attach(skel);
+	if (!ASSERT_EQ(rc, 0, "attach skeleton failure")) {
+		fprintf(stderr, "Failed to attach skeleton: %d\n", errno);
+		goto out;
+	}
+
+	/* The kprobe will place (0x5, 0xdeadbeef) in its map. The tracer will
+	 * place (0x5, collatz(0xdeadbeef)) in its map. This map lookup will
+	 * trigger the kprobe.
+	 */
+	rc = bpf_map_lookup_elem(bpf_map__fd(skel->maps.tracer_map),
+				 &tracer_key, &value);
+	if (!ASSERT_EQ(rc, 0, "map lookup failure")) {
+		fprintf(stderr, "Failed to lookup tracer map: %s\n",
+			strerror(errno));
+		goto out;
+	}
+	if (!ASSERT_EQ(value, tracer_value, "map lookup mismatch"))
+		goto out;
+
+out:
+	bpf_map_trace_update_elem__destroy(skel);
+}
+
+/* Basic DELETE_ELEM tracing test: the kprobe deletes key 0x5 from
+ * traced_map; the tracer records (collatz(0x5), pid) in tracer_map, which
+ * this test then reads back and checks against its own pid.
+ */
+void delete_elem__basic(void)
+{
+	const uint32_t tracer_key = collatz(0x5);
+	struct bpf_map_trace_delete_elem *skel;
+	uint32_t value = 0;
+	int rc;
+
+	skel = bpf_map_trace_delete_elem__open_and_load();
+	if (!ASSERT_NEQ(skel, NULL, "open/load skeleton failure"))
+		return;
+	rc = bpf_map_trace_delete_elem__attach(skel);
+	if (!ASSERT_EQ(rc, 0, "attach skeleton failure")) {
+		fprintf(stderr, "Failed to attach skeleton: %d\n", errno);
+		goto out;
+	}
+
+	/* The kprobe will delete (0x5) from its map. The tracer will
+	 * place (collatz(0x5), pid) in its map. This map lookup will trigger
+	 * the kprobe.
+	 */
+	rc = bpf_map_lookup_elem(bpf_map__fd(skel->maps.tracer_map),
+				 &tracer_key, &value);
+	if (!ASSERT_EQ(rc, 0, "map lookup failure")) {
+		fprintf(stderr, "Failed to lookup tracer map: %s\n",
+			strerror(errno));
+		goto out;
+	}
+	/* The BPF side stores bpf_get_current_pid_tgid() >> 32, i.e. the
+	 * tgid, which matches userspace getpid().
+	 */
+	if (!ASSERT_EQ(value, getpid(), "map lookup mismatch"))
+		goto out;
+
+out:
+	bpf_map_trace_delete_elem__destroy(skel);
+}
+
+/* A tracer that updates the very map it traces would trigger itself
+ * forever; attaching it is expected to be rejected, so success here is a
+ * test failure (ASSERT_ERR_PTR).
+ */
+void infinite_loop__direct(void)
+{
+	struct bpf_map_trace_loop0 *skel;
+	struct bpf_link *tracer_link;
+
+	skel = bpf_map_trace_loop0__open_and_load();
+	if (!ASSERT_NEQ(skel, NULL, "open/load skeleton failure"))
+		goto out;
+	tracer_link = bpf_program__attach(skel->progs.tracer);
+	if (!ASSERT_ERR_PTR(tracer_link, "link creation success"))
+		goto out;
+
+out:
+	/* Safe on the open/load failure path: the skeleton destroy helper
+	 * tolerates a NULL skel — presumably, as generated by bpftool;
+	 * confirm against the skeleton header.
+	 */
+	bpf_map_trace_loop0__destroy(skel);
+}
+
+/* An indirect loop: tracer0 (traces map0, updates map1) plus tracer1
+ * (traces map1, updates map0) would recurse forever. Attaching tracer0
+ * alone must succeed; attaching tracer1, which closes the cycle, must be
+ * rejected.
+ */
+void infinite_loop__indirect(void)
+{
+	struct bpf_link *tracer_link0 = NULL;
+	struct bpf_map_trace_loop1 *skel;
+	struct bpf_link *tracer_link1;
+
+	skel = bpf_map_trace_loop1__open_and_load();
+	if (!ASSERT_NEQ(skel, NULL, "open/load skeleton failure"))
+		return;
+	tracer_link0 = bpf_program__attach(skel->progs.tracer0);
+	if (!ASSERT_OK_PTR(tracer_link0, "link creation failure"))
+		goto out;
+	/* Closing the tracing cycle must fail. */
+	tracer_link1 = bpf_program__attach(skel->progs.tracer1);
+	ASSERT_ERR_PTR(tracer_link1, "link creation success");
+
+out:
+	/* Destroy the first link explicitly: the link is not tracked in
+	 * skel->links (it came from bpf_program__attach(), not the skeleton
+	 * attach helper), so without this it leaks and keeps tracer0
+	 * attached for the rest of the selftest run.
+	 * bpf_link__destroy() is a no-op on NULL/error pointers.
+	 */
+	bpf_link__destroy(tracer_link0);
+	bpf_map_trace_loop1__destroy(skel);
+}
+
+/* Entry point for the map-trace selftest; each scenario runs as its own
+ * subtest so failures are reported independently.
+ */
+void test_bpf_map_trace(void)
+{
+	if (test__start_subtest("update_elem__basic"))
+		update_elem__basic();
+	if (test__start_subtest("delete_elem__basic"))
+		delete_elem__basic();
+	if (test__start_subtest("infinite_loop__direct"))
+		infinite_loop__direct();
+	if (test__start_subtest("infinite_loop__indirect"))
+		infinite_loop__indirect();
+}
+
diff --git a/tools/testing/selftests/bpf/progs/bpf_map_trace_delete_elem.c b/tools/testing/selftests/bpf/progs/bpf_map_trace_delete_elem.c
new file mode 100644
index 000000000000..4e47c13489ea
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_map_trace_delete_elem.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Google */
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+
+/* Map whose DELETE_ELEM operations are observed by the map_trace program.
+ * NOTE(review): struct bpf_map_def SEC("maps") is the legacy map syntax;
+ * new selftests generally prefer BTF-defined maps in SEC(".maps").
+ */
+struct bpf_map_def SEC("maps") traced_map = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(uint32_t),
+	.value_size = sizeof(uint32_t),
+	.max_entries = 64,
+};
+
+/* Map the tracer writes into; userspace reads it to verify tracing fired. */
+struct bpf_map_def SEC("maps") tracer_map = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(uint32_t),
+	.value_size = sizeof(uint32_t),
+	.max_entries = 64,
+};
+
+/* Fires on any bpf() syscall; deletes key 0x5 so the tracer below runs. */
+SEC("kprobe/__sys_bpf")
+int traced(struct pt_regs *regs)
+{
+	uint32_t key = 0x5;
+
+	bpf_map_delete_elem(&traced_map, &key);
+	return 0;
+}
+
+/* One Collatz step; matches the userspace copy in the prog_tests file. */
+uint32_t collatz(uint32_t x)
+{
+	return x % 2 ? x * 3 + 1 : x / 2;
+}
+
+/* Runs when a DELETE_ELEM on traced_map is traced; records
+ * (collatz(deleted key), tgid) in tracer_map for userspace to check.
+ * NOTE(review): bpf_probe_read() is deprecated; if ctx->key is kernel
+ * memory, bpf_probe_read_kernel() would be preferable — confirm.
+ */
+SEC("map_trace/traced_map/DELETE_ELEM")
+int tracer(struct bpf_map_trace_ctx__delete_elem *ctx)
+{
+	uint32_t key = 0, val = 0;
+
+	if (bpf_probe_read(&key, sizeof(key), ctx->key))
+		return 1;
+	key = collatz(key);
+	/* Upper 32 bits of pid_tgid are the tgid (userspace "pid"). */
+	val = (bpf_get_current_pid_tgid() >> 32);
+	bpf_map_update_elem(&tracer_map, &key, &val, /*flags=*/0);
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+
+
diff --git a/tools/testing/selftests/bpf/progs/bpf_map_trace_loop0.c b/tools/testing/selftests/bpf/progs/bpf_map_trace_loop0.c
new file mode 100644
index 000000000000..7205e8914380
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_map_trace_loop0.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Google */
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+
+/* NOTE(review): legacy bpf_map_def SEC("maps") syntax; BTF-defined maps
+ * (SEC(".maps")) are preferred in new selftests.
+ */
+struct bpf_map_def SEC("maps") traced_map = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(uint32_t),
+	.value_size = sizeof(uint32_t),
+	.max_entries = 64,
+};
+
+/* This traces traced_map and updates it, creating an (invalid) infinite loop.
+ * The selftest expects attachment of this program to be rejected.
+ */
+SEC("map_trace/traced_map/UPDATE_ELEM")
+int tracer(struct bpf_map_trace_ctx__update_elem *ctx)
+{
+	uint32_t key = 0, val = 0;
+
+	bpf_map_update_elem(&traced_map, &key, &val, /*flags=*/0);
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+
+
diff --git a/tools/testing/selftests/bpf/progs/bpf_map_trace_loop1.c b/tools/testing/selftests/bpf/progs/bpf_map_trace_loop1.c
new file mode 100644
index 000000000000..10e39f05c7c8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_map_trace_loop1.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Google */
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+
+/* Two maps forming a two-step tracing cycle: tracer0 (map0 -> map1) and
+ * tracer1 (map1 -> map0).
+ * NOTE(review): legacy bpf_map_def SEC("maps") syntax; consider
+ * BTF-defined maps (SEC(".maps")) for new selftests.
+ */
+struct bpf_map_def SEC("maps") map0 = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(uint32_t),
+	.value_size = sizeof(uint32_t),
+	.max_entries = 64,
+};
+
+struct bpf_map_def SEC("maps") map1 = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(uint32_t),
+	.value_size = sizeof(uint32_t),
+	.max_entries = 64,
+};
+
+/* Traces map0, updates map1. On its own this is acyclic, so the selftest
+ * expects attaching it to succeed.
+ */
+SEC("map_trace/map0/UPDATE_ELEM")
+int tracer0(struct bpf_map_trace_ctx__update_elem *ctx)
+{
+	uint32_t key = 0, val = 0;
+
+	bpf_map_update_elem(&map1, &key, &val, /*flags=*/0);
+	return 0;
+}
+
+/* Since this traces map1 and updates map0, it forms an infinite loop with
+ * tracer0. The selftest expects attaching it (after tracer0) to fail.
+ */
+SEC("map_trace/map1/UPDATE_ELEM")
+int tracer1(struct bpf_map_trace_ctx__update_elem *ctx)
+{
+	uint32_t key = 0, val = 0;
+
+	bpf_map_update_elem(&map0, &key, &val, /*flags=*/0);
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+
+
diff --git a/tools/testing/selftests/bpf/progs/bpf_map_trace_update_elem.c b/tools/testing/selftests/bpf/progs/bpf_map_trace_update_elem.c
new file mode 100644
index 000000000000..35a6026a90f9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_map_trace_update_elem.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Google */
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+
+struct bpf_map_def SEC("maps") traced_map = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(uint32_t),
+	.value_size = sizeof(uint32_t),
+	.max_entries = 64,
+};
+
+struct bpf_map_def SEC("maps") tracer_map = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(uint32_t),
+	.value_size = sizeof(uint32_t),
+	.max_entries = 64,
+};
+
+SEC("kprobe/__sys_bpf")
+int traced(struct pt_regs *regs)
+{
+	uint32_t key = 0x5;
+	uint32_t val = 0xdeadbeef;
+
+	bpf_map_update_elem(&traced_map, &key, &val, /*flags=*/0);
+	return 0;
+}
+
+uint32_t collatz(uint32_t x)
+{
+	return x % 2 ? x * 3 + 1 : x / 2;
+}
+
+SEC("map_trace/traced_map/UPDATE_ELEM")
+int tracer(struct bpf_map_trace_ctx__update_elem *ctx)
+{
+	uint32_t key = 0, val = 0;
+
+	if (bpf_probe_read(&key, sizeof(key), ctx->key))
+		return 1;
+	if (bpf_probe_read(&val, sizeof(val), ctx->value))
+		return 1;
+	val = collatz(val);
+	bpf_map_update_elem(&tracer_map, &key, &val, /*flags=*/0);
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+
diff --git a/tools/testing/selftests/bpf/verifier/map_trace.c b/tools/testing/selftests/bpf/verifier/map_trace.c
new file mode 100644
index 000000000000..a48b6448454e
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/map_trace.c
@@ -0,0 +1,40 @@
+/* Boundary tests around the 512-byte BPF stack: a map_update_elem() whose
+ * key sits at stack offset -512 is at the limit and must be accepted.
+ */
+{
+	"map tracing: full stack is accepted",
+	.insns = {
+		BPF_ST_MEM(BPF_DW, BPF_REG_10, -512, 0),
+		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -512),
+		BPF_LD_MAP_FD(BPF_REG_1, 0),
+		BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+		BPF_MOV64_IMM(BPF_REG_4, 0),
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+			     BPF_FUNC_map_update_elem),
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN(),
+	},
+	.fixup_map_hash_8b = { 3 },
+	.result_unpriv = ACCEPT,
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+/* Same program shape, but the write at offset -520 falls outside the
+ * 512-byte stack and must be rejected with the exact verifier message.
+ */
+{
+	"map tracing: overfull stack is not accepted",
+	.insns = {
+		BPF_ST_MEM(BPF_DW, BPF_REG_10, -520, 0),
+		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -520),
+		BPF_LD_MAP_FD(BPF_REG_1, 0),
+		BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+		BPF_MOV64_IMM(BPF_REG_4, 0),
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+			     BPF_FUNC_map_update_elem),
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN(),
+	},
+	.fixup_map_hash_8b = { 3 },
+	.result_unpriv = REJECT,
+	.result = REJECT,
+	.errstr = "invalid write to stack R10 off=-520 size=8",
+	.errstr_unpriv = "invalid write to stack R10 off=-520 size=8",
+	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
-- 
2.33.0.685.g46640cef36-goog

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ