Message-Id: <20211102021432.2807760-3-jevburton.kernel@gmail.com>
Date: Tue, 2 Nov 2021 02:14:31 +0000
From: Joe Burton <jevburton.kernel@...il.com>
To: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andrii@...nel.org>,
Martin KaFai Lau <kafai@...com>,
Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...nel.org>, linux-kernel@...r.kernel.org,
netdev@...r.kernel.org, bpf@...r.kernel.org
Cc: Petar Penkov <ppenkov@...gle.com>,
Stanislav Fomichev <sdf@...gle.com>,
Joe Burton <jevburton@...gle.com>
Subject: [RFC PATCH v3 2/3] bpf: Add selftests
From: Joe Burton <jevburton@...gle.com>
Add selftests verifying that each supported map type is traced.
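The test integrates with the usual test_progs runner and can be
selected by name. As an illustrative invocation (a sketch, not output
captured from a real run), assuming a kernel with this series applied:
  $ cd tools/testing/selftests/bpf
  $ make
  $ sudo ./test_progs -t map_trace
  #XX map_trace:OK
  Summary: 1/0 PASSED, 0 SKIPPED, 0 FAILED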
Signed-off-by: Joe Burton <jevburton@...gle.com>
---
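Note for reviewers: the fentry attach points exercised by this selftest
are the map-tracing hooks introduced in patch 1/3 of this series. Going
only by the BPF_PROG signatures below (a sketch; the authoritative
definitions live in patch 1/3), they should look roughly like:
  /* Hypothetical prototypes, reconstructed from this selftest. */
  void bpf_map_trace_update_elem(struct bpf_map *map, void *key,
                                 void *value, u64 map_flags);
  void bpf_map_trace_delete_elem(struct bpf_map *map, void *key);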
.../selftests/bpf/prog_tests/map_trace.c | 165 ++++++++++++++++++
.../selftests/bpf/progs/bpf_map_trace.c | 94 ++++++++++
.../bpf/progs/bpf_map_trace_common.h | 11 ++
3 files changed, 270 insertions(+)
create mode 100644 tools/testing/selftests/bpf/prog_tests/map_trace.c
create mode 100644 tools/testing/selftests/bpf/progs/bpf_map_trace.c
create mode 100644 tools/testing/selftests/bpf/progs/bpf_map_trace_common.h
diff --git a/tools/testing/selftests/bpf/prog_tests/map_trace.c b/tools/testing/selftests/bpf/prog_tests/map_trace.c
new file mode 100644
index 000000000000..4b54a8e3769a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_trace.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Google */
+#include <test_progs.h>
+
+#include "bpf_map_trace.skel.h"
+#include "progs/bpf_map_trace_common.h"
+
+#include <sys/mount.h>
+#include <sys/stat.h>
+
+enum BoolOrErr {
+ TRUE = 0,
+ FALSE = 1,
+ ERROR = 2,
+};
+
+static enum BoolOrErr percpu_key_is_set(struct bpf_map *map, uint32_t map_key)
+{
+ int num_cpus = libbpf_num_possible_cpus();
+ uint64_t *percpu_map_val = NULL;
+ int map_fd = bpf_map__fd(map);
+ enum BoolOrErr ret = ERROR;
+ int err;
+ int i;
+
+ if (!ASSERT_GE(num_cpus, 1, "get number of cpus"))
+ goto out;
+
+ percpu_map_val = malloc(sizeof(*percpu_map_val) * num_cpus);
+ if (!ASSERT_NEQ(percpu_map_val, NULL, "allocate percpu map array"))
+ goto out;
+
+ err = bpf_map_lookup_elem(map_fd, &map_key, percpu_map_val);
+ if (!ASSERT_EQ(err, 0, "percpu map lookup"))
+ goto out;
+
+ ret = FALSE;
+ for (i = 0; i < num_cpus; i++)
+ if (percpu_map_val[i] != 0)
+ ret = TRUE;
+
+out:
+ if (percpu_map_val != NULL)
+ free(percpu_map_val);
+
+ return ret;
+}
+
+static enum BoolOrErr key_is_set(struct bpf_map *map, uint32_t map_key)
+{
+ int map_fd = bpf_map__fd(map);
+ uint32_t map_val;
+ int rc;
+
+ rc = bpf_map_lookup_elem(map_fd, &map_key, &map_val);
+ if (!ASSERT_EQ(rc, 0, "map lookup"))
+ return ERROR;
+
+ return (map_val == 0 ? FALSE : TRUE);
+}
+
+static void verify_map_contents(struct bpf_map_trace *skel)
+{
+ enum BoolOrErr rc_or_err;
+ struct bpf_map *map;
+
+ map = skel->maps.array_map;
+ rc_or_err = key_is_set(map, ACCESS_LOC__TRACE_UPDATE);
+ if (!ASSERT_EQ(rc_or_err, TRUE, "array map updates are traced"))
+ return;
+ rc_or_err = key_is_set(map, ACCESS_LOC__TRACE_DELETE);
+ if (!ASSERT_EQ(rc_or_err, FALSE, "array map deletions are not traced"))
+ return;
+
+ map = skel->maps.percpu_array_map;
+ rc_or_err = percpu_key_is_set(map, ACCESS_LOC__TRACE_UPDATE);
+ if (!ASSERT_EQ(rc_or_err, TRUE, "percpu array map updates are traced"))
+ return;
+ rc_or_err = percpu_key_is_set(map, ACCESS_LOC__TRACE_DELETE);
+ if (!ASSERT_EQ(rc_or_err, FALSE,
+ "percpu array map deletions are not traced"))
+ return;
+
+ map = skel->maps.hash_map;
+ rc_or_err = key_is_set(map, ACCESS_LOC__TRACE_UPDATE);
+ if (!ASSERT_EQ(rc_or_err, TRUE, "hash map updates are traced"))
+ return;
+ rc_or_err = key_is_set(map, ACCESS_LOC__TRACE_DELETE);
+ if (!ASSERT_EQ(rc_or_err, TRUE, "hash map deletions are traced"))
+ return;
+
+ map = skel->maps.percpu_hash_map;
+ rc_or_err = percpu_key_is_set(map, ACCESS_LOC__TRACE_UPDATE);
+ if (!ASSERT_EQ(rc_or_err, TRUE, "percpu hash map updates are traced"))
+ return;
+ rc_or_err = percpu_key_is_set(map, ACCESS_LOC__TRACE_DELETE);
+ if (!ASSERT_EQ(rc_or_err, TRUE,
+ "percpu hash map deletions are traced"))
+ return;
+
+ map = skel->maps.lru_hash_map;
+ rc_or_err = key_is_set(map, ACCESS_LOC__TRACE_UPDATE);
+ if (!ASSERT_EQ(rc_or_err, TRUE, "lru_hash map updates are traced"))
+ return;
+ rc_or_err = key_is_set(map, ACCESS_LOC__TRACE_DELETE);
+ if (!ASSERT_EQ(rc_or_err, TRUE, "lru_hash map deletions are traced"))
+ return;
+
+ map = skel->maps.percpu_lru_hash_map;
+ rc_or_err = percpu_key_is_set(map, ACCESS_LOC__TRACE_UPDATE);
+ if (!ASSERT_EQ(rc_or_err, TRUE,
+ "percpu lru hash map updates are traced"))
+ return;
+ rc_or_err = percpu_key_is_set(map, ACCESS_LOC__TRACE_DELETE);
+ if (!ASSERT_EQ(rc_or_err, TRUE,
+ "percpu lru hash map deletions are traced"))
+ return;
+}
+
+static void map_trace_test(void)
+{
+ struct bpf_map_trace *skel;
+ ssize_t bytes_written;
+ char write_buf = 'a';
+ int write_fd = -1;
+ int rc;
+
+ /*
+ * Load and attach programs.
+ */
+ skel = bpf_map_trace__open_and_load();
+ if (!ASSERT_NEQ(skel, NULL, "open/load skeleton"))
+ return;
+
+ rc = bpf_map_trace__attach(skel);
+ if (!ASSERT_EQ(rc, 0, "attach skeleton"))
+ goto out;
+
+ /*
+ * Invoke core BPF program.
+ */
+ write_fd = open("/tmp/map_trace_test_file", O_CREAT | O_WRONLY, 0644);
+ if (!ASSERT_GE(write_fd, 0, "open tmp file for writing"))
+ goto out;
+
+ bytes_written = write(write_fd, &write_buf, sizeof(write_buf));
+ if (!ASSERT_EQ(bytes_written, sizeof(write_buf), "write to tmp file"))
+ goto out;
+
+ /*
+ * Verify that tracing programs were invoked as expected.
+ */
+ verify_map_contents(skel);
+
+out:
+ if (skel)
+ bpf_map_trace__destroy(skel);
+ if (write_fd != -1)
+ close(write_fd);
+}
+
+void test_map_trace(void)
+{
+ map_trace_test();
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_map_trace.c b/tools/testing/selftests/bpf/progs/bpf_map_trace.c
new file mode 100644
index 000000000000..6135cd86b521
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_map_trace.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Google */
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <errno.h>
+#include <string.h>
+
+#include "bpf_map_trace_common.h"
+
+#define DECLARE_MAP(name, map_type) \
+ struct { \
+ __uint(type, map_type); \
+ __uint(max_entries, __ACCESS_LOC__MAX); \
+ __type(key, u32); \
+ __type(value, u32); \
+ } name SEC(".maps")
+
+DECLARE_MAP(array_map, BPF_MAP_TYPE_ARRAY);
+DECLARE_MAP(percpu_array_map, BPF_MAP_TYPE_PERCPU_ARRAY);
+DECLARE_MAP(hash_map, BPF_MAP_TYPE_HASH);
+DECLARE_MAP(percpu_hash_map, BPF_MAP_TYPE_PERCPU_HASH);
+DECLARE_MAP(lru_hash_map, BPF_MAP_TYPE_LRU_HASH);
+DECLARE_MAP(percpu_lru_hash_map, BPF_MAP_TYPE_LRU_PERCPU_HASH);
+
+static inline void __log_location(void *map,
+ enum MapAccessLocations location)
+{
+ u32 key = location;
+ u32 val = 1;
+
+ bpf_map_update_elem(map, &key, &val, /*flags=*/0);
+}
+
+static inline void log_location(struct bpf_map *map,
+ enum MapAccessLocations location)
+{
+ if (map == &array_map)
+ __log_location(&array_map, location);
+ if (map == &percpu_array_map)
+ __log_location(&percpu_array_map, location);
+ if (map == &hash_map)
+ __log_location(&hash_map, location);
+ if (map == &percpu_hash_map)
+ __log_location(&percpu_hash_map, location);
+ if (map == &lru_hash_map)
+ __log_location(&lru_hash_map, location);
+ if (map == &percpu_lru_hash_map)
+ __log_location(&percpu_lru_hash_map, location);
+}
+
+SEC("fentry/bpf_map_trace_update_elem")
+int BPF_PROG(fentry__bpf_map_trace_update_elem,
+ struct bpf_map *map, void *key,
+ void *value, u64 map_flags)
+{
+ log_location(map, ACCESS_LOC__TRACE_UPDATE);
+ return 0;
+}
+
+SEC("fentry/bpf_map_trace_delete_elem")
+int BPF_PROG(fentry__bpf_map_trace_delete_elem,
+ struct bpf_map *map, void *key)
+{
+ log_location(map, ACCESS_LOC__TRACE_DELETE);
+ return 0;
+}
+
+static inline void do_map_accesses(void *map)
+{
+ u32 key = ACCESS_LOC__APP;
+ u32 val = 1;
+
+ bpf_map_update_elem(map, &key, &val, /*flags=*/0);
+ bpf_map_delete_elem(map, &key);
+}
+
+SEC("fentry/__x64_sys_write")
+int BPF_PROG(fentry__x64_sys_write, struct pt_regs *regs)
+{
+ /*
+ * Trigger an update and a delete for every map type under test.
+ * We want to verify that bpf_map_trace_{update,delete}_elem() fire
+ * for each map type.
+ */
+ do_map_accesses(&array_map);
+ do_map_accesses(&percpu_array_map);
+ do_map_accesses(&hash_map);
+ do_map_accesses(&percpu_hash_map);
+ do_map_accesses(&lru_hash_map);
+ do_map_accesses(&percpu_lru_hash_map);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_map_trace_common.h b/tools/testing/selftests/bpf/progs/bpf_map_trace_common.h
new file mode 100644
index 000000000000..3aac75953508
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_map_trace_common.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 Google */
+#pragma once
+
+enum MapAccessLocations {
+ ACCESS_LOC__APP,
+ ACCESS_LOC__TRACE_UPDATE,
+ ACCESS_LOC__TRACE_DELETE,
+
+ __ACCESS_LOC__MAX,
+};
--
2.33.1.1089.g2158813163f-goog