Message-ID: <20201106220809.3950951-1-kafai@fb.com>
Date: Fri, 6 Nov 2020 14:08:09 -0800
From: Martin KaFai Lau <kafai@...com>
To: <bpf@...r.kernel.org>
CC: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>, <kernel-team@...com>,
<netdev@...r.kernel.org>
Subject: [PATCH bpf-next 3/3] bpf: selftest: Use bpf_sk_storage in FENTRY/FEXIT/RAW_TP

This patch tests storing the task's related info into bpf_sk_storage
by fentry/fexit tracing at listen, accept, and connect. It also tests
the raw_tp at inet_sock_set_state.

A negative test is done by tracing bpf_sk_storage_free() and calling
bpf_sk_storage_get() in the same program. It ensures this bpf program
cannot load, since the sk_storage tracing helpers are not allowed when
tracing the bpf_sk_storage code itself.
Signed-off-by: Martin KaFai Lau <kafai@...com>
---
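Notes:

For reference, a rough way to exercise the new test, assuming a kernel
built with the earlier patches of this series and the usual BPF selftest
toolchain (clang with BTF support):

  $ make -C tools/testing/selftests/bpf
  $ cd tools/testing/selftests/bpf
  $ ./test_progs -t sk_storage_tracing

The failure to load test_sk_storage_trace_itself.c is the expected
(passing) outcome of the negative test.
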
.../bpf/prog_tests/sk_storage_tracing.c | 135 ++++++++++++++++++
.../bpf/progs/test_sk_storage_trace_itself.c | 29 ++++
.../bpf/progs/test_sk_storage_tracing.c | 95 ++++++++++++
3 files changed, 259 insertions(+)
create mode 100644 tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c
create mode 100644 tools/testing/selftests/bpf/progs/test_sk_storage_trace_itself.c
create mode 100644 tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c b/tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c
new file mode 100644
index 000000000000..2b392590e8ca
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <sys/types.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "test_sk_storage_trace_itself.skel.h"
+#include "test_sk_storage_tracing.skel.h"
+
+#define LO_ADDR6 "::1"
+#define TEST_COMM "test_progs"
+
+struct sk_stg {
+ __u32 pid;
+ __u32 last_notclose_state;
+ char comm[16];
+};
+
+static struct test_sk_storage_tracing *skel;
+static __u32 duration;
+static pid_t my_pid;
+
+static int check_sk_stg(int sk_fd, __u32 expected_state)
+{
+ struct sk_stg sk_stg;
+ int err;
+
+ err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.sk_stg_map), &sk_fd,
+ &sk_stg);
+ if (!ASSERT_OK(err, "map_lookup(sk_stg_map)"))
+ return -1;
+
+ if (!ASSERT_EQ(sk_stg.last_notclose_state, expected_state,
+ "last_notclose_state"))
+ return -1;
+
+ if (!ASSERT_EQ(sk_stg.pid, my_pid, "pid"))
+ return -1;
+
+ if (!ASSERT_STREQ(sk_stg.comm, skel->bss->task_comm, "task_comm"))
+ return -1;
+
+ return 0;
+}
+
+static void do_test(void)
+{
+ int listen_fd = -1, passive_fd = -1, active_fd = -1, value = 1, err;
+ char abyte;
+
+ listen_fd = start_server(AF_INET6, SOCK_STREAM, LO_ADDR6, 0, 0);
+ if (CHECK(listen_fd == -1, "start_server",
+ "listen_fd:%d errno:%d\n", listen_fd, errno))
+ return;
+
+ active_fd = connect_to_fd(listen_fd, 0);
+ if (CHECK(active_fd == -1, "connect_to_fd", "active_fd:%d errno:%d\n",
+ active_fd, errno))
+ goto out;
+
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.del_sk_stg_map),
+ &active_fd, &value, 0);
+ if (!ASSERT_OK(err, "map_update(del_sk_stg_map)"))
+ goto out;
+
+ passive_fd = accept(listen_fd, NULL, 0);
+ if (CHECK(passive_fd == -1, "accept", "passive_fd:%d errno:%d\n",
+ passive_fd, errno))
+ goto out;
+
+ shutdown(active_fd, SHUT_WR);
+ err = read(passive_fd, &abyte, 1);
+ if (!ASSERT_OK(err, "read(passive_fd)"))
+ goto out;
+
+ shutdown(passive_fd, SHUT_WR);
+ err = read(active_fd, &abyte, 1);
+ if (!ASSERT_OK(err, "read(active_fd)"))
+ goto out;
+
+ err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.del_sk_stg_map),
+ &active_fd, &value);
+ if (!ASSERT_ERR(err, "map_lookup(del_sk_stg_map)"))
+ goto out;
+
+ err = check_sk_stg(listen_fd, BPF_TCP_LISTEN);
+ if (!ASSERT_OK(err, "listen_fd sk_stg"))
+ goto out;
+
+ err = check_sk_stg(active_fd, BPF_TCP_FIN_WAIT2);
+ if (!ASSERT_OK(err, "active_fd sk_stg"))
+ goto out;
+
+ err = check_sk_stg(passive_fd, BPF_TCP_LAST_ACK);
+ ASSERT_OK(err, "passive_fd sk_stg");
+
+out:
+ if (active_fd != -1)
+ close(active_fd);
+ if (passive_fd != -1)
+ close(passive_fd);
+ if (listen_fd != -1)
+ close(listen_fd);
+}
+
+void test_sk_storage_tracing(void)
+{
+ struct test_sk_storage_trace_itself *skel_itself;
+ int err;
+
+ my_pid = getpid();
+
+ skel_itself = test_sk_storage_trace_itself__open_and_load();
+
+ if (!ASSERT_NULL(skel_itself, "test_sk_storage_trace_itself")) {
+ test_sk_storage_trace_itself__destroy(skel_itself);
+ return;
+ }
+
+ skel = test_sk_storage_tracing__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_sk_storage_tracing"))
+ return;
+
+ err = test_sk_storage_tracing__attach(skel);
+ if (!ASSERT_OK(err, "test_sk_storage_tracing__attach")) {
+ test_sk_storage_tracing__destroy(skel);
+ return;
+ }
+
+ do_test();
+
+ test_sk_storage_tracing__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_sk_storage_trace_itself.c b/tools/testing/selftests/bpf/progs/test_sk_storage_trace_itself.c
new file mode 100644
index 000000000000..59ef72d02a61
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sk_storage_trace_itself.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} sk_stg_map SEC(".maps");
+
+SEC("fentry/bpf_sk_storage_free")
+int BPF_PROG(trace_bpf_sk_storage_free, struct sock *sk)
+{
+ int *value;
+
+ value = bpf_sk_storage_get(&sk_stg_map, sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+
+ if (value)
+ *value = 1;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
new file mode 100644
index 000000000000..8e94e5c080aa
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+
+struct sk_stg {
+ __u32 pid;
+ __u32 last_notclose_state;
+ char comm[16];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct sk_stg);
+} sk_stg_map SEC(".maps");
+
+/* Testing delete */
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} del_sk_stg_map SEC(".maps");
+
+char task_comm[16] = "";
+
+SEC("tp_btf/inet_sock_set_state")
+int BPF_PROG(trace_inet_sock_set_state, struct sock *sk, int oldstate,
+ int newstate)
+{
+ struct sk_stg *stg;
+
+ if (newstate == BPF_TCP_CLOSE)
+ return 0;
+
+ stg = bpf_sk_storage_get(&sk_stg_map, sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!stg)
+ return 0;
+
+ stg->last_notclose_state = newstate;
+
+ bpf_sk_storage_delete(&del_sk_stg_map, sk);
+
+ return 0;
+}
+
+static void set_task_info(struct sock *sk)
+{
+ struct task_struct *task;
+ struct sk_stg *stg;
+
+ stg = bpf_sk_storage_get(&sk_stg_map, sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!stg)
+ return;
+
+ stg->pid = bpf_get_current_pid_tgid();
+
+ task = (struct task_struct *)bpf_get_current_task();
+ bpf_core_read_str(&stg->comm, sizeof(stg->comm), &task->comm);
+ bpf_core_read_str(&task_comm, sizeof(task_comm), &task->comm);
+}
+
+SEC("fentry/inet_csk_listen_start")
+int BPF_PROG(trace_inet_csk_listen_start, struct sock *sk, int backlog)
+{
+ set_task_info(sk);
+
+ return 0;
+}
+
+SEC("fentry/tcp_connect")
+int BPF_PROG(trace_tcp_connect, struct sock *sk)
+{
+ set_task_info(sk);
+
+ return 0;
+}
+
+SEC("fexit/inet_csk_accept")
+int BPF_PROG(inet_csk_accept, struct sock *sk, int flags, int *err, bool kern,
+ struct sock *accepted_sk)
+{
+ set_task_info(accepted_sk);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
--
2.24.1