[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20210929235910.1765396-2-jevburton.kernel@gmail.com>
Date: Wed, 29 Sep 2021 23:58:58 +0000
From: Joe Burton <jevburton.kernel@...il.com>
To: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andrii@...nel.org>,
Martin KaFai Lau <kafai@...com>
Cc: Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...nel.org>,
Petar Penkov <ppenkov@...gle.com>,
Stanislav Fomichev <sdf@...gle.com>,
Hao Luo <haoluo@...gle.com>, netdev@...r.kernel.org,
bpf@...r.kernel.org, Joe Burton <jevburton@...gle.com>
Subject: [RFC PATCH v2 01/13] bpf: Add machinery to register map tracing hooks
From: Joe Burton <jevburton@...gle.com>
Maps may be traced in two ways (so far): after a key-only operation
such as a deletion, and after a key-value operation such as an update.
Each kind of operation is identified by a tracing type.
In order to reject invalid map tracing programs at load time, we export
a function for each tracing type. Programs include this function's name
or BTF ID when loading. The verifier will check that the traced
function is registered for map tracing and that the program has the
right arguments.
Signed-off-by: Joe Burton <jevburton@...gle.com>
---
include/linux/bpf.h | 12 ++++++++++++
include/uapi/linux/bpf.h | 9 +++++++++
kernel/bpf/Makefile | 1 +
kernel/bpf/map_trace.c | 33 +++++++++++++++++++++++++++++++++
4 files changed, 55 insertions(+)
create mode 100644 kernel/bpf/map_trace.c
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 19735d59230a..dad62d5571c9 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1510,6 +1510,17 @@ struct bpf_iter_reg {
const struct bpf_iter_seq_info *seq_info;
};
+/*
+ * Expands to the symbol name of the attachable stub for a given trace
+ * type, e.g. bpf_map_trace__BPF_MAP_TRACE_UPDATE_ELEM. Must stay in
+ * sync with the uapi BPF_MAP_TRACE_FUNC() string macro.
+ */
+#define BPF_MAP_TRACE_FUNC_SYM(trace_type) bpf_map_trace__ ## trace_type
+/*
+ * Defines an empty function that map tracing programs attach to.
+ * NOTE(review): the stub is marked __init, so its text is discarded
+ * after boot — confirm that attaching to it post-boot behaves as
+ * intended (BTF ID resolution vs. freed .init.text).
+ */
+#define DEFINE_BPF_MAP_TRACE_FUNC(trace_type, args...) \
+ extern int BPF_MAP_TRACE_FUNC_SYM(trace_type)(args); \
+ int __init BPF_MAP_TRACE_FUNC_SYM(trace_type)(args) \
+ { return 0; }
+
+/*
+ * Registration descriptor for one map tracing target. The registrant
+ * retains ownership of this structure; bpf_map_trace_reg_target()
+ * stores the pointer, so it must remain valid after registration.
+ */
+struct bpf_map_trace_reg {
+ const char *target;
+ enum bpf_map_trace_type trace_type;
+};
+
struct bpf_iter_meta {
__bpf_md_ptr(struct seq_file *, seq);
u64 session_id;
@@ -1528,6 +1539,7 @@ void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
const struct bpf_func_proto *
bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
+int bpf_map_trace_reg_target(const struct bpf_map_trace_reg *reg_info);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 6fc59d61937a..17e8f4113369 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -93,6 +93,15 @@ union bpf_iter_link_info {
} map;
};
+/*
+ * Kinds of map operations that can be traced. Part of the uapi;
+ * existing values must never be renumbered.
+ */
+enum bpf_map_trace_type {
+ BPF_MAP_TRACE_UPDATE_ELEM = 0,
+ BPF_MAP_TRACE_DELETE_ELEM = 1,
+
+ MAX_BPF_MAP_TRACE_TYPE,
+};
+
+/*
+ * Builds the traced stub's symbol name as a string literal, for
+ * userspace loaders. Must match BPF_MAP_TRACE_FUNC_SYM() in
+ * include/linux/bpf.h.
+ */
+#define BPF_MAP_TRACE_FUNC(trace_type) "bpf_map_trace__" #trace_type
+
/* BPF syscall commands, see bpf(2) man-page for more details. */
/**
* DOC: eBPF Syscall Preamble
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 7f33098ca63f..34eab32e0d9d 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -36,3 +36,4 @@ obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o
obj-${CONFIG_BPF_LSM} += bpf_lsm.o
endif
obj-$(CONFIG_BPF_PRELOAD) += preload/
+obj-$(CONFIG_BPF_SYSCALL) += map_trace.o
diff --git a/kernel/bpf/map_trace.c b/kernel/bpf/map_trace.c
new file mode 100644
index 000000000000..d8f829535f7e
--- /dev/null
+++ b/kernel/bpf/map_trace.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2021 Google */
+
+#include <linux/filter.h>
+#include <linux/bpf.h>
+
+/*
+ * Internal bookkeeping for one registered tracing target. Linked into
+ * the file-scope 'targets' list; reg_info points at the registrant's
+ * descriptor (not copied). btf_id starts at 0 (unresolved).
+ */
+struct bpf_map_trace_target_info {
+ struct list_head list;
+ const struct bpf_map_trace_reg *reg_info;
+ u32 btf_id;
+};
+
+/* All registered targets; additions are serialized by targets_mutex. */
+static struct list_head targets = LIST_HEAD_INIT(targets);
+static DEFINE_MUTEX(targets_mutex);
+
+/*
+ * Register a map tracing target so the verifier can later validate
+ * programs that attach to it. Stores (does not copy) reg_info, which
+ * must therefore outlive the registration. Returns 0 on success or
+ * -ENOMEM on allocation failure.
+ *
+ * NOTE(review): duplicate registrations of the same target are not
+ * detected or rejected — confirm whether callers guarantee uniqueness.
+ * There is also no corresponding unregister path in this patch.
+ */
+int bpf_map_trace_reg_target(const struct bpf_map_trace_reg *reg_info)
+{
+ struct bpf_map_trace_target_info *tinfo;
+
+ tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
+ if (!tinfo)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&tinfo->list);
+ tinfo->reg_info = reg_info;
+ /* btf_id is resolved lazily elsewhere; 0 means "not yet resolved". */
+ tinfo->btf_id = 0;
+
+ /* Only list insertion is protected; readers are expected to take
+ * targets_mutex as well.
+ */
+ mutex_lock(&targets_mutex);
+ list_add(&tinfo->list, &targets);
+ mutex_unlock(&targets_mutex);
+
+ return 0;
+}
--
2.33.0.685.g46640cef36-goog
Powered by blists - more mailing lists