Message-ID: <20240925223023.735947-2-namhyung@kernel.org>
Date: Wed, 25 Sep 2024 15:30:21 -0700
From: Namhyung Kim <namhyung@...nel.org>
To: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andrii@...nel.org>
Cc: Martin KaFai Lau <martin.lau@...ux.dev>,
Eduard Zingerman <eddyz87@...il.com>,
Song Liu <song@...nel.org>,
Yonghong Song <yonghong.song@...ux.dev>,
John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...nel.org>,
Stanislav Fomichev <sdf@...ichev.me>,
Hao Luo <haoluo@...gle.com>,
Jiri Olsa <jolsa@...nel.org>,
LKML <linux-kernel@...r.kernel.org>,
bpf@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>,
Christoph Lameter <cl@...ux.com>,
Pekka Enberg <penberg@...nel.org>,
David Rientjes <rientjes@...gle.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
Vlastimil Babka <vbabka@...e.cz>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Hyeonggon Yoo <42.hyeyoo@...il.com>,
linux-mm@...ck.org
Subject: [RFC/PATCH bpf-next 1/3] bpf: Add slab iterator
The new "slab" iterator will traverse the list of slab caches
(kmem_cache) and call attached BPF programs for each entry. It should
check the argument (ctx.s) if it's NULL before using it.
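
For illustration, a minimal BPF program attached to this iterator might
look like the sketch below. It is only a sketch: it assumes a vmlinux.h
generated from a kernel with this patch applied, and the program name
and the kmem_cache fields it reads (name, object_size) are picked just
for the example.

  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  char _license[] SEC("license") = "GPL";

  SEC("iter/slab")
  int slab_info(struct bpf_iter__slab *ctx)
  {
          struct seq_file *seq = ctx->meta->seq;
          struct kmem_cache *s = ctx->s;

          /* s is NULL in the extra call made from the stop callback */
          if (s == NULL)
                  return 0;

          BPF_SEQ_PRINTF(seq, "%s %u\n", s->name, s->object_size);
          return 0;
  }
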
The iteration is done with slab_mutex held, but it breaks out and
returns to userspace when the BPF program emits more data to the seq
buffer than the buffer size given by the user. In other words, the
whole iteration is protected by slab_mutex only as long as the program
emits nothing.
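
To make the resume behavior concrete, reading the iterator from
userspace might look roughly like this (a libbpf sketch with most error
handling trimmed; the object file and program name just match the
example above):

  #include <stdio.h>
  #include <unistd.h>
  #include <bpf/bpf.h>
  #include <bpf/libbpf.h>

  int main(void)
  {
          struct bpf_object *obj = bpf_object__open_file("slab_iter.bpf.o", NULL);
          struct bpf_program *prog;
          struct bpf_link *link;
          char buf[1024];
          int iter_fd;
          ssize_t n;

          if (!obj || bpf_object__load(obj))
                  return 1;

          prog = bpf_object__find_program_by_name(obj, "slab_info");
          link = bpf_program__attach_iter(prog, NULL);
          iter_fd = bpf_iter_create(bpf_link__fd(link));

          /*
           * Each read() resumes the iteration. If the previous read
           * filled buf before the cache list was exhausted, the kernel
           * drops slab_mutex and returns, then re-takes the mutex on
           * the next read() and continues from the saved position.
           */
          while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
                  fwrite(buf, 1, n, stdout);

          close(iter_fd);
          bpf_link__destroy(link);
          bpf_object__close(obj);
          return 0;
  }
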
It includes the internal "mm/slab.h" header to access kmem_cache,
slab_caches and slab_mutex. Hope that's ok with the mm folks.
Signed-off-by: Namhyung Kim <namhyung@...nel.org>
---
include/linux/btf_ids.h | 1 +
kernel/bpf/Makefile | 1 +
kernel/bpf/slab_iter.c | 131 ++++++++++++++++++++++++++++++++++++++++
3 files changed, 133 insertions(+)
create mode 100644 kernel/bpf/slab_iter.c
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index c0e3e1426a82f5c4..1474ab7f44a9cff6 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -283,5 +283,6 @@ extern u32 btf_tracing_ids[];
extern u32 bpf_cgroup_btf_id[];
extern u32 bpf_local_storage_map_btf_id[];
extern u32 btf_bpf_map_id[];
+extern u32 bpf_slab_btf_id[];
#endif
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 9b9c151b5c826b31..e18b09069349e1e9 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -52,3 +52,4 @@ obj-$(CONFIG_BPF_PRELOAD) += preload/
obj-$(CONFIG_BPF_SYSCALL) += relo_core.o
obj-$(CONFIG_BPF_SYSCALL) += btf_iter.o
obj-$(CONFIG_BPF_SYSCALL) += btf_relocate.o
+obj-$(CONFIG_BPF_SYSCALL) += slab_iter.o
diff --git a/kernel/bpf/slab_iter.c b/kernel/bpf/slab_iter.c
new file mode 100644
index 0000000000000000..bf1e50bd7497220e
--- /dev/null
+++ b/kernel/bpf/slab_iter.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Google */
+#include <linux/bpf.h>
+#include <linux/btf_ids.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+
+#include "../../mm/slab.h" /* kmem_cache, slab_caches and slab_mutex */
+
+struct bpf_iter__slab {
+ __bpf_md_ptr(struct bpf_iter_meta *, meta);
+ __bpf_md_ptr(struct kmem_cache *, s);
+};
+
+static void *slab_iter_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ loff_t cnt = 0;
+ struct kmem_cache *s = NULL;
+
+ mutex_lock(&slab_mutex);
+
+ /*
+ * Find the entry at the given position in the slab_caches list instead
+ * of keeping a reference to the last visited entry while slab_mutex is
+ * released. It might miss an entry that is deleted while the lock is
+ * dropped, but that should be rare and there's not much we can do
+ * about it.
+ */
+ list_for_each_entry(s, &slab_caches, list) {
+ if (cnt == *pos)
+ break;
+
+ cnt++;
+ }
+
+ if (&s->list == &slab_caches)
+ return NULL;
+
+ ++*pos;
+ return s;
+}
+
+static void slab_iter_seq_stop(struct seq_file *seq, void *v)
+{
+ struct bpf_iter_meta meta;
+ struct bpf_iter__slab ctx = {
+ .meta = &meta,
+ .s = v,
+ };
+ struct bpf_prog *prog;
+
+ meta.seq = seq;
+ prog = bpf_iter_get_info(&meta, true);
+ if (prog)
+ bpf_iter_run_prog(prog, &ctx);
+
+ mutex_unlock(&slab_mutex);
+}
+
+static void *slab_iter_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct kmem_cache *s = v;
+
+ ++*pos;
+
+ if (list_last_entry(&slab_caches, struct kmem_cache, list) == s)
+ return NULL;
+
+ return list_next_entry(s, list);
+}
+
+static int slab_iter_seq_show(struct seq_file *seq, void *v)
+{
+ struct bpf_iter_meta meta;
+ struct bpf_iter__slab ctx = {
+ .meta = &meta,
+ .s = v,
+ };
+ struct bpf_prog *prog;
+ int ret = 0;
+
+ meta.seq = seq;
+ prog = bpf_iter_get_info(&meta, false);
+ if (prog)
+ ret = bpf_iter_run_prog(prog, &ctx);
+
+ return ret;
+}
+
+static const struct seq_operations slab_iter_seq_ops = {
+ .start = slab_iter_seq_start,
+ .next = slab_iter_seq_next,
+ .stop = slab_iter_seq_stop,
+ .show = slab_iter_seq_show,
+};
+
+BTF_ID_LIST_GLOBAL_SINGLE(bpf_slab_btf_id, struct, kmem_cache)
+
+static const struct bpf_iter_seq_info slab_iter_seq_info = {
+ .seq_ops = &slab_iter_seq_ops,
+};
+
+static void bpf_iter_slab_show_fdinfo(const struct bpf_iter_aux_info *aux,
+ struct seq_file *seq)
+{
+ seq_puts(seq, "slab iter\n");
+}
+
+DEFINE_BPF_ITER_FUNC(slab, struct bpf_iter_meta *meta,
+ struct kmem_cache *s)
+
+static struct bpf_iter_reg bpf_slab_reg_info = {
+ .target = "slab",
+ .feature = BPF_ITER_RESCHED,
+ .show_fdinfo = bpf_iter_slab_show_fdinfo,
+ .ctx_arg_info_size = 1,
+ .ctx_arg_info = {
+ { offsetof(struct bpf_iter__slab, s),
+ PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
+ },
+ .seq_info = &slab_iter_seq_info,
+};
+
+static int __init bpf_slab_iter_init(void)
+{
+ bpf_slab_reg_info.ctx_arg_info[0].btf_id = bpf_slab_btf_id[0];
+ return bpf_iter_reg_target(&bpf_slab_reg_info);
+}
+
+late_initcall(bpf_slab_iter_init);
--
2.46.0.792.g87dc391469-goog