Message-ID: <20200415192749.4083310-1-yhs@fb.com>
Date:   Wed, 15 Apr 2020 12:27:49 -0700
From:   Yonghong Song <yhs@...com>
To:     Andrii Nakryiko <andriin@...com>, <bpf@...r.kernel.org>,
        Martin KaFai Lau <kafai@...com>, <netdev@...r.kernel.org>
CC:     Alexei Starovoitov <ast@...com>,
        Daniel Borkmann <daniel@...earbox.net>, <kernel-team@...com>
Subject: [RFC PATCH bpf-next v2 08/17] bpf: add bpf_map target

This patch adds a bpf_map target, which traverses all bpf_maps
through map_idr. A reference is held on the map for the duration of
show() to ensure field accesses remain safe and correct.
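
For illustration, a dump program attached to this target could look
roughly like the sketch below. Only the bpfdump__bpf_map context
layout comes from this patch; the SEC() name, the BPF_SEQ_PRINTF()
macro and the includes are assumptions for illustration, not part of
this patch itself.

    #include "vmlinux.h"              /* assumed CO-RE kernel types */
    #include <bpf/bpf_helpers.h>
    /* BPF_SEQ_PRINTF() is assumed to wrap the bpf_seq_printf()
     * helper introduced elsewhere in this series.
     */

    SEC("dump/bpf_map")               /* assumed section name */
    int dump_bpf_map(struct bpfdump__bpf_map *ctx)
    {
            struct seq_file *seq = ctx->meta->seq;
            struct bpf_map *map = ctx->map;

            /* show() is also invoked with a NULL map at the end of
             * the walk (see bpf_map_seq_stop() below); print nothing
             * in that case.
             */
            if (!map)
                    return 0;

            BPF_SEQ_PRINTF(seq, "%8u %16s\n", map->id, map->name);
            return 0;
    }

    char _license[] SEC("license") = "GPL";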

Signed-off-by: Yonghong Song <yhs@...com>
---
 kernel/bpf/syscall.c | 116 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 116 insertions(+)

diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 4a3c9fceebb8..e6a4514435c4 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -3800,3 +3800,119 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
 
 	return err;
 }
+
+struct bpfdump_seq_map_info {
+	struct bpf_map *map;
+	u32 id;
+};
+
+static struct bpf_map *bpf_map_seq_get_next(u32 *id)
+{
+	struct bpf_map *map;
+
+	spin_lock_bh(&map_idr_lock);
+	map = idr_get_next(&map_idr, id);
+	if (map)
+		map = __bpf_map_inc_not_zero(map, false);
+	spin_unlock_bh(&map_idr_lock);
+
+	return map;
+}
+
+static void *bpf_map_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct bpfdump_seq_map_info *info = seq->private;
+	struct bpf_map *map;
+	u32 id = info->id + 1;
+
+	map = bpf_map_seq_get_next(&id);
+	if (!map)
+		return NULL;
+
+	++*pos;
+	info->map = map;
+	info->id = id;
+	return map;
+}
+
+static void *bpf_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct bpfdump_seq_map_info *info = seq->private;
+	struct bpf_map *map;
+	u32 id = info->id + 1;
+
+	++*pos;
+	map = bpf_map_seq_get_next(&id);
+	if (!map)
+		return NULL;
+
+	__bpf_map_put(info->map, true);
+	info->map = map;
+	info->id = id;
+	return map;
+}
+
+struct bpfdump__bpf_map {
+	struct bpf_dump_meta *meta;
+	struct bpf_map *map;
+};
+
+int __init __bpfdump__bpf_map(struct bpf_dump_meta *meta, struct bpf_map *map)
+{
+	return 0;
+}
+
+static int bpf_map_seq_show(struct seq_file *seq, void *v)
+{
+	struct bpf_dump_meta meta;
+	struct bpfdump__bpf_map ctx;
+	struct bpf_prog *prog;
+	int ret = 0;
+
+	ctx.meta = &meta;
+	ctx.map = v;
+	meta.seq = seq;
+	prog = bpf_dump_get_prog(seq, sizeof(struct bpfdump_seq_map_info),
+				 &meta.session_id, &meta.seq_num,
+				 v == (void *)0);
+	if (prog)
+		ret = bpf_dump_run_prog(prog, &ctx);
+
+	return ret == 0 ? 0 : -EINVAL;
+}
+
+static void bpf_map_seq_stop(struct seq_file *seq, void *v)
+{
+	struct bpfdump_seq_map_info *info = seq->private;
+
+	if (!v)
+		bpf_map_seq_show(seq, v);
+
+	if (info->map) {
+		__bpf_map_put(info->map, true);
+		info->map = NULL;
+	}
+}
+
+static const struct seq_operations bpf_map_seq_ops = {
+	.start	= bpf_map_seq_start,
+	.next	= bpf_map_seq_next,
+	.stop	= bpf_map_seq_stop,
+	.show	= bpf_map_seq_show,
+};
+
+static int __init bpf_map_dump_init(void)
+{
+	struct bpf_dump_reg reg_info = {
+		.target			= "bpf_map",
+		.target_proto 		= "__bpfdump__bpf_map",
+		.prog_ctx_type_name	= "bpfdump__bpf_map",
+		.seq_ops		= &bpf_map_seq_ops,
+		.seq_priv_size		= sizeof(struct bpfdump_seq_map_info),
+		.target_feature		= 0,
+	};
+
+	return bpf_dump_reg_target(&reg_info);
+}
+
+late_initcall(bpf_map_dump_init);
-- 
2.24.1
