Message-ID: <20200408232528.2675856-1-yhs@fb.com>
Date:   Wed, 8 Apr 2020 16:25:28 -0700
From:   Yonghong Song <yhs@...com>
To:     Andrii Nakryiko <andriin@...com>, <bpf@...r.kernel.org>,
        Martin KaFai Lau <kafai@...com>, <netdev@...r.kernel.org>
CC:     Alexei Starovoitov <ast@...com>,
        Daniel Borkmann <daniel@...earbox.net>, <kernel-team@...com>
Subject: [RFC PATCH bpf-next 07/16] bpf: add bpf_map target

This patch adds a bpf_map target, which traverses all bpf_maps
through map_idr. A reference is held on the map across show()
so that field accesses remain safe and correct.
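
For illustration only (not part of this patch): a dump program
attached to this target receives the map pointer, the seq_file and
the sequence number laid out in the anonymous ctx struct used by
bpf_map_seq_show() below. A minimal sketch of such a program might
look as follows; the "dump/bpf_map" section name and the
bpf_seq_printf() seq_file helper are assumptions here, provided
elsewhere in this series rather than by this patch.

  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>

  char _license[] SEC("license") = "GPL";

  /* Hypothetical dump program for the "bpf_map" target; the section
   * naming and bpf_seq_printf() are assumed, not defined here.
   */
  SEC("dump/bpf_map")
  int dump_bpf_map(struct bpf_map *map, struct seq_file *seq, u64 seq_num)
  {
          /* emit a header line once, before the first element */
          if (seq_num == 0)
                  bpf_seq_printf(seq, "%8s %16s %12s\n",
                                 "id", "name", "max_entries");

          bpf_seq_printf(seq, "%8u %16s %12u\n",
                         map->id, map->name, map->max_entries);
          return 0;
  }

The intent, per the series cover letter, is that such a program is
pinned in the bpfdump filesystem and its output read back with a
plain 'cat'; the exact mount point and file layout are established
in earlier patches of this series.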

Signed-off-by: Yonghong Song <yhs@...com>
---
 kernel/bpf/syscall.c | 104 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 104 insertions(+)

diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index b5e4f18cc633..62a872a406ca 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -3797,3 +3797,107 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
 
 	return err;
 }
+
+struct bpfdump_seq_map_info {
+	struct bpf_map *map;
+	u32 id;
+};
+
+static struct bpf_map *bpf_map_seq_get_next(u32 *id)
+{
+	struct bpf_map *map;
+
+	spin_lock_bh(&map_idr_lock);
+	map = idr_get_next(&map_idr, id);
+	if (map)
+		map = __bpf_map_inc_not_zero(map, false);
+	spin_unlock_bh(&map_idr_lock);
+
+	return map;
+}
+
+static void *bpf_map_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct bpfdump_seq_map_info *info = seq->private;
+	struct bpf_map *map;
+	u32 id = info->id + 1;
+
+	map = bpf_map_seq_get_next(&id);
+	if (!map)
+		return NULL;
+
+	++*pos;
+	info->map = map;
+	info->id = id;
+	return map;
+}
+
+static void *bpf_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct bpfdump_seq_map_info *info = seq->private;
+	struct bpf_map *map;
+	u32 id = info->id + 1;
+
+	++*pos;
+	map = bpf_map_seq_get_next(&id);
+	if (!map)
+		return NULL;
+
+	__bpf_map_put(info->map, true);
+	info->map = map;
+	info->id = id;
+	return map;
+}
+
+static void bpf_map_seq_stop(struct seq_file *seq, void *v)
+{
+	struct bpfdump_seq_map_info *info = seq->private;
+
+	if (info->map) {
+		__bpf_map_put(info->map, true);
+		info->map = NULL;
+	}
+}
+
+static int bpf_map_seq_show(struct seq_file *seq, void *v)
+{
+	struct {
+		struct bpf_map *map;
+		struct seq_file *seq;
+		u64 seq_num;
+	} ctx = {
+		.map = v,
+		.seq = seq,
+	};
+	struct bpf_prog *prog;
+	int ret;
+
+	prog = bpf_dump_get_prog(seq, sizeof(struct bpfdump_seq_map_info),
+				 &ctx.seq_num);
+	ret = bpf_dump_run_prog(prog, &ctx);
+
+	return ret == 0 ? 0 : -EINVAL;
+}
+
+static const struct seq_operations bpf_map_seq_ops = {
+	.start	= bpf_map_seq_start,
+	.next	= bpf_map_seq_next,
+	.stop	= bpf_map_seq_stop,
+	.show	= bpf_map_seq_show,
+};
+
+int __init bpfdump__bpf_map(struct bpf_map *map, struct seq_file *seq,
+			    u64 seq_num)
+{
+	return 0;
+}
+
+static int __init bpf_map_dump_init(void)
+{
+	return bpf_dump_reg_target("bpf_map",
+				   "bpfdump__bpf_map",
+				   &bpf_map_seq_ops,
+				   sizeof(struct bpfdump_seq_map_info), 0);
+}
+
+late_initcall(bpf_map_dump_init);
-- 
2.24.1
