Date:   Wed, 29 Sep 2021 23:59:00 +0000
From:   Joe Burton <jevburton.kernel@...il.com>
To:     Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>,
        Andrii Nakryiko <andrii@...nel.org>,
        Martin KaFai Lau <kafai@...com>
Cc:     Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
        John Fastabend <john.fastabend@...il.com>,
        KP Singh <kpsingh@...nel.org>,
        Petar Penkov <ppenkov@...gle.com>,
        Stanislav Fomichev <sdf@...gle.com>,
        Hao Luo <haoluo@...gle.com>, netdev@...r.kernel.org,
        bpf@...r.kernel.org, Joe Burton <jevburton@...gle.com>
Subject: [RFC PATCH v2 03/13] bpf: Add list of tracing programs to struct bpf_map

From: Joe Burton <jevburton@...gle.com>

Add an array of tracing-program lists to struct bpf_map. To minimize
cost, this array is allocated only when the first program is attached
to a map. It is installed using cmpxchg() to avoid adding another
mutex inside bpf_map.

Signed-off-by: Joe Burton <jevburton@...gle.com>
---
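For readers new to this idiom: below is a minimal standalone sketch of
the allocate-then-publish pattern used in
bpf_map_initialize_trace_progs(), written with C11 atomics in userspace
rather than the kernel's cmpxchg(). The names progs_array and
get_or_alloc_progs are invented for illustration and do not appear in
this series.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct progs_array {
	int nr_progs;		/* stand-in payload */
};

static _Atomic(struct progs_array *) shared_progs;

static struct progs_array *get_or_alloc_progs(void)
{
	struct progs_array *cur = atomic_load(&shared_progs);
	struct progs_array *fresh;

	if (cur)		/* fast path: already published */
		return cur;

	fresh = calloc(1, sizeof(*fresh));
	if (!fresh)
		return NULL;

	/*
	 * Publish with a single compare-and-swap. On failure, 'cur' is
	 * updated to the winner's pointer and our copy is discarded.
	 */
	if (!atomic_compare_exchange_strong(&shared_progs, &cur, fresh)) {
		free(fresh);
		return cur;
	}
	return fresh;
}

int main(void)
{
	struct progs_array *p = get_or_alloc_progs();

	printf("progs published at %p\n", (void *)p);
	free(p);
	return 0;
}

Exactly one caller's allocation is ever published; any loser frees its
own copy and uses the winner's, so the lookup path never takes a lock.
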
 include/linux/bpf.h    | 18 +++++++++++++++++-
 kernel/bpf/map_trace.c | 21 +++++++++++++++++++++
 kernel/bpf/syscall.c   |  4 ++++
 3 files changed, 42 insertions(+), 1 deletion(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 272f0ac49285..3ae12ab97720 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -43,6 +43,7 @@ struct kobject;
 struct mem_cgroup;
 struct module;
 struct bpf_func_state;
+struct bpf_map_trace_progs;
 
 extern struct idr btf_idr;
 extern spinlock_t btf_idr_lock;
@@ -180,10 +181,11 @@ struct bpf_map {
 	struct mem_cgroup *memcg;
 #endif
 	char name[BPF_OBJ_NAME_LEN];
+	struct bpf_map_trace_progs *trace_progs;
 	u32 btf_vmlinux_value_type_id;
 	bool bypass_spec_v1;
 	bool frozen; /* write-once; write-protected by freeze_mutex */
-	/* 22 bytes hole */
+	/* 14 bytes hole */
 
 	/* The 3rd and 4th cacheline with misc members to avoid false sharing
 	 * particularly with refcounting.
@@ -1510,12 +1512,26 @@ struct bpf_iter_reg {
 	const struct bpf_iter_seq_info *seq_info;
 };
 
+/* Maximum number of tracing programs that may be attached to one map */
+#define BPF_MAP_TRACE_MAX_PROGS 16
 #define BPF_MAP_TRACE_FUNC_SYM(trace_type) bpf_map_trace__ ## trace_type
 #define DEFINE_BPF_MAP_TRACE_FUNC(trace_type, args...)	\
 	extern int BPF_MAP_TRACE_FUNC_SYM(trace_type)(args);	\
 	int __init BPF_MAP_TRACE_FUNC_SYM(trace_type)(args)	\
 	{ return 0; }
 
+struct bpf_map_trace_prog {
+	struct list_head list;
+	struct bpf_prog *prog;
+	struct rcu_head rcu;
+};
+
+struct bpf_map_trace_progs {
+	struct bpf_map_trace_prog __rcu progs[MAX_BPF_MAP_TRACE_TYPE];
+	u32 length[MAX_BPF_MAP_TRACE_TYPE];
+	struct mutex mutex; /* protects writes to progs, length */
+};
+
 struct bpf_map_trace_reg {
 	const char *target;
 	enum bpf_map_trace_type trace_type;
diff --git a/kernel/bpf/map_trace.c b/kernel/bpf/map_trace.c
index d2c6df20f55c..7776b8ccfe88 100644
--- a/kernel/bpf/map_trace.c
+++ b/kernel/bpf/map_trace.c
@@ -3,6 +3,7 @@
 
 #include <linux/filter.h>
 #include <linux/bpf.h>
+#include <linux/rcupdate.h>
 
 struct bpf_map_trace_target_info {
 	struct list_head list;
@@ -56,3 +57,23 @@ bool bpf_map_trace_prog_supported(struct bpf_prog *prog)
 	return supported;
 }
 
+int bpf_map_initialize_trace_progs(struct bpf_map *map)
+{
+	struct bpf_map_trace_progs *new_trace_progs;
+	int i;
+
+	if (!READ_ONCE(map->trace_progs)) {
+		new_trace_progs = kzalloc(sizeof(struct bpf_map_trace_progs),
+					  GFP_KERNEL);
+		if (!new_trace_progs)
+			return -ENOMEM;
+		mutex_init(&new_trace_progs->mutex);
+		for (i = 0; i < MAX_BPF_MAP_TRACE_TYPE; i++)
+			INIT_LIST_HEAD(&new_trace_progs->progs[i].list);
+		if (cmpxchg(&map->trace_progs, NULL, new_trace_progs))
+			kfree(new_trace_progs); /* lost race; use winner's copy */
+	}
+
+	return 0;
+}
+
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 4e50c0bfdb7d..e6179755fd3b 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -460,6 +460,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
 
 	security_bpf_map_free(map);
 	bpf_map_release_memcg(map);
+	kfree(map->trace_progs);
 	/* implementation dependent freeing */
 	map->ops->map_free(map);
 }
@@ -913,6 +914,9 @@ static int map_create(union bpf_attr *attr)
 		return err;
 	}
 
+	/* tracing programs lists are allocated when attached */
+	map->trace_progs = NULL;
+
 	return err;
 
 free_map_sec:
-- 
2.33.0.685.g46640cef36-goog
