Date:   Mon, 30 Aug 2021 23:04:22 +0530
From:   Kumar Kartikeya Dwivedi <memxor@...il.com>
To:     bpf@...r.kernel.org
Cc:     Kumar Kartikeya Dwivedi <memxor@...il.com>,
        Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>,
        Andrii Nakryiko <andrii@...nel.org>,
        Martin KaFai Lau <kafai@...com>,
        Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
        Jesper Dangaard Brouer <brouer@...hat.com>,
        Toke Høiland-Jørgensen <toke@...hat.com>,
        netdev@...r.kernel.org
Subject: [PATCH bpf-next RFC v1 6/8] bpf: btf: Introduce helpers for dynamic BTF set registration

This adds macros that generate the BTF set registration APIs and the
check_kfunc_call callback. They take a type, which namespaces each BTF
set. This is in preparation for allowing nf_conntrack to register the
unstable helpers it wants to expose to XDP and SCHED_CLS programs in
subsequent patches.
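
For illustration, module-side usage is expected to look roughly like
the sketch below (the "xdp" namespace and the kfunc/set names are made
up for this example, and it assumes a DEFINE_KFUNC_BTF_SET_REG(xdp)
instance exists on the kernel side; <linux/btf_ids.h> provides
BTF_SET_START/BTF_ID/BTF_SET_END):

  BTF_SET_START(nf_ct_xdp_kfunc_ids)
  BTF_ID(func, bpf_ct_lookup)		/* hypothetical kfunc */
  BTF_SET_END(nf_ct_xdp_kfunc_ids)

  /* Wrap the btf_id_set so it can sit on the per-type list */
  DEFINE_KFUNC_BTF_SET(&nf_ct_xdp_kfunc_ids, nf_ct_xdp_kfunc_btf_set);

  static int __init nf_ct_kfunc_init(void)
  {
  	register_xdp_kfunc_btf_set(&nf_ct_xdp_kfunc_btf_set);
  	return 0;
  }

  static void __exit nf_ct_kfunc_exit(void)
  {
  	unregister_xdp_kfunc_btf_set(&nf_ct_xdp_kfunc_btf_set);
  }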

With in-kernel sets, the intended flow is that the in-kernel callback
first looks up the kfunc BTF id in the in-kernel whitelist, and falls
back to the dynamic BTF set lookup only if it is not found there. If
there is no in-kernel BTF id set, the generated callback can be used
directly.
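
As a rough sketch (names are placeholders: xdp_kfunc_ids stands in for
a subsystem's built-in BTF id set, and __bpf_check_xdp_kfunc_call for
the helper generated by DEFINE_KFUNC_BTF_SET_REG(xdp)):

  static bool xdp_check_kfunc_call(u32 kfunc_btf_id)
  {
  	/* Built-in whitelist first... */
  	if (btf_id_set_contains(&xdp_kfunc_ids, kfunc_btf_id))
  		return true;
  	/* ...then the dynamically registered module sets. */
  	return __bpf_check_xdp_kfunc_call(kfunc_btf_id);
  }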

Also fix the includes for btf.h and bpfptr.h so that they can be
included in isolation. This is in preparation for their use in the
tcp_bbr, tcp_cubic and tcp_dctcp modules in the next patch.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@...il.com>
---
 include/linux/bpfptr.h |  1 +
 include/linux/btf.h    | 15 +++++++++++++++
 kernel/bpf/btf.c       | 34 ++++++++++++++++++++++++++++++++++
 3 files changed, 50 insertions(+)

diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h
index 546e27fc6d46..46e1757d06a3 100644
--- a/include/linux/bpfptr.h
+++ b/include/linux/bpfptr.h
@@ -3,6 +3,7 @@
 #ifndef _LINUX_BPFPTR_H
 #define _LINUX_BPFPTR_H
 
+#include <linux/mm.h>
 #include <linux/sockptr.h>
 
 typedef sockptr_t bpfptr_t;
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 214fde93214b..d024b0eb43f9 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -5,6 +5,7 @@
 #define _LINUX_BTF_H 1
 
 #include <linux/types.h>
+#include <linux/bpfptr.h>
 #include <uapi/linux/btf.h>
 #include <uapi/linux/bpf.h>
 
@@ -238,4 +239,18 @@ static inline const char *btf_name_by_offset(const struct btf *btf,
 }
 #endif
 
+struct kfunc_btf_set {
+	struct list_head list;
+	struct btf_id_set *set;
+};
+
+/* Register set of BTF ids */
+#define DECLARE_KFUNC_BTF_SET_REG(type)                                        \
+	void register_##type##_kfunc_btf_set(struct kfunc_btf_set *s);         \
+	bool __bpf_check_##type##_kfunc_call(u32 kfunc_id);                    \
+	void unregister_##type##_kfunc_btf_set(struct kfunc_btf_set *s)
+
+#define DEFINE_KFUNC_BTF_SET(set, name)                                        \
+	struct kfunc_btf_set name = { LIST_HEAD_INIT(name.list), (set) }
+
 #endif
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index dfe61df4f974..35873495761d 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -6215,3 +6215,37 @@ const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
 };
 
 BTF_ID_LIST_GLOBAL_SINGLE(btf_task_struct_ids, struct, task_struct)
+
+/* Typesafe helpers to register BTF ID sets for modules */
+#define DEFINE_KFUNC_BTF_SET_REG(type)                                         \
+	static DEFINE_MUTEX(type##_kfunc_btf_set_mutex);                       \
+	static LIST_HEAD(type##_kfunc_btf_set_list);                           \
+	void register_##type##_kfunc_btf_set(struct kfunc_btf_set *s)          \
+	{                                                                      \
+		mutex_lock(&type##_kfunc_btf_set_mutex);                       \
+		list_add(&s->list, &type##_kfunc_btf_set_list);                \
+		mutex_unlock(&type##_kfunc_btf_set_mutex);                     \
+	}                                                                      \
+	EXPORT_SYMBOL_GPL(register_##type##_kfunc_btf_set);                    \
+	bool __bpf_check_##type##_kfunc_call(u32 kfunc_id)                     \
+	{                                                                      \
+		struct kfunc_btf_set *s;                                       \
+		mutex_lock(&type##_kfunc_btf_set_mutex);                       \
+		list_for_each_entry(s, &type##_kfunc_btf_set_list, list) {     \
+			if (btf_id_set_contains(s->set, kfunc_id)) {           \
+				mutex_unlock(&type##_kfunc_btf_set_mutex);     \
+				return true;                                   \
+			}                                                      \
+		}                                                              \
+		mutex_unlock(&type##_kfunc_btf_set_mutex);                     \
+		return false;                                                  \
+	}                                                                      \
+	void unregister_##type##_kfunc_btf_set(struct kfunc_btf_set *s)        \
+	{                                                                      \
+		if (!s)                                                        \
+			return;                                                \
+		mutex_lock(&type##_kfunc_btf_set_mutex);                       \
+		list_del_init(&s->list);                                       \
+		mutex_unlock(&type##_kfunc_btf_set_mutex);                     \
+	}                                                                      \
+	EXPORT_SYMBOL_GPL(unregister_##type##_kfunc_btf_set)
-- 
2.33.0
