lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20250528034712.138701-5-dongml2@chinatelecom.cn>
Date: Wed, 28 May 2025 11:46:51 +0800
From: Menglong Dong <menglong8.dong@...il.com>
To: alexei.starovoitov@...il.com,
	rostedt@...dmis.org,
	jolsa@...nel.org
Cc: bpf@...r.kernel.org,
	Menglong Dong <dongml2@...natelecom.cn>,
	linux-kernel@...r.kernel.org
Subject: [PATCH bpf-next 04/25] bpf: make kfunc_md support global trampoline link

Introduce the struct kfunc_md_tramp_prog for BPF_PROG_TYPE_TRACING, and
add the field "bpf_progs" to struct kfunc_md. These fields will be used
in the next patch for the bpf global trampoline.

And the KFUNC_MD_FL_TRACING_ORIGIN is introduced to indicate that the
origin call is needed on this function.

Add the functions kfunc_md_bpf_link and kfunc_md_bpf_unlink to add or
remove a bpf prog to/from a kfunc_md. Meanwhile, introduce
kfunc_md_bpf_ips() to get all the kernel functions in kfunc_mds that
contain bpf progs.

The KFUNC_MD_FL_BPF_REMOVING flag indicates that a removal operation is
in progress, and we shouldn't return the function if "bpf_prog_cnt <= 1"
in kfunc_md_bpf_ips().

Signed-off-by: Menglong Dong <dongml2@...natelecom.cn>
---
 include/linux/kfunc_md.h |  17 ++++++
 kernel/trace/kfunc_md.c  | 118 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 135 insertions(+)

diff --git a/include/linux/kfunc_md.h b/include/linux/kfunc_md.h
index 21c0b879cc03..f1b1012eeab2 100644
--- a/include/linux/kfunc_md.h
+++ b/include/linux/kfunc_md.h
@@ -3,12 +3,21 @@
 #define _LINUX_KFUNC_MD_H
 
 #define KFUNC_MD_FL_DEAD		(1 << 0) /* the md shouldn't be reused */
+#define KFUNC_MD_FL_TRACING_ORIGIN	(1 << 1)
+#define KFUNC_MD_FL_BPF_REMOVING	(1 << 2)
 
 #ifndef __ASSEMBLER__
 
 #include <linux/kernel.h>
 #include <linux/bpf.h>
 
+struct kfunc_md_tramp_prog {
+	struct kfunc_md_tramp_prog *next;
+	struct bpf_prog *prog;
+	u64 cookie;
+	struct rcu_head rcu;
+};
+
 struct kfunc_md_array;
 
 struct kfunc_md {
@@ -19,6 +28,7 @@ struct kfunc_md {
 	struct rcu_head rcu;
 #endif
 	unsigned long func;
+	struct kfunc_md_tramp_prog *bpf_progs[BPF_TRAMP_MAX];
 #ifdef CONFIG_FUNCTION_METADATA
 	/* the array is used for the fast mode */
 	struct kfunc_md_array *array;
@@ -26,6 +36,7 @@ struct kfunc_md {
 	struct percpu_ref pcref;
 	u32 flags;
 	u16 users;
+	u8 bpf_prog_cnt;
 	u8 nr_args;
 };
 
@@ -40,5 +51,11 @@ void kfunc_md_exit(struct kfunc_md *md);
 void kfunc_md_enter(struct kfunc_md *md);
 bool kfunc_md_arch_support(int *insn, int *data);
 
+int kfunc_md_bpf_ips(void ***ips);
+
+int kfunc_md_bpf_unlink(struct kfunc_md *md, struct bpf_prog *prog, int type);
+int kfunc_md_bpf_link(struct kfunc_md *md, struct bpf_prog *prog, int type,
+		      u64 cookie);
+
 #endif
 #endif
diff --git a/kernel/trace/kfunc_md.c b/kernel/trace/kfunc_md.c
index 9571081f6560..ebb4e46d482d 100644
--- a/kernel/trace/kfunc_md.c
+++ b/kernel/trace/kfunc_md.c
@@ -131,6 +131,23 @@ static bool kfunc_md_fast(void)
 {
 	return static_branch_likely(&kfunc_md_use_padding);
 }
+
+static int kfunc_md_hash_bpf_ips(void **ips)
+{
+	struct hlist_head *head;
+	struct kfunc_md *md;
+	int c = 0, i;
+
+	for (i = 0; i < (1 << KFUNC_MD_HASH_BITS); i++) {
+		head = &kfunc_md_table[i];
+		hlist_for_each_entry(md, head, hash) {
+			if (md->bpf_prog_cnt > !!(md->flags & KFUNC_MD_FL_BPF_REMOVING))
+				ips[c++] = (void *)md->func;
+		}
+	}
+
+	return c;
+}
 #else
 
 static void kfunc_md_hash_put(struct kfunc_md *md)
@@ -148,6 +165,11 @@ static struct kfunc_md *kfunc_md_hash_create(unsigned long ip, int nr_args)
 }
 
 #define kfunc_md_fast() 1
+
+static int kfunc_md_hash_bpf_ips(void **ips)
+{
+	return 0;
+}
 #endif /* CONFIG_FUNCTION_METADATA_PADDING */
 
 #ifdef CONFIG_FUNCTION_METADATA
@@ -442,6 +464,19 @@ static struct kfunc_md *kfunc_md_fast_create(unsigned long ip, int nr_args)
 
 	return md;
 }
+
+static int kfunc_md_fast_bpf_ips(void **ips)
+{
+	struct kfunc_md *md;
+	int i, c = 0;
+
+	for (i = 0; i < kfunc_mds->kfunc_md_count; i++) {
+		md = &kfunc_mds->mds[i];
+		if (md->users && md->bpf_prog_cnt > !!(md->flags & KFUNC_MD_FL_BPF_REMOVING))
+			ips[c++] = (void *)md->func;
+	}
+	return c;
+}
 #else
 
 static void kfunc_md_fast_put(struct kfunc_md *md)
@@ -458,6 +493,10 @@ static struct kfunc_md *kfunc_md_fast_create(unsigned long ip, int nr_args)
 	return NULL;
 }
 
+static int kfunc_md_fast_bpf_ips(void **ips)
+{
+	return 0;
+}
 #endif /* !CONFIG_FUNCTION_METADATA */
 
 void kfunc_md_enter(struct kfunc_md *md)
@@ -547,6 +586,85 @@ struct kfunc_md *kfunc_md_create(unsigned long ip, int nr_args)
 }
 EXPORT_SYMBOL_GPL(kfunc_md_create);
 
+int kfunc_md_bpf_ips(void ***ips)
+{
+	void **tmp;
+	int c;
+
+	c = atomic_read(&kfunc_mds->kfunc_md_used);
+	if (!c)
+		return 0;
+
+	tmp = kmalloc_array(c, sizeof(*tmp), GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	rcu_read_lock();
+	c = CALL(kfunc_md_fast_bpf_ips, kfunc_md_hash_bpf_ips, int, tmp);
+	rcu_read_unlock();
+
+	*ips = tmp;
+
+	return c;
+}
+
+int kfunc_md_bpf_link(struct kfunc_md *md, struct bpf_prog *prog, int type,
+		      u64 cookie)
+{
+	struct kfunc_md_tramp_prog *tramp_prog, **last;
+
+	tramp_prog = md->bpf_progs[type];
+	/* check if the prog is already linked */
+	while (tramp_prog) {
+		if (tramp_prog->prog == prog)
+			return -EEXIST;
+		tramp_prog = tramp_prog->next;
+	}
+
+	tramp_prog = kmalloc(sizeof(*tramp_prog), GFP_KERNEL);
+	if (!tramp_prog)
+		return -ENOMEM;
+
+	tramp_prog->prog = prog;
+	tramp_prog->cookie = cookie;
+	tramp_prog->next = NULL;
+
+	/* add the new prog to the list tail */
+	last = &md->bpf_progs[type];
+	while (*last)
+		last = &(*last)->next;
+	*last = tramp_prog;
+
+	md->bpf_prog_cnt++;
+	if (type == BPF_TRAMP_FEXIT || type == BPF_TRAMP_MODIFY_RETURN)
+		md->flags |= KFUNC_MD_FL_TRACING_ORIGIN;
+
+	return 0;
+}
+
+int kfunc_md_bpf_unlink(struct kfunc_md *md, struct bpf_prog *prog, int type)
+{
+	struct kfunc_md_tramp_prog *cur, **prev;
+
+	prev = &md->bpf_progs[type];
+	while (*prev && (*prev)->prog != prog)
+		prev = &(*prev)->next;
+
+	cur = *prev;
+	if (!cur)
+		return -EINVAL;
+
+	*prev = cur->next;
+	kfree_rcu(cur, rcu);
+	md->bpf_prog_cnt--;
+
+	if (!md->bpf_progs[BPF_TRAMP_FEXIT] &&
+	    !md->bpf_progs[BPF_TRAMP_MODIFY_RETURN])
+		md->flags &= ~KFUNC_MD_FL_TRACING_ORIGIN;
+
+	return 0;
+}
+
 bool __weak kfunc_md_arch_support(int *insn, int *data)
 {
 	return false;
-- 
2.39.5


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ