[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <c73f939de0f6af022e009dda057b6f941c6fce59.1744169424.git.dxu@dxuuu.xyz>
Date: Tue, 8 Apr 2025 21:34:01 -0600
From: Daniel Xu <dxu@...uu.xyz>
To: andrii@...nel.org,
ast@...nel.org,
daniel@...earbox.net
Cc: john.fastabend@...il.com,
martin.lau@...ux.dev,
eddyz87@...il.com,
song@...nel.org,
yonghong.song@...ux.dev,
kpsingh@...nel.org,
sdf@...ichev.me,
haoluo@...gle.com,
jolsa@...nel.org,
bpf@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [RFC bpf-next 06/13] bpf: Move kfunc definitions out of verifier.c
Multiple files reference bpf_get_kfunc_addr(), but moving its
definition into core requires bringing along the various struct
definitions it depends on.
Doing this also simplifies moving bpf_free_kfunc_btf_tab() in the next
commit.
Signed-off-by: Daniel Xu <dxu@...uu.xyz>
---
include/linux/bpf.h | 42 ++++++++++++++++++++++++++
kernel/bpf/core.c | 29 ++++++++++++++++++
kernel/bpf/verifier.c | 68 -------------------------------------------
3 files changed, 71 insertions(+), 68 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 1785901330b2..44133727820d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1470,6 +1470,38 @@ static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
}
#endif
+#define MAX_KFUNC_DESCS 256
+#define MAX_KFUNC_BTFS 256
+
+struct bpf_kfunc_desc {
+ struct btf_func_model func_model;
+ u32 func_id;
+ s32 imm;
+ u16 offset;
+ unsigned long addr;
+};
+
+struct bpf_kfunc_btf {
+ struct btf *btf;
+ struct module *module;
+ u16 offset;
+};
+
+struct bpf_kfunc_desc_tab {
+ /* Sorted by func_id (BTF ID) and offset (fd_array offset) during
+ * verification. JITs do lookups by bpf_insn, where func_id may not be
+ * available, therefore at the end of verification do_misc_fixups()
+ * sorts this by imm and offset.
+ */
+ struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
+ u32 nr_descs;
+};
+
+struct bpf_kfunc_btf_tab {
+ struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS];
+ u32 nr_descs;
+};
+
struct bpf_func_info_aux {
u16 linkage;
bool unreliable;
@@ -2755,6 +2787,16 @@ bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
const struct bpf_insn *insn);
+static inline int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
+{
+ const struct bpf_kfunc_desc *d0 = a;
+ const struct bpf_kfunc_desc *d1 = b;
+
+ /* func_id is not greater than BTF_MAX_TYPE */
+ return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
+}
+const struct bpf_kfunc_desc *
+find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset);
int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
u16 btf_fd_idx, u8 **func_addr);
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index aaf0841140c0..8cbfe7d33c0a 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1624,6 +1624,35 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
bpf_prog_unlock_free(fp);
}
+const struct bpf_kfunc_desc *
+find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
+{
+ struct bpf_kfunc_desc desc = {
+ .func_id = func_id,
+ .offset = offset,
+ };
+ struct bpf_kfunc_desc_tab *tab;
+
+ tab = prog->aux->kfunc_tab;
+ return bsearch(&desc, tab->descs, tab->nr_descs,
+ sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off);
+}
+EXPORT_SYMBOL_GPL(find_kfunc_desc);
+
+int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
+ u16 btf_fd_idx, u8 **func_addr)
+{
+ const struct bpf_kfunc_desc *desc;
+
+ desc = find_kfunc_desc(prog, func_id, btf_fd_idx);
+ if (!desc)
+ return -EFAULT;
+
+ *func_addr = (u8 *)desc->addr;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bpf_get_kfunc_addr);
+
int bpf_jit_get_func_addr(const struct bpf_prog *prog,
const struct bpf_insn *insn, bool extra_pass,
u64 *func_addr, bool *func_addr_fixed)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d3bb65f721c9..161280f3371f 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2957,47 +2957,6 @@ static int bpf_find_exception_callback_insn_off(struct bpf_verifier_env *env)
return ret;
}
-#define MAX_KFUNC_DESCS 256
-#define MAX_KFUNC_BTFS 256
-
-struct bpf_kfunc_desc {
- struct btf_func_model func_model;
- u32 func_id;
- s32 imm;
- u16 offset;
- unsigned long addr;
-};
-
-struct bpf_kfunc_btf {
- struct btf *btf;
- struct module *module;
- u16 offset;
-};
-
-struct bpf_kfunc_desc_tab {
- /* Sorted by func_id (BTF ID) and offset (fd_array offset) during
- * verification. JITs do lookups by bpf_insn, where func_id may not be
- * available, therefore at the end of verification do_misc_fixups()
- * sorts this by imm and offset.
- */
- struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
- u32 nr_descs;
-};
-
-struct bpf_kfunc_btf_tab {
- struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS];
- u32 nr_descs;
-};
-
-static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
-{
- const struct bpf_kfunc_desc *d0 = a;
- const struct bpf_kfunc_desc *d1 = b;
-
- /* func_id is not greater than BTF_MAX_TYPE */
- return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
-}
-
static int kfunc_btf_cmp_by_off(const void *a, const void *b)
{
const struct bpf_kfunc_btf *d0 = a;
@@ -3006,33 +2965,6 @@ static int kfunc_btf_cmp_by_off(const void *a, const void *b)
return d0->offset - d1->offset;
}
-static const struct bpf_kfunc_desc *
-find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
-{
- struct bpf_kfunc_desc desc = {
- .func_id = func_id,
- .offset = offset,
- };
- struct bpf_kfunc_desc_tab *tab;
-
- tab = prog->aux->kfunc_tab;
- return bsearch(&desc, tab->descs, tab->nr_descs,
- sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off);
-}
-
-int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
- u16 btf_fd_idx, u8 **func_addr)
-{
- const struct bpf_kfunc_desc *desc;
-
- desc = find_kfunc_desc(prog, func_id, btf_fd_idx);
- if (!desc)
- return -EFAULT;
-
- *func_addr = (u8 *)desc->addr;
- return 0;
-}
-
static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
s16 offset)
{
--
2.47.1
Powered by blists - more mailing lists