[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20220328175033.2437312-9-roberto.sassu@huawei.com>
Date: Mon, 28 Mar 2022 19:50:23 +0200
From: Roberto Sassu <roberto.sassu@...wei.com>
To: <corbet@....net>, <viro@...iv.linux.org.uk>, <ast@...nel.org>,
<daniel@...earbox.net>, <andrii@...nel.org>, <kpsingh@...nel.org>,
<shuah@...nel.org>, <mcoquelin.stm32@...il.com>,
<alexandre.torgue@...s.st.com>, <zohar@...ux.ibm.com>
CC: <linux-doc@...r.kernel.org>, <linux-fsdevel@...r.kernel.org>,
<netdev@...r.kernel.org>, <bpf@...r.kernel.org>,
<linux-kselftest@...r.kernel.org>,
<linux-stm32@...md-mailman.stormreply.com>,
<linux-arm-kernel@...ts.infradead.org>,
<linux-integrity@...r.kernel.org>,
<linux-security-module@...r.kernel.org>,
<linux-kernel@...r.kernel.org>,
Roberto Sassu <roberto.sassu@...wei.com>
Subject: [PATCH 08/18] bpf-preload: Generate load_skel()
Generate load_skel() to load and attach the eBPF program, and to retrieve
the objects to be pinned.
Signed-off-by: Roberto Sassu <roberto.sassu@...wei.com>
---
kernel/bpf/preload/bpf_preload_kern.c | 36 -----------
.../bpf/preload/iterators/iterators.lskel.h | 42 ++++++++++++
tools/bpf/bpftool/gen.c | 64 +++++++++++++++++++
3 files changed, 106 insertions(+), 36 deletions(-)
diff --git a/kernel/bpf/preload/bpf_preload_kern.c b/kernel/bpf/preload/bpf_preload_kern.c
index 0869c889255c..35e9abd1a668 100644
--- a/kernel/bpf/preload/bpf_preload_kern.c
+++ b/kernel/bpf/preload/bpf_preload_kern.c
@@ -10,42 +10,6 @@ static struct bpf_preload_ops ops = {
.owner = THIS_MODULE,
};
-static int load_skel(void)
-{
- int err;
-
- skel = iterators_bpf__open();
- if (!skel)
- return -ENOMEM;
- err = iterators_bpf__load(skel);
- if (err)
- goto out;
- err = iterators_bpf__attach(skel);
- if (err)
- goto out;
- dump_bpf_map_link = bpf_link_get_from_fd(skel->links.dump_bpf_map_fd);
- if (IS_ERR(dump_bpf_map_link)) {
- err = PTR_ERR(dump_bpf_map_link);
- goto out;
- }
- dump_bpf_prog_link = bpf_link_get_from_fd(skel->links.dump_bpf_prog_fd);
- if (IS_ERR(dump_bpf_prog_link)) {
- err = PTR_ERR(dump_bpf_prog_link);
- goto out;
- }
- /* Avoid taking over stdin/stdout/stderr of init process. Zeroing out
- * makes skel_closenz() a no-op later in iterators_bpf__destroy().
- */
- close_fd(skel->links.dump_bpf_map_fd);
- skel->links.dump_bpf_map_fd = 0;
- close_fd(skel->links.dump_bpf_prog_fd);
- skel->links.dump_bpf_prog_fd = 0;
- return 0;
-out:
- free_objs_and_skel();
- return err;
-}
-
static int __init load(void)
{
int err;
diff --git a/kernel/bpf/preload/iterators/iterators.lskel.h b/kernel/bpf/preload/iterators/iterators.lskel.h
index 75b2e94b7547..6faf3708be01 100644
--- a/kernel/bpf/preload/iterators/iterators.lskel.h
+++ b/kernel/bpf/preload/iterators/iterators.lskel.h
@@ -474,4 +474,46 @@ static int preload(struct dentry *parent)
return err;
}
+static int load_skel(void)
+{
+ int err;
+
+ skel = iterators_bpf__open();
+ if (!skel)
+ return -ENOMEM;
+
+ err = iterators_bpf__load(skel);
+ if (err)
+ goto out;
+
+ err = iterators_bpf__attach(skel);
+ if (err)
+ goto out;
+
+ dump_bpf_map_link = bpf_link_get_from_fd(skel->links.dump_bpf_map_fd);
+ if (IS_ERR(dump_bpf_map_link)) {
+ err = PTR_ERR(dump_bpf_map_link);
+ goto out;
+ }
+
+ dump_bpf_prog_link = bpf_link_get_from_fd(skel->links.dump_bpf_prog_fd);
+ if (IS_ERR(dump_bpf_prog_link)) {
+ err = PTR_ERR(dump_bpf_prog_link);
+ goto out;
+ }
+
+ /* Avoid taking over stdin/stdout/stderr of init process. Zeroing out
+ * makes skel_closenz() a no-op later in iterators_bpf__destroy().
+ */
+ close_fd(skel->links.dump_bpf_map_fd);
+ skel->links.dump_bpf_map_fd = 0;
+ close_fd(skel->links.dump_bpf_prog_fd);
+ skel->links.dump_bpf_prog_fd = 0;
+
+ return 0;
+out:
+ free_objs_and_skel();
+ return err;
+}
+
#endif /* __ITERATORS_BPF_SKEL_H__ */
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index fa2c6022b80d..ad948f1c90b5 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -764,6 +764,69 @@ static void codegen_preload(struct bpf_object *obj, const char *obj_name)
");
}
+static void codegen_preload_load(struct bpf_object *obj, const char *obj_name)
+{
+ struct bpf_program *prog;
+
+ codegen("\
+ \n\
+ \n\
+ static int load_skel(void) \n\
+ { \n\
+ int err; \n\
+ \n\
+ skel = %1$s__open(); \n\
+ if (!skel) \n\
+ return -ENOMEM; \n\
+ \n\
+ err = %1$s__load(skel); \n\
+ if (err) \n\
+ goto out; \n\
+ \n\
+ err = %1$s__attach(skel); \n\
+ if (err) \n\
+ goto out; \n\
+ ", obj_name);
+
+ bpf_object__for_each_program(prog, obj) {
+ codegen("\
+ \n\
+ \n\
+ %1$s_link = bpf_link_get_from_fd(skel->links.%1$s_fd); \n\
+ if (IS_ERR(%1$s_link)) { \n\
+ err = PTR_ERR(%1$s_link); \n\
+ goto out; \n\
+ } \n\
+ ", bpf_program__name(prog));
+ }
+
+ codegen("\
+ \n\
+ \n\
+ /* Avoid taking over stdin/stdout/stderr of init process. Zeroing out \n\
+		 * makes skel_closenz() a no-op later in %1$s__destroy(). \n\
+		 */ \n\
+		", obj_name);
+
+ bpf_object__for_each_program(prog, obj) {
+ codegen("\
+ \n\
+ close_fd(skel->links.%1$s_fd); \n\
+ skel->links.%1$s_fd = 0; \n\
+ ", bpf_program__name(prog));
+ }
+
+ codegen("\
+ \n\
+ \n\
+ return 0; \n\
+ out: \n\
+ free_objs_and_skel(); \n\
+ return err; \n\
+ } \n\
+ ");
+}
+
static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
{
DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
@@ -916,6 +979,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
codegen_preload_vars(obj, obj_name);
codegen_preload_free(obj, obj_name);
codegen_preload(obj, obj_name);
+ codegen_preload_load(obj, obj_name);
}
codegen("\
--
2.32.0
Powered by blists - more mailing lists