Message-Id: <20250528034712.138701-22-dongml2@chinatelecom.cn>
Date: Wed, 28 May 2025 11:47:08 +0800
From: Menglong Dong <menglong8.dong@...il.com>
To: alexei.starovoitov@...il.com,
rostedt@...dmis.org,
jolsa@...nel.org
Cc: bpf@...r.kernel.org,
Menglong Dong <dongml2@...natelecom.cn>,
linux-kernel@...r.kernel.org
Subject: [PATCH bpf-next 21/25] libbpf: add skip_invalid and attach_tracing for tracing_multi

Add the skip_invalid and attach_tracing options to tracing_multi for the
selftests.

When we try to attach all the functions in available_filter_functions with
tracing_multi, we can't tell in advance whether every target symbol can be
attached, and a single invalid symbol makes the whole attach fail. When
skip_invalid is set to true, libbpf checks whether each symbol can be
attached and skips the invalid entries (a usage sketch follows the list of
rules below).
A symbol is skipped in any of the following cases:
1. the BTF type doesn't exist
2. the BTF type is not a function proto
3. the function has more than 6 arguments
4. the return type is a struct or union
5. any of the function arguments is a struct or union

The 5th rule can be overly strict and skip functions that are actually
attachable, but that's fine for the testing.
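
For example, a selftest could use it roughly as sketched below; the symbol
names and the skel->progs.fentry_multi skeleton handle are placeholders,
and error handling is trimmed:

	LIBBPF_OPTS(bpf_trace_multi_opts, opts);
	const char *syms[] = { "tcp_v4_rcv", "udp_rcv", "some_invalid_sym" };
	struct bpf_link *link;

	opts.syms = syms;
	opts.cnt = sizeof(syms) / sizeof(syms[0]);
	/* drop any symbol that fails the checks listed above */
	opts.skip_invalid = true;

	/* skel->progs.fentry_multi is a placeholder FENTRY_MULTI prog */
	link = bpf_program__attach_trace_multi_opts(skel->progs.fentry_multi,
						    &opts);
	if (!link)
		/* errno is set by libbpf on failure */
		return -errno;
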
"attach_tracing" is used to convert a TRACING prog to TRACING_MULTI. For
example, we can set the attach type to FENTRY_MULTI before we load the
skel. And we can attach the prog with
bpf_program__attach_trace_multi_opts() with "attach_tracing=1". The libbpf
will attach the target btf type of the prog automatically. This is also
used to reuse the selftests of tracing.
(Oh my goodness! What am I doing?)
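
A rough sketch of reusing an existing fentry selftest this way; the
fentry_test__open()/__load() skeleton helpers, the fentry_test skeleton
type and skel->progs.test1 are hypothetical names, the FENTRY_MULTI attach
type constant (spelled BPF_TRACE_FENTRY_MULTI here) is assumed from
earlier patches in this series, and error handling is simplified:

	struct fentry_test *skel;
	struct bpf_link *link;
	LIBBPF_OPTS(bpf_trace_multi_opts, opts, .attach_tracing = true);

	skel = fentry_test__open();
	if (!skel)
		return -errno;

	/* switch the prog from FENTRY to FENTRY_MULTI before load */
	bpf_program__set_expected_attach_type(skel->progs.test1,
					      BPF_TRACE_FENTRY_MULTI);
	if (fentry_test__load(skel))
		return -errno;

	/* no syms/btf_ids/cnt needed: libbpf reuses the prog's attach_btf_id */
	link = bpf_program__attach_trace_multi_opts(skel->progs.test1, &opts);
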
Signed-off-by: Menglong Dong <dongml2@...natelecom.cn>
---
tools/lib/bpf/libbpf.c | 97 ++++++++++++++++++++++++++++++++++++------
tools/lib/bpf/libbpf.h | 6 ++-
2 files changed, 89 insertions(+), 14 deletions(-)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 4a903102e0c7..911fda3f678c 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -10132,7 +10132,8 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd, int t
static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
enum bpf_attach_type attach_type,
- int *btf_obj_fd, int *btf_type_id, bool use_hash)
+ int *btf_obj_fd, int *btf_type_id, bool use_hash,
+ const struct btf **btf)
{
int ret, i, mod_len, err;
const char *fn_name, *mod_name = NULL;
@@ -10156,6 +10157,8 @@ static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
if (ret > 0) {
*btf_obj_fd = 0; /* vmlinux BTF */
*btf_type_id = ret;
+ if (btf)
+ *btf = obj->btf_vmlinux;
return 0;
}
if (ret != -ENOENT)
@@ -10183,6 +10186,8 @@ static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
if (ret > 0) {
*btf_obj_fd = mod->fd;
*btf_type_id = ret;
+ if (btf)
+ *btf = mod->btf;
return 0;
}
if (ret == -ENOENT)
@@ -10226,7 +10231,7 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attac
} else {
err = find_kernel_btf_id(prog->obj, attach_name,
attach_type, btf_obj_fd,
- btf_type_id, false);
+ btf_type_id, false, NULL);
}
if (err) {
pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %s\n",
@@ -12836,6 +12841,53 @@ static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_
return libbpf_get_error(*link);
}
+static bool is_trace_valid(const struct btf *btf, int btf_type_id, const char *name)
+{
+ const struct btf_type *t;
+
+ t = skip_mods_and_typedefs(btf, btf_type_id, NULL);
+ if (btf_is_func(t)) {
+ const struct btf_param *args;
+ __u32 nargs, m;
+
+ t = skip_mods_and_typedefs(btf, t->type, NULL);
+ if (!btf_is_func_proto(t)) {
+ pr_debug("skipping no function btf type for %s\n",
+ name);
+ return false;
+ }
+
+ args = (const struct btf_param *)(t + 1);
+ nargs = btf_vlen(t);
+ if (nargs > 6) {
+ pr_debug("skipping args count more than 6 for %s\n",
+ name);
+ return false;
+ }
+
+ t = skip_mods_and_typedefs(btf, t->type, NULL);
+ if (btf_is_struct(t) || btf_is_union(t) ||
+ (nargs && args[nargs - 1].type == 0)) {
+ pr_debug("skipping invalid return type for %s\n",
+ name);
+ return false;
+ }
+
+ for (m = 0; m < nargs; m++) {
+ t = skip_mods_and_typedefs(btf, args[m].type, NULL);
+ if (btf_is_struct(t) || btf_is_union(t)) {
+ pr_debug("skipping not supported arg type %s\n",
+ name);
+ break;
+ }
+ }
+ if (m < nargs)
+ return false;
+ }
+
+ return true;
+}
+
struct bpf_link *bpf_program__attach_trace_multi_opts(const struct bpf_program *prog,
const struct bpf_trace_multi_opts *opts)
{
@@ -12856,7 +12908,7 @@ struct bpf_link *bpf_program__attach_trace_multi_opts(const struct bpf_program *
cnt = OPTS_GET(opts, cnt, 0);
if (opts->syms) {
- int btf_obj_fd, btf_type_id, i;
+ int btf_obj_fd, btf_type_id, i, j = 0;
if (opts->btf_ids || opts->tgt_fds) {
pr_warn("can set both opts->syms and opts->btf_ids\n");
@@ -12870,23 +12922,41 @@ struct bpf_link *bpf_program__attach_trace_multi_opts(const struct bpf_program *
goto err_free;
}
for (i = 0; i < cnt; i++) {
+ const struct btf *btf = NULL;
+ bool func_hash;
+
/* only use btf type function hashmap when the count
* is big enough.
*/
- bool func_hash = cnt > 1024;
-
-
+ func_hash = cnt > 1024;
btf_obj_fd = btf_type_id = 0;
err = find_kernel_btf_id(prog->obj, opts->syms[i],
- prog->expected_attach_type, &btf_obj_fd,
- &btf_type_id, func_hash);
- if (err)
- goto err_free;
- btf_ids[i] = btf_type_id;
- tgt_fds[i] = btf_obj_fd;
+ prog->expected_attach_type, &btf_obj_fd,
+ &btf_type_id, func_hash, &btf);
+ if (err) {
+ if (!opts->skip_invalid)
+ goto err_free;
+
+ pr_debug("can't find btf type for %s, skip\n",
+ opts->syms[i]);
+ continue;
+ }
+
+ if (opts->skip_invalid &&
+ !is_trace_valid(btf, btf_type_id, opts->syms[i]))
+ continue;
+
+ btf_ids[j] = btf_type_id;
+ tgt_fds[j] = btf_obj_fd;
+ j++;
}
+ cnt = j;
link_opts.tracing_multi.btf_ids = btf_ids;
link_opts.tracing_multi.tgt_fds = tgt_fds;
+ } else if (opts->attach_tracing) {
+ link_opts.tracing_multi.btf_ids = &prog->attach_btf_id;
+ link_opts.tracing_multi.tgt_fds = &prog->attach_btf_obj_fd;
+ cnt = 1;
} else {
link_opts.tracing_multi.btf_ids = OPTS_GET(opts, btf_ids, 0);
link_opts.tracing_multi.tgt_fds = OPTS_GET(opts, tgt_fds, 0);
@@ -13957,7 +14027,8 @@ int bpf_program__set_attach_target(struct bpf_program *prog,
return libbpf_err(err);
err = find_kernel_btf_id(prog->obj, attach_func_name,
prog->expected_attach_type,
- &btf_obj_fd, &btf_id, false);
+ &btf_obj_fd, &btf_id, false,
+ NULL);
if (err)
return libbpf_err(err);
}
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index d7f0db7ab586..c087525ad25a 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -846,8 +846,12 @@ struct bpf_trace_multi_opts {
__u64 *cookies;
/* number of elements in syms/btf_ids/cookies arrays */
size_t cnt;
+ /* skip the invalid btf type before attaching */
+ bool skip_invalid;
+ /* attach a TRACING prog as TRACING_MULTI */
+ bool attach_tracing;
};
-#define bpf_trace_multi_opts__last_field cnt
+#define bpf_trace_multi_opts__last_field attach_tracing
LIBBPF_API struct bpf_link *
bpf_program__attach_trace_multi_opts(const struct bpf_program *prog,
--
2.39.5