Message-Id: <20211124084119.260239-7-jolsa@kernel.org>
Date: Wed, 24 Nov 2021 09:41:17 +0100
From: Jiri Olsa <jolsa@...hat.com>
To: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andrii@...nel.org>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Peter Zijlstra <a.p.zijlstra@...llo.nl>,
Masami Hiramatsu <mhiramat@...nel.org>,
Steven Rostedt <rostedt@...dmis.org>
Cc: netdev@...r.kernel.org, bpf@...r.kernel.org,
lkml <linux-kernel@...r.kernel.org>,
Ingo Molnar <mingo@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Martin KaFai Lau <kafai@...com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...omium.org>,
Ravi Bangoria <ravi.bangoria@....com>
Subject: [PATCH 6/8] libbpf: Add support for k[ret]probe.multi program section
Adding new sections kprobe.multi/kretprobe.multi for multi
kprobe programs.

It's now possible to define a kprobe/kretprobe program like:

  SEC("kprobe.multi/bpf_fentry_test*")

and it will be automatically attached to all functions matching
the bpf_fentry_test* glob.
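For illustration (this snippet is not part of the patch and the
program name is made up), such a program could look like:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  char LICENSE[] SEC("license") = "GPL";

  /* one program, attached to every kallsyms symbol matching the glob */
  SEC("kprobe.multi/bpf_fentry_test*")
  int BPF_KPROBE(test_kprobe_multi)
  {
          bpf_printk("bpf_fentry_test* entry");
          return 0;
  }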
Signed-off-by: Jiri Olsa <jolsa@...nel.org>
---
tools/lib/bpf/libbpf.c | 105 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 105 insertions(+)
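Not part of the patch, just a sketch of how the new section defs get
consumed on the userspace side: the glob is resolved at attach time,
so plain bpf_program__attach() (or skeleton auto-attach) is enough.
The helper name below is made up.

  #include <bpf/libbpf.h>

  static int attach_all(struct bpf_object *obj)
  {
          struct bpf_program *prog;
          struct bpf_link *link;
          long err;

          bpf_object__for_each_program(prog, obj) {
                  /* for kprobe.multi/kretprobe.multi sections this goes
                   * through attach_kprobe_multi(), which walks kallsyms,
                   * collects all symbols matching the glob and attaches
                   * the program to them
                   */
                  link = bpf_program__attach(prog);
                  err = libbpf_get_error(link);
                  if (err)
                          return err;
          }
          return 0;
  }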
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index b570e93de735..c1feb5f389a0 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -8348,6 +8348,7 @@ int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
}
static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie);
+static struct bpf_link *attach_kprobe_multi(const struct bpf_program *prog, long cookie);
static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie);
static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie);
static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie);
@@ -8362,6 +8363,8 @@ static const struct bpf_sec_def section_defs[] = {
SEC_DEF("uprobe/", KPROBE, 0, SEC_NONE),
SEC_DEF("kretprobe/", KPROBE, 0, SEC_NONE, attach_kprobe),
SEC_DEF("uretprobe/", KPROBE, 0, SEC_NONE),
+ SEC_DEF("kprobe.multi/", KPROBE, 0, SEC_NONE, attach_kprobe_multi),
+ SEC_DEF("kretprobe.multi/", KPROBE, 0, SEC_NONE, attach_kprobe_multi),
SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE),
SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX),
SEC_DEF("action", SCHED_ACT, 0, SEC_NONE | SEC_SLOPPY_PFX),
@@ -9918,6 +9921,108 @@ static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cooki
return link;
}
+struct kprobe_resolve_multi {
+ const char *name;
+ char **funcs;
+ __u32 alloc;
+ __u32 cnt;
+};
+
+static bool glob_matches(const char *glob, const char *s)
+{
+ int n = strlen(glob);
+
+ if (n == 1 && glob[0] == '*')
+ return true;
+
+ if (glob[0] == '*' && glob[n - 1] == '*') {
+ const char *subs;
+ /* substring match */
+
+ /* this is hacky, but we don't want to allocate
+ * for no good reason
+ */
+ ((char *)glob)[n - 1] = '\0';
+ subs = strstr(s, glob + 1);
+ ((char *)glob)[n - 1] = '*';
+
+ return subs != NULL;
+ } else if (glob[0] == '*') {
+ size_t nn = strlen(s);
+ /* suffix match */
+
+ /* too short for a given suffix */
+ if (nn < (size_t)(n - 1))
+ return false;
+ return strcmp(s + nn - (n - 1), glob + 1) == 0;
+ } else if (glob[n - 1] == '*') {
+ /* prefix match */
+ return strncmp(s, glob, n - 1) == 0;
+ } else {
+ /* exact match */
+ return strcmp(glob, s) == 0;
+ }
+}
+
+static int kprobe_resolve_multi_cb(void *arg, unsigned long long sym_addr,
+ char sym_type, const char *sym_name)
+{
+ struct kprobe_resolve_multi *res = arg;
+ char **p, *sym;
+
+ if (!glob_matches(res->name, sym_name))
+ return 0;
+
+ if (res->cnt == res->alloc) {
+ res->alloc = max((__u32) 16, res->alloc * 3 / 2);
+ p = libbpf_reallocarray(res->funcs, res->alloc, sizeof(*p));
+ if (!p)
+ return -ENOMEM;
+ res->funcs = p;
+ }
+ sym = strdup(sym_name);
+ if (!sym)
+ return -ENOMEM;
+ res->funcs[res->cnt++] = sym;
+ return 0;
+}
+
+static void free_str_array(char **func, __u32 cnt)
+{
+ __u32 i;
+
+ for (i = 0; i < cnt; i++)
+ free(func[i]);
+ free(func);
+}
+
+static struct bpf_link *attach_kprobe_multi(const struct bpf_program *prog, long cookie)
+{
+ DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
+ struct kprobe_resolve_multi res = { };
+ struct bpf_link *link;
+ int err;
+
+ opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/");
+ if (opts.retprobe)
+ res.name = prog->sec_name + sizeof("kretprobe.multi/") - 1;
+ else
+ res.name = prog->sec_name + sizeof("kprobe.multi/") - 1;
+
+ err = libbpf__kallsyms_parse(&res, kprobe_resolve_multi_cb);
+ if (err) {
+ free_str_array(res.funcs, res.cnt);
+ return libbpf_err_ptr(err);
+ }
+ if (!res.cnt)
+ return libbpf_err_ptr(-ENOENT);
+ opts.multi.cnt = res.cnt;
+ opts.multi.funcs = res.funcs;
+ link = bpf_program__attach_kprobe_opts(prog, NULL, &opts);
+ free_str_array(res.funcs, res.cnt);
+ return link;
+}
+
static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
const char *binary_path, uint64_t offset)
{
--
2.33.1