Message-Id: <20210929235910.1765396-5-jevburton.kernel@gmail.com>
Date: Wed, 29 Sep 2021 23:59:01 +0000
From: Joe Burton <jevburton.kernel@...il.com>
To: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andrii@...nel.org>,
Martin KaFai Lau <kafai@...com>
Cc: Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...nel.org>,
Petar Penkov <ppenkov@...gle.com>,
Stanislav Fomichev <sdf@...gle.com>,
Hao Luo <haoluo@...gle.com>, netdev@...r.kernel.org,
bpf@...r.kernel.org, Joe Burton <jevburton@...gle.com>
Subject: [RFC PATCH v2 04/13] bpf: Define a few bpf_link_ops for BPF_TRACE_MAP

From: Joe Burton <jevburton@...gle.com>

Define the release, dealloc, and update_prog bpf_link_ops for the new
map tracing links. Updates to an attached program are serialized by a
single global mutex.

Signed-off-by: Joe Burton <jevburton@...gle.com>
---
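
As a sketch for reviewers: these ops are expected to be wired up by the
attach path later in this series, roughly as below. bpf_link_init(),
bpf_link_prime(), and bpf_link_settle() are the existing kernel link
APIs; BPF_LINK_TYPE_MAP_TRACE and bpf_map_trace_link_attach() are
placeholder names, and the map uref is assumed to already be held by
the caller (matching the bpf_map_put_with_uref() in release).

	static int bpf_map_trace_link_attach(struct bpf_map *map,
					     struct bpf_map_trace_target_info *tinfo,
					     struct bpf_prog *prog)
	{
		struct bpf_link_primer link_primer;
		struct bpf_map_trace_link *link;
		int err;

		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;
		/* Placeholder link type; defined elsewhere in the series. */
		bpf_link_init(&link->link, BPF_LINK_TYPE_MAP_TRACE,
			      &bpf_map_trace_link_ops, prog);
		link->map = map;
		link->tinfo = tinfo;

		err = bpf_link_prime(&link->link, &link_primer);
		if (err) {
			kfree(link);
			return err;
		}
		/* Insert prog into map->trace_progs under progs->mutex here. */
		return bpf_link_settle(&link_primer);
	}
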
kernel/bpf/map_trace.c | 71 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 71 insertions(+)

diff --git a/kernel/bpf/map_trace.c b/kernel/bpf/map_trace.c
index 7776b8ccfe88..35906d59ba3c 100644
--- a/kernel/bpf/map_trace.c
+++ b/kernel/bpf/map_trace.c
@@ -14,6 +14,14 @@ struct bpf_map_trace_target_info {
static struct list_head targets = LIST_HEAD_INIT(targets);
static DEFINE_MUTEX(targets_mutex);
+struct bpf_map_trace_link {
+ struct bpf_link link;
+ struct bpf_map *map;
+ struct bpf_map_trace_target_info *tinfo;
+};
+
+static DEFINE_MUTEX(link_mutex);
+

 int bpf_map_trace_reg_target(const struct bpf_map_trace_reg *reg_info)
{
struct bpf_map_trace_target_info *tinfo;
@@ -77,3 +85,66 @@ int bpf_map_initialize_trace_progs(struct bpf_map *map)
return 0;
 }

+static void bpf_map_trace_link_release(struct bpf_link *link)
+{
+ struct bpf_map_trace_link *map_trace_link =
+ container_of(link, struct bpf_map_trace_link, link);
+ enum bpf_map_trace_type trace_type =
+ map_trace_link->tinfo->reg_info->trace_type;
+ struct bpf_map_trace_prog *cur_prog;
+ struct bpf_map_trace_progs *progs;
+
+ progs = map_trace_link->map->trace_progs;
+ mutex_lock(&progs->mutex);
+ list_for_each_entry(cur_prog, &progs->progs[trace_type].list, list) {
+ if (cur_prog->prog == link->prog) {
+ progs->length[trace_type] -= 1;
+ list_del_rcu(&cur_prog->list);
+ kfree_rcu(cur_prog, rcu);
+ }
+ }
+ mutex_unlock(&progs->mutex);
+ bpf_map_put_with_uref(map_trace_link->map);
+}
+
+static void bpf_map_trace_link_dealloc(struct bpf_link *link)
+{
+ struct bpf_map_trace_link *map_trace_link =
+ container_of(link, struct bpf_map_trace_link, link);
+
+ kfree(map_trace_link);
+}
+
+static int bpf_map_trace_link_replace(struct bpf_link *link,
+ struct bpf_prog *new_prog,
+ struct bpf_prog *old_prog)
+{
+ int ret = 0;
+
+ mutex_lock(&link_mutex);
+ if (old_prog && link->prog != old_prog) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ if (link->prog->type != new_prog->type ||
+ link->prog->expected_attach_type != new_prog->expected_attach_type ||
+ link->prog->aux->attach_btf_id != new_prog->aux->attach_btf_id) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ old_prog = xchg(&link->prog, new_prog);
+ bpf_prog_put(old_prog);
+
+out_unlock:
+ mutex_unlock(&link_mutex);
+ return ret;
+}
+
+static const struct bpf_link_ops bpf_map_trace_link_ops = {
+ .release = bpf_map_trace_link_release,
+ .dealloc = bpf_map_trace_link_dealloc,
+ .update_prog = bpf_map_trace_link_replace,
+};
+
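
The list_del_rcu()/kfree_rcu() pair in bpf_map_trace_link_release()
assumes the invocation path (earlier in this series) walks the program
list under rcu_read_lock(), roughly as below; cur_prog, progs, and ctx
stand in for whatever the real invocation helper uses:

	rcu_read_lock();
	list_for_each_entry_rcu(cur_prog,
				&progs->progs[trace_type].list, list)
		bpf_prog_run(cur_prog->prog, ctx);
	rcu_read_unlock();

Writers serialize on progs->mutex, so readers never observe a
half-removed entry, and removed entries survive until a grace period
elapses.
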
--
2.33.0.685.g46640cef36-goog