Message-Id: <20200302143154.258569-10-jolsa@kernel.org>
Date: Mon, 2 Mar 2020 15:31:48 +0100
From: Jiri Olsa <jolsa@...nel.org>
To: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>
Cc: Song Liu <songliubraving@...com>, netdev@...r.kernel.org,
bpf@...r.kernel.org, Andrii Nakryiko <andriin@...com>,
Yonghong Song <yhs@...com>, Martin KaFai Lau <kafai@...com>,
Jakub Kicinski <kuba@...nel.org>,
David Miller <davem@...hat.com>,
Björn Töpel <bjorn.topel@...el.com>,
John Fastabend <john.fastabend@...il.com>,
Jesper Dangaard Brouer <hawk@...nel.org>,
Arnaldo Carvalho de Melo <acme@...hat.com>,
Song Liu <song@...nel.org>
Subject: [PATCH 09/15] bpf: Add bpf_ksym_add/del functions

Adding bpf_ksym_add/del functions as locked versions
of __bpf_ksym_add/del. They will be used in the following
patches for bpf_trampoline and bpf_dispatcher.
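
The follow-up users can then register a symbol without taking
bpf_lock directly (the lock stays private to kernel/bpf/core.c).
A minimal sketch of such a caller, illustrative only and not part
of this patch (the 'ksym' member and the helper names below are
hypothetical):

	/* Illustrative sketch, not part of this patch. Assumes the
	 * trampoline embeds a struct bpf_ksym ('ksym' is a
	 * hypothetical field name) and that ksym.lnode was set up
	 * with INIT_LIST_HEAD.
	 */
	static void trampoline_ksym_register(struct bpf_trampoline *tr)
	{
		bpf_ksym_add(&tr->ksym);	/* takes bpf_lock internally */
	}

	static void trampoline_ksym_unregister(struct bpf_trampoline *tr)
	{
		/* Safe even if the symbol was never added:
		 * __bpf_ksym_del bails out when lnode is empty.
		 */
		bpf_ksym_del(&tr->ksym);
	}
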
Acked-by: Song Liu <songliubraving@...com>
Signed-off-by: Jiri Olsa <jolsa@...nel.org>
---
include/linux/bpf.h | 3 +++
kernel/bpf/core.c | 14 ++++++++++++++
2 files changed, 17 insertions(+)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5d6649cdc3df..76934893bccf 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -573,6 +573,9 @@ struct bpf_image {
 #define BPF_IMAGE_SIZE (PAGE_SIZE - sizeof(struct bpf_image))
 bool is_bpf_image_address(unsigned long address);
 void *bpf_image_alloc(void);
+/* Called only from JIT-enabled code, so there's no need for stubs. */
+void bpf_ksym_add(struct bpf_ksym *ksym);
+void bpf_ksym_del(struct bpf_ksym *ksym);
 #else
 static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
 {
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 387e1bac3a45..46ef6f66aab4 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -652,6 +652,13 @@ static void __bpf_ksym_add(struct bpf_ksym *ksym)
 	latch_tree_insert(&ksym->tnode, &bpf_ksym_tree, &bpf_ksym_tree_ops);
 }
 
+void bpf_ksym_add(struct bpf_ksym *ksym)
+{
+	spin_lock_bh(&bpf_lock);
+	__bpf_ksym_add(ksym);
+	spin_unlock_bh(&bpf_lock);
+}
+
 static void __bpf_ksym_del(struct bpf_ksym *ksym)
 {
 	if (list_empty(&ksym->lnode))
@@ -661,6 +668,13 @@ static void __bpf_ksym_del(struct bpf_ksym *ksym)
 	list_del_rcu(&ksym->lnode);
 }
 
+void bpf_ksym_del(struct bpf_ksym *ksym)
+{
+	spin_lock_bh(&bpf_lock);
+	__bpf_ksym_del(ksym);
+	spin_unlock_bh(&bpf_lock);
+}
+
 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
 {
 	return fp->jited && !bpf_prog_was_classic(fp);
--
2.24.1