Message-Id: <20200312195610.346362-9-jolsa@kernel.org>
Date: Thu, 12 Mar 2020 20:56:03 +0100
From: Jiri Olsa <jolsa@...nel.org>
To: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>
Cc: netdev@...r.kernel.org, bpf@...r.kernel.org,
Andrii Nakryiko <andriin@...com>, Yonghong Song <yhs@...com>,
Song Liu <songliubraving@...com>,
Martin KaFai Lau <kafai@...com>,
Jakub Kicinski <kuba@...nel.org>,
David Miller <davem@...hat.com>,
Björn Töpel <bjorn.topel@...el.com>,
John Fastabend <john.fastabend@...il.com>,
Jesper Dangaard Brouer <hawk@...nel.org>,
Arnaldo Carvalho de Melo <acme@...hat.com>,
Song Liu <song@...nel.org>
Subject: [PATCH 08/15] bpf: Add prog flag to struct bpf_ksym object
Adding a 'prog' bool flag to 'struct bpf_ksym' to mark that
the object belongs to a bpf_prog object.
This change allows having bpf_prog objects together with
other types (trampolines and dispatchers) in a single
bpf_tree. The flag is used when searching for bpf_prog
exception tables via the bpf_prog_ksym_find function, where
we need to get back the bpf_prog pointer.
From now on we can safely add bpf_ksym support for trampoline
or dispatcher objects, because we can differentiate them
from bpf_prog objects.
Signed-off-by: Jiri Olsa <jolsa@...nel.org>
---
include/linux/bpf.h | 1 +
kernel/bpf/core.c | 22 +++++++++++-----------
2 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c5afff03b0f4..739ec3f562bf 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -478,6 +478,7 @@ struct bpf_ksym {
char name[KSYM_NAME_LEN];
struct list_head lnode;
struct latch_tree_node tnode;
+ bool prog;
};
enum bpf_tramp_prog_type {
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 6b58c2f1c9c0..92577a81122a 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -642,6 +642,7 @@ void bpf_prog_kallsyms_add(struct bpf_prog *fp)
bpf_prog_ksym_set_addr(fp);
bpf_prog_ksym_set_name(fp);
+ fp->aux->ksym.prog = true;
spin_lock_bh(&bpf_lock);
bpf_prog_ksym_node_add(fp->aux);
@@ -658,16 +659,6 @@ void bpf_prog_kallsyms_del(struct bpf_prog *fp)
spin_unlock_bh(&bpf_lock);
}
-static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
-{
- struct latch_tree_node *n;
-
- n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
- return n ?
- container_of(n, struct bpf_prog_aux, ksym.tnode)->prog :
- NULL;
-}
-
static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
{
struct latch_tree_node *n;
@@ -712,13 +703,22 @@ bool is_bpf_text_address(unsigned long addr)
return ret;
}
+static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
+{
+ struct bpf_ksym *ksym = bpf_ksym_find(addr);
+
+ return ksym && ksym->prog ?
+ container_of(ksym, struct bpf_prog_aux, ksym)->prog :
+ NULL;
+}
+
const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{
const struct exception_table_entry *e = NULL;
struct bpf_prog *prog;
rcu_read_lock();
- prog = bpf_prog_kallsyms_find(addr);
+ prog = bpf_prog_ksym_find(addr);
if (!prog)
goto out;
if (!prog->aux->num_exentries)
--
2.24.1