[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20200302143154.258569-11-jolsa@kernel.org>
Date: Mon, 2 Mar 2020 15:31:49 +0100
From: Jiri Olsa <jolsa@...nel.org>
To: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>
Cc: Song Liu <songliubraving@...com>, netdev@...r.kernel.org,
bpf@...r.kernel.org, Andrii Nakryiko <andriin@...com>,
Yonghong Song <yhs@...com>, Martin KaFai Lau <kafai@...com>,
Jakub Kicinski <kuba@...nel.org>,
David Miller <davem@...hat.com>,
Björn Töpel <bjorn.topel@...el.com>,
John Fastabend <john.fastabend@...il.com>,
Jesper Dangaard Brouer <hawk@...nel.org>,
Arnaldo Carvalho de Melo <acme@...hat.com>,
Song Liu <song@...nel.org>
Subject: [PATCH 10/15] bpf: Rename bpf_tree to bpf_progs_tree
Renaming bpf_tree to bpf_progs_tree and bpf_tree_ops
to bpf_progs_tree_ops to better reflect that the tree
holds bpf_prog objects only.
Acked-by: Song Liu <songliubraving@...com>
Signed-off-by: Jiri Olsa <jolsa@...nel.org>
---
kernel/bpf/core.c | 24 ++++++++++++++----------
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 46ef6f66aab4..ea919672e42e 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -581,13 +581,14 @@ bpf_get_prog_addr_start(struct latch_tree_node *n)
return aux->ksym.start;
}
-static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
- struct latch_tree_node *b)
+static __always_inline bool
+bpf_progs_tree_less(struct latch_tree_node *a,
+ struct latch_tree_node *b)
{
return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
}
-static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
+static __always_inline int bpf_progs_tree_comp(void *key, struct latch_tree_node *n)
{
unsigned long val = (unsigned long)key;
const struct bpf_prog_aux *aux;
@@ -602,9 +603,9 @@ static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
return 0;
}
-static const struct latch_tree_ops bpf_tree_ops = {
- .less = bpf_tree_less,
- .comp = bpf_tree_comp,
+static const struct latch_tree_ops bpf_progs_tree_ops = {
+ .less = bpf_progs_tree_less,
+ .comp = bpf_progs_tree_comp,
};
static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
@@ -643,7 +644,7 @@ static const struct latch_tree_ops bpf_ksym_tree_ops = {
static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_ksym_tree __cacheline_aligned;
-static struct latch_tree_root bpf_tree __cacheline_aligned;
+static struct latch_tree_root bpf_progs_tree __cacheline_aligned;
static void __bpf_ksym_add(struct bpf_ksym *ksym)
{
@@ -696,7 +697,8 @@ void bpf_prog_kallsyms_add(struct bpf_prog *fp)
bpf_prog_ksym_set_name(fp);
spin_lock_bh(&bpf_lock);
- latch_tree_insert(&fp->aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+ latch_tree_insert(&fp->aux->ksym_tnode, &bpf_progs_tree,
+ &bpf_progs_tree_ops);
__bpf_ksym_add(&fp->aux->ksym);
spin_unlock_bh(&bpf_lock);
}
@@ -707,7 +709,8 @@ void bpf_prog_kallsyms_del(struct bpf_prog *fp)
return;
spin_lock_bh(&bpf_lock);
- latch_tree_erase(&fp->aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+ latch_tree_erase(&fp->aux->ksym_tnode, &bpf_progs_tree,
+ &bpf_progs_tree_ops);
__bpf_ksym_del(&fp->aux->ksym);
spin_unlock_bh(&bpf_lock);
}
@@ -716,7 +719,8 @@ static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
struct latch_tree_node *n;
- n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
+ n = latch_tree_find((void *)addr, &bpf_progs_tree,
+ &bpf_progs_tree_ops);
return n ?
container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
NULL;
--
2.24.1
Powered by blists - more mailing lists