Message-Id: <20191229143740.29143-6-jolsa@kernel.org>
Date:   Sun, 29 Dec 2019 15:37:40 +0100
From:   Jiri Olsa <jolsa@...nel.org>
To:     Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>
Cc:     netdev@...r.kernel.org, bpf@...r.kernel.org,
        Andrii Nakryiko <andriin@...com>, Yonghong Song <yhs@...com>,
        Martin KaFai Lau <kafai@...com>,
        Jakub Kicinski <jakub.kicinski@...ronome.com>,
        David Miller <davem@...hat.com>
Subject: [PATCH 5/5] bpf: Allow resolving bpf trampoline in unwind

When unwinding the stack, each address must be recognized as
kernel text for the unwind to continue successfully. Trampoline
images are currently not recognized, so the unwind stops once it
hits one. Add a latch tree that keeps all trampolines for quick
lookup during the unwind, and report their pages through
is_bpf_text_address().

Signed-off-by: Jiri Olsa <jolsa@...nel.org>
---
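Note for reviewers: the connection to the unwinder is through
kernel_text_address(), which is used to validate every return
address the unwinder walks. A rough sketch (simplified from
kernel/extable.c, not part of this patch):

  int kernel_text_address(unsigned long addr)
  {
  	if (core_kernel_text(addr))
  		return 1;
  	...
  	/* with this patch, trampoline pages are covered here */
  	if (is_bpf_text_address(addr))
  		return 1;
  	...
  }
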
 include/linux/bpf.h     |  6 ++++++
 kernel/bpf/core.c       |  2 ++
 kernel/bpf/trampoline.c | 37 +++++++++++++++++++++++++++++++++++++
 3 files changed, 45 insertions(+)
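
The tree keys each trampoline by its [image, image + PAGE_SIZE)
range (see tree_comp() below), so is_bpf_trampoline() is a range
lookup that runs locklessly under RCU, while insert and erase stay
serialized by trampoline_mutex. Should a caller ever need the
trampoline itself rather than a yes/no answer, the node converts
back via container_of(); a hypothetical helper (not added by this
patch) would look like:

  static struct bpf_trampoline *bpf_trampoline_find(void *addr)
  {
  	struct latch_tree_node *n;

  	n = latch_tree_find(addr, &tree, &tree_ops);
  	if (!n)
  		return NULL;
  	return container_of(n, struct bpf_trampoline, tnode);
  }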

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index b14e51d56a82..66825c821ac9 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -470,6 +470,7 @@ struct bpf_trampoline {
 	/* Executable image of trampoline */
 	void *image;
 	u64 selector;
+	struct latch_tree_node tnode;
 };
 
 #define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
@@ -502,6 +503,7 @@ struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
 int bpf_trampoline_link_prog(struct bpf_prog *prog);
 int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
 void bpf_trampoline_put(struct bpf_trampoline *tr);
+bool is_bpf_trampoline(void *addr);
 void *bpf_jit_alloc_exec_page(void);
 #define BPF_DISPATCHER_INIT(name) {			\
 	.mutex = __MUTEX_INITIALIZER(name.mutex),	\
@@ -555,6 +557,10 @@ static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
 static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
 					      struct bpf_prog *from,
 					      struct bpf_prog *to) {}
+static inline bool is_bpf_trampoline(void *addr)
+{
+	return false;
+}
 #endif
 
 struct bpf_func_info_aux {
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 29d47aae0dd1..63a515b5aa7b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -704,6 +704,8 @@ bool is_bpf_text_address(unsigned long addr)
 
 	rcu_read_lock();
 	ret = bpf_prog_kallsyms_find(addr) != NULL;
+	if (!ret)
+		ret = is_bpf_trampoline((void *)addr);
 	rcu_read_unlock();
 
 	return ret;
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 505f4e4b31d2..4b5f0d0b0072 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -4,16 +4,44 @@
 #include <linux/bpf.h>
 #include <linux/filter.h>
 #include <linux/ftrace.h>
+#include <linux/rbtree_latch.h>
 
 /* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
 #define TRAMPOLINE_HASH_BITS 10
 #define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)
 
 static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];
+static struct latch_tree_root tree __cacheline_aligned;
 
 /* serializes access to trampoline_table */
 static DEFINE_MUTEX(trampoline_mutex);
 
+static __always_inline bool tree_less(struct latch_tree_node *a,
+				      struct latch_tree_node *b)
+{
+	struct bpf_trampoline *ta = container_of(a, struct bpf_trampoline, tnode);
+	struct bpf_trampoline *tb = container_of(b, struct bpf_trampoline, tnode);
+
+	return ta->image < tb->image;
+}
+
+static __always_inline int tree_comp(void *addr, struct latch_tree_node *n)
+{
+	struct bpf_trampoline *tr = container_of(n, struct bpf_trampoline, tnode);
+
+	if (addr < tr->image)
+		return -1;
+	if (addr >= tr->image + PAGE_SIZE)
+		return  1;
+
+	return 0;
+}
+
+static const struct latch_tree_ops tree_ops = {
+	.less	= tree_less,
+	.comp	= tree_comp,
+};
+
 void *bpf_jit_alloc_exec_page(void)
 {
 	void *image;
@@ -30,6 +58,11 @@ void *bpf_jit_alloc_exec_page(void)
 	return image;
 }
 
+bool is_bpf_trampoline(void *addr)
+{
+	return latch_tree_find(addr, &tree, &tree_ops) != NULL;
+}
+
 struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
 {
 	struct bpf_trampoline *tr;
@@ -65,6 +98,7 @@ struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
 	for (i = 0; i < BPF_TRAMP_MAX; i++)
 		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
 	tr->image = image;
+	latch_tree_insert(&tr->tnode, &tree, &tree_ops);
 out:
 	mutex_unlock(&trampoline_mutex);
 	return tr;
@@ -252,6 +286,9 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
 		goto out;
+	latch_tree_erase(&tr->tnode, &tree, &tree_ops);
+	/* wait for concurrent RCU walks of the tree to finish */
+	synchronize_rcu();
 	bpf_jit_free_exec(tr->image);
 	hlist_del(&tr->hlist);
 	kfree(tr);
 out:
 	mutex_unlock(&trampoline_mutex);
-- 
2.21.1
