Message-Id: <20191207.160413.1238774755333289186.davem@davemloft.net>
Date:   Sat, 07 Dec 2019 16:04:13 -0800 (PST)
From:   David Miller <davem@...emloft.net>
To:     bpf@...r.kernel.org, netdev@...r.kernel.org
CC:     ast@...nel.org, daniel@...earbox.net, tglx@...utronix.de
Subject: [RFC v1 PATCH 2/7] bpf: Add basic RT local locking for invocation
 of BPF programs.


For now, simply surround every invocation of a BPF program with
calls to the locking primitives.

The next step will be pulling the local lock out of BPF_PROG_RUN()
and into the areas of the various call sites that need it, roughly
as sketched below.
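
To illustrate that next step, a converted call site might end up
looking something like this (hypothetical sketch only; the
list-walking site and the names "progs"/"bpf_prog_entry" are
invented here, only bpf_prog_lock()/__BPF_PROG_RUN() come from
this patch):

	/*
	 * Hypothetical converted call site: hold the local lock once
	 * around a batch of program runs instead of taking it inside
	 * every individual BPF_PROG_RUN() invocation.
	 */
	struct bpf_prog_entry *entry;	/* hypothetical list node */
	u32 ret = 0;

	bpf_prog_lock();
	list_for_each_entry(entry, &progs, list)
		ret = __BPF_PROG_RUN(entry->prog, ctx);
	bpf_prog_unlock();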

Signed-off-by: David S. Miller <davem@...emloft.net>
---
 include/linux/filter.h | 23 ++++++++++++++++++++++-
 kernel/bpf/core.c      |  5 +++++
 2 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/include/linux/filter.h b/include/linux/filter.h
index 1b1e8b8f88da..1f4a782b6184 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -21,6 +21,7 @@
 #include <linux/kallsyms.h>
 #include <linux/if_vlan.h>
 #include <linux/vmalloc.h>
+#include <linux/locallock.h>
 
 #include <net/sch_generic.h>
 
@@ -559,7 +560,20 @@ struct sk_filter {
 
 DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
 
-#define BPF_PROG_RUN(prog, ctx)	({				\
+#ifdef CONFIG_PREEMPT_RT_FULL
+DECLARE_LOCAL_IRQ_LOCK(bpf_invoke_lock);
+#define bpf_prog_lock() local_lock(bpf_invoke_lock)
+#define bpf_prog_unlock() local_unlock(bpf_invoke_lock)
+#else
+#define bpf_prog_lock() preempt_disable()
+#define bpf_prog_unlock() preempt_enable()
+#endif
+
+/* We cannot migrate off of the current cpu because BPF programs
+ * access per-cpu maps and other per-cpu data structures which are
+ * shared between BPF program execution and kernel execution.
+ */
+#define __BPF_PROG_RUN(prog, ctx)	({			\
 	u32 ret;						\
 	cant_sleep();						\
 	if (static_branch_unlikely(&bpf_stats_enabled_key)) {	\
@@ -576,6 +590,13 @@ DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
 	}							\
 	ret; })
 
+#define BPF_PROG_RUN(prog, ctx)	({				\
+	u32 ret;						\
+	bpf_prog_lock();					\
+	ret = __BPF_PROG_RUN(prog, ctx);			\
+	bpf_prog_unlock();					\
+	ret; })
+
 #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
 
 struct bpf_skb_data_end {
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 49e32acad7d8..6e97bfb9f24a 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2217,6 +2217,11 @@ int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
 EXPORT_SYMBOL(bpf_stats_enabled_key);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+DEFINE_LOCAL_IRQ_LOCK(bpf_invoke_lock);
+EXPORT_SYMBOL(bpf_invoke_lock);
+#endif
+
 /* All definitions of tracepoints related to BPF. */
 #define CREATE_TRACE_POINTS
 #include <linux/bpf_trace.h>
-- 
2.20.1
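
A note on the "cannot migrate" comment added to filter.h above:
per-cpu map lookups hand out this_cpu pointers. The per-cpu array
map, for example, does roughly the following (a sketch of the
pattern, not the actual lookup code; "array" and "index" stand in
for the real arguments):

	/*
	 * Sketch: a per-cpu map lookup returns a pointer into the
	 * current CPU's slot.  If the task migrated afterwards, the
	 * program would keep touching the old CPU's slot and race
	 * with whatever runs there now; hence the local lock /
	 * preempt_disable() around program invocation.
	 */
	void *value = this_cpu_ptr(array->pptrs[index]);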
