Message-ID: <20190129012152.251061-1-ast@kernel.org>
Date: Mon, 28 Jan 2019 17:21:52 -0800
From: Alexei Starovoitov <ast@...nel.org>
To: <davem@...emloft.net>
CC: <daniel@...earbox.net>, <peterz@...radead.org>, <jannh@...gle.com>,
<paulmck@...ux.ibm.com>, <will.deacon@....com>, <mingo@...hat.com>,
<netdev@...r.kernel.org>, <kernel-team@...com>
Subject: [PATCH bpf-next] bpf: check that BPF programs run with preemption disabled
From: Peter Zijlstra <peterz@...radead.org>
Introduce a cant_sleep() macro for annotating functions that cannot sleep.
Use it in BPF_PROG_RUN() to catch execution of BPF programs in a
preemptible context.
Suggested-by: Jann Horn <jannh@...gle.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Signed-off-by: Alexei Starovoitov <ast@...nel.org>
---
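Not part of the patch, just an illustration of the intended usage of the
annotation: cant_sleep() is the counterpart of might_sleep() and, with
CONFIG_DEBUG_ATOMIC_SLEEP=y, prints a one-shot warning when it is reached
with preemption enabled. The names below (my_events, bump_my_events,
example_caller) are made up for the sketch; the only new interface assumed
is the cant_sleep() macro added by this patch.

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

/* made-up per-CPU counter that is only safe to update non-preemptibly */
static DEFINE_PER_CPU(unsigned long, my_events);

static void bump_my_events(void)
{
	cant_sleep();			/* caller must have preemption disabled */
	__this_cpu_inc(my_events);	/* __this_cpu_*() requires that anyway */
}

static void example_caller(void)
{
	preempt_disable();
	bump_my_events();		/* fine: annotation stays silent */
	preempt_enable();

	bump_my_events();		/* preemptible: triggers the new warning */
}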
 include/linux/filter.h |  2 +-
 include/linux/kernel.h | 14 ++++++++++++--
 kernel/sched/core.c    | 28 ++++++++++++++++++++++++++++
 3 files changed, 41 insertions(+), 3 deletions(-)
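For context (also not part of the patch): BPF_PROG_RUN() now asserts that
preemption is already off, so call sites are expected to disable preemption
themselves or run from a context where it is already disabled (softirq,
tracepoints, etc.). A rough sketch of the expected calling pattern, with
run_my_prog() and ctx as placeholder names rather than kernel APIs:

#include <linux/filter.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>

static u32 run_my_prog(const struct bpf_prog *prog, void *ctx)
{
	u32 ret;

	preempt_disable();		/* BPF programs must not be preempted */
	rcu_read_lock();		/* prog is typically RCU-protected */
	ret = BPF_PROG_RUN(prog, ctx);	/* the new cant_sleep() check runs in here */
	rcu_read_unlock();
	preempt_enable();

	return ret;
}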
diff --git a/include/linux/filter.h b/include/linux/filter.h
index e4b473f85b46..7e87863617b3 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -533,7 +533,7 @@ struct sk_filter {
 	struct bpf_prog	*prog;
 };
 
-#define BPF_PROG_RUN(filter, ctx) (*(filter)->bpf_func)(ctx, (filter)->insnsi)
+#define BPF_PROG_RUN(filter, ctx) ({ cant_sleep(); (*(filter)->bpf_func)(ctx, (filter)->insnsi); })
 
 #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 8f0e68e250a7..a8868a32098c 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -245,8 +245,10 @@ extern int _cond_resched(void);
 #endif
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-  void ___might_sleep(const char *file, int line, int preempt_offset);
-  void __might_sleep(const char *file, int line, int preempt_offset);
+extern void ___might_sleep(const char *file, int line, int preempt_offset);
+extern void __might_sleep(const char *file, int line, int preempt_offset);
+extern void __cant_sleep(const char *file, int line, int preempt_offset);
+
 /**
  * might_sleep - annotation for functions that can sleep
  *
@@ -259,6 +261,13 @@ extern int _cond_resched(void);
  */
 # define might_sleep() \
 	do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
+/**
+ * cant_sleep - annotation for functions that cannot sleep
+ *
+ * this macro will print a stack trace if it is executed with preemption enabled
+ */
+# define cant_sleep() \
+	do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
 # define sched_annotate_sleep()	(current->task_state_change = 0)
 #else
   static inline void ___might_sleep(const char *file, int line,
@@ -266,6 +275,7 @@ extern int _cond_resched(void);
   static inline void __might_sleep(const char *file, int line,
 				   int preempt_offset) { }
 # define might_sleep() do { might_resched(); } while (0)
+# define cant_sleep() do { } while (0)
 # define sched_annotate_sleep() do { } while (0)
 #endif
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a674c7db2f29..1dcbff62f973 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6149,6 +6149,34 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 }
 EXPORT_SYMBOL(___might_sleep);
+
+void __cant_sleep(const char *file, int line, int preempt_offset)
+{
+	static unsigned long prev_jiffy;
+
+	if (irqs_disabled())
+		return;
+
+	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
+		return;
+
+	if (preempt_count() > preempt_offset)
+		return;
+
+	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
+		return;
+	prev_jiffy = jiffies;
+
+	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
+	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
+			in_atomic(), irqs_disabled(),
+			current->pid, current->comm);
+
+	debug_show_held_locks(current);
+	dump_stack();
+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+}
+EXPORT_SYMBOL_GPL(__cant_sleep);
 #endif
 
 #ifdef CONFIG_MAGIC_SYSRQ
--
2.20.0