Message-Id: <20200519214547.352050-8-a.darwish@linutronix.de>
Date: Tue, 19 May 2020 23:45:29 +0200
From: "Ahmed S. Darwish" <a.darwish@...utronix.de>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>, Will Deacon <will@...nel.org>
Cc: Thomas Gleixner <tglx@...utronix.de>,
"Paul E. McKenney" <paulmck@...nel.org>,
"Sebastian A. Siewior" <bigeasy@...utronix.de>,
Steven Rostedt <rostedt@...dmis.org>,
LKML <linux-kernel@...r.kernel.org>,
"Ahmed S. Darwish" <a.darwish@...utronix.de>
Subject: [PATCH v1 07/25] lockdep: Add preemption disabled assertion API

Asserting that preemption is disabled is a critical sanity check.
Developers are usually reluctant to add such a check in a fastpath, as
reading the preemption count can be costly.

Extend the lockdep API with a preemption-disabled assertion. If lockdep
is disabled, or if the underlying architecture does not support kernel
preemption, the assertion has no runtime overhead.
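
For illustration only (not part of this patch), a preemption-sensitive
fastpath could use the new assertion as sketched below; the per-CPU
counter and function names are hypothetical:

  #include <linux/lockdep.h>
  #include <linux/percpu.h>

  static DEFINE_PER_CPU(unsigned long, foo_events);

  static void foo_account_event(void)
  {
          /*
           * The caller must have disabled preemption. With
           * CONFIG_PROVE_LOCKING=y this warns once if the context is
           * still preemptible; with lockdep off it compiles away.
           */
          lockdep_assert_preemption_disabled();

          /* __this_cpu_inc() itself requires preemption disabled. */
          __this_cpu_inc(foo_events);
  }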

Since the lockdep assertion references "current", the task_struct from
sched.h, define it in lockdep.c rather than lockdep.h. This avoids a
potential circular header dependency for call sites that are defined
inline in header files which sched.h itself already includes.

Mark the exported assertion symbol with NOKPROBE_SYMBOL. Lockdep
functions can be involved in breakpoint handling, and probing them can
trigger breakpoint recursion.

References: f54bb2ec02c8 ("locking/lockdep: Add IRQs disabled/enabled assertion APIs: ...")
References: 2f43c6022d84 ("kprobes: Prohibit probing on lockdep functions")
Signed-off-by: Ahmed S. Darwish <a.darwish@...utronix.de>
---
include/linux/lockdep.h | 9 +++++++++
kernel/locking/lockdep.c | 15 +++++++++++++++
lib/Kconfig.debug | 1 +
3 files changed, 25 insertions(+)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 206774ac6946..54c929ea5b98 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -702,6 +702,14 @@ do { \
"Not in hardirq as expected\n"); \
} while (0)

+/*
+ * Do not define the assertion here: that would give every call site's
+ * header file a dependency on sched.h, for task_struct "current".
+ * Some call sites are defined inline in header files that sched.h
+ * itself already includes.
+ */
+void lockdep_assert_preemption_disabled(void);
+
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
@@ -709,6 +717,7 @@ do { \
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)
+# define lockdep_assert_preemption_disabled() do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index ac10db66cc63..4dae65bc65c2 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -5857,3 +5857,18 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
dump_stack();
}
EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
+
+#ifdef CONFIG_PROVE_LOCKING
+
+void lockdep_assert_preemption_disabled(void)
+{
+ WARN_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) &&
+ debug_locks &&
+ !current->lockdep_recursion &&
+ (preempt_count() == 0 && current->hardirqs_enabled),
+ "preemption not disabled as expected\n");
+}
+EXPORT_SYMBOL_GPL(lockdep_assert_preemption_disabled);
+NOKPROBE_SYMBOL(lockdep_assert_preemption_disabled);
+
+#endif
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 21d9c5f6e7ec..34d9d8896003 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1062,6 +1062,7 @@ config PROVE_LOCKING
select DEBUG_RWSEMS
select DEBUG_WW_MUTEX_SLOWPATH
select DEBUG_LOCK_ALLOC
+ select PREEMPT_COUNT if !ARCH_NO_PREEMPT
select TRACE_IRQFLAGS
default n
help
--
2.20.1