Message-ID: <20200527155003.407192056@infradead.org>
Date: Wed, 27 May 2020 17:45:32 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: mingo@...nel.org, will@...nel.org, tglx@...utronix.de
Cc: x86@...nel.org, linux-kernel@...r.kernel.org,
a.darwish@...utronix.de, rostedt@...dmis.org,
bigeasy@...utronix.de, peterz@...radead.org
Subject: [PATCH 5/6] lockdep: Prepare for NMI IRQ state tracking
There is no reason not to always, accurately, track IRQ state.
This change also makes IRQ state tracking ignore lockdep_off().
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
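[Not for the changelog: a rough, standalone sketch of why the masked check
below ignores lockdep_off().  The LOCKDEP_OFF / LOCKDEP_RECURSION_MASK
split comes from an earlier patch in this series; the exact names, the bit
width and the stand-in variable here are illustrative assumptions, not the
kernel code itself.]

	#include <stdio.h>

	/* Assumed layout: low bits count recursion, one high bit marks 'off'. */
	#define LOCKDEP_RECURSION_BITS	16
	#define LOCKDEP_RECURSION_MASK	((1U << LOCKDEP_RECURSION_BITS) - 1)
	#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)

	/* Stand-in for current->lockdep_recursion (illustration only). */
	static unsigned int lockdep_recursion;

	static void lockdep_off(void) { lockdep_recursion += LOCKDEP_OFF; }
	static void lockdep_on(void)  { lockdep_recursion -= LOCKDEP_OFF; }

	int main(void)
	{
		lockdep_off();

		/*
		 * The old check bailed on any non-zero lockdep_recursion, so
		 * lockdep_off() also disabled IRQ state tracking.  Masking with
		 * LOCKDEP_RECURSION_MASK only bails on real lockdep recursion.
		 */
		printf("old check skips tracking: %d\n", lockdep_recursion != 0);
		printf("new check skips tracking: %d\n",
		       (lockdep_recursion & LOCKDEP_RECURSION_MASK) != 0);

		lockdep_on();
		return 0;
	}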
kernel/locking/lockdep.c | 23 ++++++++++++++++-------
1 file changed, 16 insertions(+), 7 deletions(-)
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3646,7 +3646,12 @@ static void __trace_hardirqs_on_caller(v
*/
void lockdep_hardirqs_on_prepare(unsigned long ip)
{
- if (unlikely(!debug_locks || current->lockdep_recursion))
+ /*
+ * Even though NMIs can happen while in lockdep, we only call this
+ * when the NMI returns to an IRQs enabled context, which guarantees
+ * we're not in lockdep.
+ */
+ if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
return;
if (unlikely(lockdep_hardirqs_enabled())) {
@@ -3682,9 +3687,11 @@ void lockdep_hardirqs_on_prepare(unsigne
current->hardirq_chain_key = current->curr_chain_key;
- current->lockdep_recursion++;
- __trace_hardirqs_on_caller();
- lockdep_recursion_finish();
+ if (!in_nmi()) {
+ current->lockdep_recursion++;
+ __trace_hardirqs_on_caller();
+ lockdep_recursion_finish();
+ }
}
EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
@@ -3692,7 +3699,7 @@ void noinstr lockdep_hardirqs_on(unsigne
{
struct task_struct *curr = current;
- if (unlikely(!debug_locks || curr->lockdep_recursion))
+ if (DEBUG_LOCKS_WARN_ON(curr->lockdep_recursion & LOCKDEP_RECURSION_MASK))
return;
if (lockdep_hardirqs_enabled()) {
@@ -3735,8 +3742,10 @@ void noinstr lockdep_hardirqs_off(unsign
{
struct task_struct *curr = current;
- if (unlikely(!debug_locks || curr->lockdep_recursion))
- return;
+ /*
+ * Can't test recursion; NMIs can happen while in lockdep.
+ * Harmless though; all we do is clear hardirqs_enabled.
+ */
/*
* So we're supposed to get called after you mask local IRQs, but for