Message-ID: <20191206123201.GC2871@hirez.programming.kicks-ass.net>
Date: Fri, 6 Dec 2019 13:32:01 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: David Howells <dhowells@...hat.com>
Cc: Davidlohr Bueso <dave@...olabs.net>,
Ingo Molnar <mingo@...nel.org>, linux-kernel@...r.kernel.org,
Frederic Weisbecker <fweisbec@...il.com>,
Thomas Gleixner <tglx@...utronix.de>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Subject: Re: Problem with WARN_ON in mutex_trylock() and rxrpc
On Thu, Dec 05, 2019 at 02:22:12PM +0100, Peter Zijlstra wrote:
> At the very least I'm going to do a lockdep patch that verifies the lock
> stack is 'empty' for the current irq_context when it changes.
Something like the below..
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 21619c92c377..c0a314dc9969 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -21,11 +21,13 @@
   extern void trace_softirqs_off(unsigned long ip);
   extern void lockdep_hardirqs_on(unsigned long ip);
   extern void lockdep_hardirqs_off(unsigned long ip);
+  extern void lockdep_leave_irq_context(void);
 #else
   static inline void trace_softirqs_on(unsigned long ip) { }
   static inline void trace_softirqs_off(unsigned long ip) { }
   static inline void lockdep_hardirqs_on(unsigned long ip) { }
   static inline void lockdep_hardirqs_off(unsigned long ip) { }
+  static inline void lockdep_leave_irq_context(void) { }
 #endif
 
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -41,6 +43,8 @@ do { \
 } while (0)
 # define trace_hardirq_exit()			\
 do {						\
+	if (current->hardirq_context == 1)	\
+		lockdep_leave_irq_context();	\
 	current->hardirq_context--;		\
 } while (0)
 # define lockdep_softirq_enter()		\
@@ -49,6 +53,8 @@ do { \
 } while (0)
 # define lockdep_softirq_exit()			\
 do {						\
+	if (current->softirq_context == 1)	\
+		lockdep_leave_irq_context();	\
 	current->softirq_context--;		\
 } while (0)
 #else
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 32282e7112d3..5c1102967927 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3600,6 +3600,46 @@ static inline unsigned int task_irq_context(struct task_struct *task)
 	return 2 * !!task->hardirq_context + !!task->softirq_context;
 }
 
+/*
+ * Validate that the current irq context holds no locks.
+ */
+void lockdep_leave_irq_context(void)
+{
+	struct task_struct *curr = current;
+	unsigned int irq_context = task_irq_context(curr);
+	int depth = curr->lockdep_depth;
+	struct held_lock *hlock;
+
+	if (unlikely(!debug_locks || curr->lockdep_recursion))
+		return;
+
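+	/* No locks held at all; nothing can have been leaked. */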
+	if (!depth)
+		return;
+
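+	/* Top-most held lock was taken in an earlier context; this one is clean. */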
+	if (curr->held_locks[depth-1].irq_context != irq_context)
+		return;
+
+ pr_warn("\n");
+ pr_warn("========================================================\n");
+ pr_warn("WARNING: Leaving (soft/hard) IRQ context with locks held\n");
+ print_kernel_ident();
+ pr_warn("--------------------------------------------------------\n");
+
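+	/* Print every lock acquired in the context being left. */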
+	for (; depth; depth--) {
+		hlock = curr->held_locks + depth - 1;
+		if (hlock->irq_context != irq_context)
+			break;
+		print_lock(hlock);
+	}
+
+ pr_warn("\nstack backtrace:\n");
+ dump_stack();
+}
+NOKPROBE_SYMBOL(lockdep_leave_irq_context);
+
 static int separate_irq_context(struct task_struct *curr,
 				struct held_lock *hlock)
 {
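For illustration (not part of the patch above): a minimal, hypothetical sketch
of the class of bug this check is meant to catch. The names here (demo_lock,
buggy_tasklet_fn, demo_tasklet) are made up for the example; the point is that
when the tasklet returns with the lock still held, softirq_context drops from
1 to 0, lockdep_softirq_exit() calls lockdep_leave_irq_context(), and the new
warning fires with demo_lock on the held-lock stack.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/*
 * Buggy tasklet: the early-return path forgets to release demo_lock,
 * so the softirq context exits with a lock still held.
 */
static void buggy_tasklet_fn(unsigned long data)
{
	spin_lock(&demo_lock);
	if (!data)
		return;		/* BUG: leaves softirq context with demo_lock held */
	spin_unlock(&demo_lock);
}

static DECLARE_TASKLET(demo_tasklet, buggy_tasklet_fn, 0);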