[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20201002135644.7903d0e5@gandalf.local.home>
Date: Fri, 2 Oct 2020 13:56:44 -0400
From: Steven Rostedt <rostedt@...dmis.org>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Thomas Gleixner <tglx@...utronix.de>,
"Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
LKML <linux-kernel@...r.kernel.org>
Subject: Re: [WARNING] kernel/rcu/tree.c:1058 rcu_irq_enter+0x15/0x20
On Wed, 30 Sep 2020 20:13:23 +0200
Peter Zijlstra <peterz@...radead.org> wrote:
> Blergh, IIRC there's header hell that way. The sane fix is killing off
> that trace_*_rcuidle() disease.
>
> But I think this will also cure it.
I guess you still don't build modules ;-). I had to add an
EXPORT_SYMBOL(lockdep_recursion) to get it to build, and then move the
checks within the irq disabling to get rid of the warnings about using
CPU pointers within preemptible code.
But it appears to solve the problem.
-- Steve
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 0e100c9784a5..70610f217b4e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -77,6 +77,7 @@ module_param(lock_stat, int, 0644);
#endif
DEFINE_PER_CPU(unsigned int, lockdep_recursion);
+EXPORT_SYMBOL(lockdep_recursion);
static inline bool lockdep_enabled(void)
{
@@ -4241,13 +4242,13 @@ void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
if (subclass) {
unsigned long flags;
- if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
- return;
-
raw_local_irq_save(flags);
+ if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
+ goto out;
lockdep_recursion_inc();
register_lock_class(lock, subclass, 1);
lockdep_recursion_finish();
+out:
raw_local_irq_restore(flags);
}
}
@@ -4928,15 +4929,15 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
{
unsigned long flags;
- if (unlikely(!lockdep_enabled()))
- return;
-
raw_local_irq_save(flags);
+ if (unlikely(!lockdep_enabled()))
+ goto out;
lockdep_recursion_inc();
check_flags(flags);
if (__lock_set_class(lock, name, key, subclass, ip))
check_chain_key(current);
lockdep_recursion_finish();
+out:
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_set_class);
@@ -4945,15 +4946,15 @@ void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
- if (unlikely(!lockdep_enabled()))
- return;
-
raw_local_irq_save(flags);
+ if (unlikely(!lockdep_enabled()))
+ goto out;
lockdep_recursion_inc();
check_flags(flags);
if (__lock_downgrade(lock, ip))
check_chain_key(current);
lockdep_recursion_finish();
+out:
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_downgrade);
@@ -5041,16 +5042,18 @@ void lock_release(struct lockdep_map *lock, unsigned long ip)
trace_lock_release(lock, ip);
+ raw_local_irq_save(flags);
+
if (unlikely(!lockdep_enabled()))
- return;
+ goto out;
- raw_local_irq_save(flags);
check_flags(flags);
lockdep_recursion_inc();
if (__lock_release(lock, ip))
check_chain_key(current);
lockdep_recursion_finish();
+out:
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_release);
@@ -5060,15 +5063,17 @@ noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
unsigned long flags;
int ret = 0;
- if (unlikely(!lockdep_enabled()))
- return 1; /* avoid false negative lockdep_assert_held() */
-
raw_local_irq_save(flags);
+ if (unlikely(!lockdep_enabled())) {
+ ret = 1; /* avoid false negative lockdep_assert_held() */
+ goto out;
+ }
check_flags(flags);
lockdep_recursion_inc();
ret = __lock_is_held(lock, read);
lockdep_recursion_finish();
+out:
raw_local_irq_restore(flags);
return ret;
@@ -5081,15 +5086,16 @@ struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
struct pin_cookie cookie = NIL_COOKIE;
unsigned long flags;
+ raw_local_irq_save(flags);
if (unlikely(!lockdep_enabled()))
- return cookie;
+ goto out;
- raw_local_irq_save(flags);
check_flags(flags);
lockdep_recursion_inc();
cookie = __lock_pin_lock(lock);
lockdep_recursion_finish();
+out:
raw_local_irq_restore(flags);
return cookie;
@@ -5100,15 +5106,16 @@ void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
unsigned long flags;
+ raw_local_irq_save(flags);
if (unlikely(!lockdep_enabled()))
- return;
+ goto out;
- raw_local_irq_save(flags);
check_flags(flags);
lockdep_recursion_inc();
__lock_repin_lock(lock, cookie);
lockdep_recursion_finish();
+out:
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_repin_lock);
@@ -5117,15 +5124,16 @@ void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
{
unsigned long flags;
+ raw_local_irq_save(flags);
if (unlikely(!lockdep_enabled()))
- return;
+ goto out;
- raw_local_irq_save(flags);
check_flags(flags);
lockdep_recursion_inc();
__lock_unpin_lock(lock, cookie);
lockdep_recursion_finish();
+out:
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_unpin_lock);
@@ -5253,14 +5261,15 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
trace_lock_acquired(lock, ip);
+ raw_local_irq_save(flags);
if (unlikely(!lock_stat || !lockdep_enabled()))
- return;
+ goto out;
- raw_local_irq_save(flags);
check_flags(flags);
lockdep_recursion_inc();
__lock_contended(lock, ip);
lockdep_recursion_finish();
+out:
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_contended);
@@ -5271,14 +5280,15 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
trace_lock_contended(lock, ip);
+ raw_local_irq_save(flags);
if (unlikely(!lock_stat || !lockdep_enabled()))
- return;
+ goto out;
- raw_local_irq_save(flags);
check_flags(flags);
lockdep_recursion_inc();
__lock_acquired(lock, ip);
lockdep_recursion_finish();
+out:
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquired);
Powered by blists - more mailing lists