Message-ID: <20200821085348.782688941@infradead.org>
Date: Fri, 21 Aug 2020 10:47:49 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: linux-kernel@...r.kernel.org, mingo@...nel.org, will@...nel.org
Cc: npiggin@...il.com, elver@...gle.com, jgross@...e.com,
paulmck@...nel.org, rostedt@...dmis.org, rjw@...ysocki.net,
joel@...lfernandes.org, svens@...ux.ibm.com, tglx@...utronix.de,
peterz@...radead.org
Subject: [PATCH v2 11/11] lockdep,trace: Expose tracepoints
The lockdep tracepoints are under the lockdep recursion counter; this
has a bunch of nasty side effects:
- TRACE_IRQFLAGS doesn't work across the entire tracepoint
- RCU-lockdep doesn't see the tracepoints either, hiding numerous
  "suspicious RCU usage" warnings (the gist of why is sketched below).
Pull the trace_lock_*() tracepoints completely out from under the
lockdep recursion handling and rely entirely on the trace-level
recursion handling -- also, tracing *SHOULD* not be taking locks in
any case.
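Schematically, with argument lists elided, each of the affected entry
points (lock_acquire(), lock_release(), lock_contended(),
lock_acquired()) changes from the old shape:

	void lock_acquire(...)
	{
		if (unlikely(current->lockdep_recursion))
			return;

		raw_local_irq_save(flags);
		current->lockdep_recursion++;
		trace_lock_acquire(...);	/* hidden from IRQ/RCU tracking */
		__lock_acquire(...);
		lockdep_recursion_finish();
		raw_local_irq_restore(flags);
	}

to the new shape:

	void lock_acquire(...)
	{
		trace_lock_acquire(...);	/* fires under normal checking */

		if (unlikely(current->lockdep_recursion))
			return;
		/* ... unchanged ... */
	}

This is an illustration of the new ordering, not the literal code; the
tracepoint now depends purely on the tracing code's own recursion
protection.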
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Reviewed-by: Steven Rostedt (VMware) <rostedt@...dmis.org>
Tested-by: Marco Elver <elver@...gle.com>
---
kernel/locking/lockdep.c | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -5044,6 +5044,8 @@ void lock_acquire(struct lockdep_map *lo
{
unsigned long flags;
+ trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+
if (unlikely(current->lockdep_recursion)) {
/* XXX allow trylock from NMI ?!? */
if (lockdep_nmi() && !trylock) {
@@ -5068,7 +5070,6 @@ void lock_acquire(struct lockdep_map *lo
check_flags(flags);
current->lockdep_recursion++;
- trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
__lock_acquire(lock, subclass, trylock, read, check,
irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
lockdep_recursion_finish();
@@ -5080,13 +5081,15 @@ void lock_release(struct lockdep_map *lo
{
unsigned long flags;
+ trace_lock_release(lock, ip);
+
if (unlikely(current->lockdep_recursion))
return;
raw_local_irq_save(flags);
check_flags(flags);
+
current->lockdep_recursion++;
- trace_lock_release(lock, ip);
if (__lock_release(lock, ip))
check_chain_key(current);
lockdep_recursion_finish();
@@ -5272,8 +5275,6 @@ __lock_acquired(struct lockdep_map *lock
hlock->holdtime_stamp = now;
}
- trace_lock_acquired(lock, ip);
-
stats = get_lock_stats(hlock_class(hlock));
if (waittime) {
if (hlock->read)
@@ -5292,6 +5293,8 @@ void lock_contended(struct lockdep_map *
{
unsigned long flags;
+ trace_lock_contended(lock, ip);
+
if (unlikely(!lock_stat || !debug_locks))
return;
@@ -5301,7 +5304,6 @@ void lock_contended(struct lockdep_map *
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion++;
- trace_lock_contended(lock, ip);
__lock_contended(lock, ip);
lockdep_recursion_finish();
raw_local_irq_restore(flags);
@@ -5312,6 +5314,8 @@ void lock_acquired(struct lockdep_map *l
{
unsigned long flags;
+ trace_lock_acquired(lock, ip);
+
if (unlikely(!lock_stat || !debug_locks))
return;