[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20190212171423.8308-30-frederic@kernel.org>
Date: Tue, 12 Feb 2019 18:14:20 +0100
From: Frederic Weisbecker <frederic@...nel.org>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Frederic Weisbecker <frederic@...nel.org>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Peter Zijlstra <peterz@...radead.org>,
Mauro Carvalho Chehab <mchehab@...pensource.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
"David S . Miller" <davem@...emloft.net>,
Thomas Gleixner <tglx@...utronix.de>,
"Paul E . McKenney" <paulmck@...ux.vnet.ibm.com>,
Frederic Weisbecker <fweisbec@...il.com>,
Pavan Kondeti <pkondeti@...eaurora.org>,
Ingo Molnar <mingo@...nel.org>,
Joel Fernandes <joel@...lfernandes.org>
Subject: [PATCH 29/32] locking/lockdep: Branch the new vec-finegrained softirq masking to lockdep
Now that softirqs fully support per-vector masking, let's feed lockdep
with the proper inputs and push the vector numbers involved in each base
softirq lock usage:
LOCK_ENABLED_SOFTIRQ: push local_softirq_enabled()
LOCK_USED_IN_SOFTIRQ: push curr->softirq_context, modified by
lockdep_softirq_enter/exit()
Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
Cc: Mauro Carvalho Chehab <mchehab@...pensource.com>
Cc: Joel Fernandes <joel@...lfernandes.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Pavan Kondeti <pkondeti@...eaurora.org>
Cc: Paul E . McKenney <paulmck@...ux.vnet.ibm.com>
Cc: David S . Miller <davem@...emloft.net>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
---
kernel/locking/lockdep.c | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index acd82145f6a6..570eea5376ec 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2877,6 +2877,7 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
*/
if (curr->softirqs_enabled) {
usage.bit = LOCK_ENABLED_SOFTIRQ;
+ usage.vector = local_softirq_enabled();
if (!mark_held_locks(curr, &usage))
return;
}
@@ -2964,6 +2965,7 @@ void trace_softirqs_on(unsigned long ip)
struct task_struct *curr = current;
struct lock_usage usage = {
.bit = LOCK_ENABLED_SOFTIRQ,
+ .vector = local_softirq_enabled()
};
if (unlikely(!debug_locks || current->lockdep_recursion))
@@ -3028,7 +3030,7 @@ void trace_softirqs_off(unsigned long ip)
static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
{
- struct lock_usage usage = { .vector = 0 };
+ struct lock_usage usage;
/*
* If non-trylock use in a hardirq or softirq context, then
* mark the lock as used in these contexts:
@@ -3037,22 +3039,26 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
if (hlock->read) {
if (curr->hardirq_context) {
usage.bit = LOCK_USED_IN_HARDIRQ_READ;
+ usage.vector = 0;
if (!mark_lock(curr, hlock, &usage))
return 0;
}
if (curr->softirq_context) {
usage.bit = LOCK_USED_IN_SOFTIRQ_READ;
+ usage.vector = curr->softirq_context;
if (!mark_lock(curr, hlock, &usage))
return 0;
}
} else {
if (curr->hardirq_context) {
usage.bit = LOCK_USED_IN_HARDIRQ;
+ usage.vector = 0;
if (!mark_lock(curr, hlock, &usage))
return 0;
}
if (curr->softirq_context) {
usage.bit = LOCK_USED_IN_SOFTIRQ;
+ usage.vector = curr->softirq_context;
if (!mark_lock(curr, hlock, &usage))
return 0;
}
@@ -3061,19 +3067,23 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
if (!hlock->hardirqs_off) {
if (hlock->read) {
usage.bit = LOCK_ENABLED_HARDIRQ_READ;
+ usage.vector = 0;
if (!mark_lock(curr, hlock, &usage))
return 0;
if (curr->softirqs_enabled) {
usage.bit = LOCK_ENABLED_SOFTIRQ_READ;
+ usage.vector = local_softirq_enabled();
if (!mark_lock(curr, hlock, &usage))
return 0;
}
} else {
usage.bit = LOCK_ENABLED_HARDIRQ;
+ usage.vector = 0;
if (!mark_lock(curr, hlock, &usage))
return 0;
if (curr->softirqs_enabled) {
usage.bit = LOCK_ENABLED_SOFTIRQ;
+ usage.vector = local_softirq_enabled();
if (!mark_lock(curr, hlock, &usage))
return 0;
}
--
2.17.1
Powered by blists - more mailing lists