Message-Id: <20190212171423.8308-7-frederic@kernel.org>
Date: Tue, 12 Feb 2019 18:13:57 +0100
From: Frederic Weisbecker <frederic@...nel.org>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Frederic Weisbecker <frederic@...nel.org>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Peter Zijlstra <peterz@...radead.org>,
Mauro Carvalho Chehab <mchehab@...pensource.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
"David S . Miller" <davem@...emloft.net>,
Thomas Gleixner <tglx@...utronix.de>,
"Paul E . McKenney" <paulmck@...ux.vnet.ibm.com>,
Frederic Weisbecker <fweisbec@...il.com>,
Pavan Kondeti <pkondeti@...eaurora.org>,
Ingo Molnar <mingo@...nel.org>,
Joel Fernandes <joel@...lfernandes.org>
Subject: [PATCH 06/32] locking/lockdep: Prepare check_usage_*() to handle plain masks

mark_lock_irq() is going to deal with lock usages that gather multiple
softirq vectors at once. The validation performed by
check_usage_backwards() and check_usage_forwards() will therefore need
to handle such expanded usage masks: instead of being handed a
pre-computed irqclass name, these functions now derive it from the
intersection of the checked usage mask and the target class's usage
mask.

Enhance those functions to that purpose.
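
For illustration, below is a minimal userspace sketch (not the kernel
code) of the lookup the reworked helpers now perform. The usage bit
layout, the state names and the ffs64() stand-in are simplified
assumptions made for this example only:

#include <stdio.h>
#include <stdint.h>

#define LOCK_USAGE_READ_MASK	1	/* read variant = base bit + 1 (assumed) */

/* Assumed toy usage bits: two states, each with a read variant. */
enum {
	LOCK_USED_IN_HARDIRQ,		/* 0 */
	LOCK_USED_IN_HARDIRQ_READ,	/* 1 */
	LOCK_USED_IN_SOFTIRQ,		/* 2 */
	LOCK_USED_IN_SOFTIRQ_READ,	/* 3 */
};

static const char *state_name(int bit)
{
	static const char * const names[] = {
		"IN-HARDIRQ-W", "IN-HARDIRQ-R",
		"IN-SOFTIRQ-W", "IN-SOFTIRQ-R",
	};
	return names[bit];
}

/* Stand-in for the kernel's __ffs64(): index of the first set bit. */
static int ffs64(uint64_t mask)
{
	return __builtin_ctzll(mask);
}

/*
 * Same steps as the reworked check_usage_*(): intersect the checked
 * usage mask with the target class's mask, take the first common bit
 * and shift it to its read variant if the access was a read. The
 * intersection is assumed non-zero, as in the kernel path where a
 * dependency match was already found.
 */
static const char *irqclass_from_masks(uint64_t usage_mask,
				       uint64_t class_mask, int read)
{
	uint64_t intersec = usage_mask & class_mask;
	int intersec_bit = ffs64(intersec);

	if (read)
		intersec_bit += LOCK_USAGE_READ_MASK;
	return state_name(intersec_bit);
}

int main(void)
{
	uint64_t usage = (1ULL << LOCK_USED_IN_HARDIRQ) |
			 (1ULL << LOCK_USED_IN_SOFTIRQ);
	uint64_t class = 1ULL << LOCK_USED_IN_SOFTIRQ;

	/* Softirq is the only common bit: prints "IN-SOFTIRQ-W". */
	printf("%s\n", irqclass_from_masks(usage, class, 0));
	return 0;
}
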
Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
Cc: Mauro Carvalho Chehab <mchehab@...pensource.com>
Cc: Joel Fernandes <joel@...lfernandes.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Pavan Kondeti <pkondeti@...eaurora.org>
Cc: Paul E. McKenney <paulmck@...ux.vnet.ibm.com>
Cc: David S. Miller <davem@...emloft.net>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
---
 kernel/locking/lockdep.c | 30 ++++++++++++++++++++++++------
 1 file changed, 24 insertions(+), 6 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 6b625b70598a..bb66327e5cc3 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2660,9 +2660,12 @@ print_irq_inversion_bug(struct task_struct *curr,
*/
static int
check_usage_forwards(struct task_struct *curr, struct held_lock *this,
- u64 usage_mask, const char *irqclass)
+ u64 usage_mask, bool read)
{
int ret;
+ u64 intersec;
+ int intersec_bit;
+ const char *irqclass;
struct lock_list root;
struct lock_list *uninitialized_var(target_entry);
@@ -2674,8 +2677,14 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
if (ret == 1)
return ret;
+ intersec = usage_mask & target_entry->class->usage_mask;
+ intersec_bit = __ffs64(intersec);
+ if (read)
+ intersec_bit += LOCK_USAGE_READ_MASK;
+ irqclass = state_name(intersec_bit);
+
return print_irq_inversion_bug(curr, &root, target_entry,
- this, 1, irqclass);
+ this, 1, irqclass);
}
/*
@@ -2684,9 +2693,12 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
*/
static int
check_usage_backwards(struct task_struct *curr, struct held_lock *this,
- u64 usage_mask, const char *irqclass)
+ u64 usage_mask, bool read)
{
int ret;
+ u64 intersec;
+ int intersec_bit;
+ const char *irqclass;
struct lock_list root;
struct lock_list *uninitialized_var(target_entry);
@@ -2698,6 +2710,12 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
if (ret == 1)
return ret;
+ intersec = usage_mask & target_entry->class->usage_mask;
+ intersec_bit = __ffs64(intersec);
+ if (read)
+ intersec_bit += LOCK_USAGE_READ_MASK;
+ irqclass = state_name(intersec_bit);
+
return print_irq_inversion_bug(curr, &root, target_entry,
this, 0, irqclass);
}
@@ -2751,7 +2769,7 @@ static inline int state_verbose(enum lock_usage_bit bit,
}
typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
- u64 usage_mask, const char *name);
+ u64 usage_mask, bool read);
static int
mark_lock_irq(struct task_struct *curr, struct held_lock *this,
@@ -2787,7 +2805,7 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this,
* states.
*/
if ((!read || !dir || STRICT_READ_CHECKS) &&
- !usage(curr, this, lock_usage_mask(&excl_usage), state_name(new_usage->bit & ~LOCK_USAGE_READ_MASK)))
+ !usage(curr, this, lock_usage_mask(&excl_usage), false))
return 0;
/*
@@ -2801,7 +2819,7 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this,
if (STRICT_READ_CHECKS &&
!usage(curr, this, lock_usage_mask(&excl_usage),
- state_name(new_usage->bit + LOCK_USAGE_READ_MASK)))
+ true))
return 0;
}
--
2.17.1