Message-Id: <20190212171423.8308-2-frederic@kernel.org>
Date: Tue, 12 Feb 2019 18:13:52 +0100
From: Frederic Weisbecker <frederic@...nel.org>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Frederic Weisbecker <frederic@...nel.org>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Peter Zijlstra <peterz@...radead.org>,
Mauro Carvalho Chehab <mchehab@...pensource.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
"David S . Miller" <davem@...emloft.net>,
Thomas Gleixner <tglx@...utronix.de>,
"Paul E . McKenney" <paulmck@...ux.vnet.ibm.com>,
Frederic Weisbecker <fweisbec@...il.com>,
Pavan Kondeti <pkondeti@...eaurora.org>,
Ingo Molnar <mingo@...nel.org>,
Joel Fernandes <joel@...lfernandes.org>
Subject: [PATCH 01/32] locking/lockdep: Use expanded masks on find_usage_*() functions

In order to perform fine-grained softirq vector locking validation, we'll
need to be able to check multiple vector usages at once. Prepare the
low-level usage mask check functions for that purpose.
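
For illustration only (not part of this patch): once usage_match() takes a
u64 mask, a caller can OR together several usage bits and validate them in
a single BFS pass, which the previous single-bit API could not express. The
per-vector bit names below are hypothetical placeholders for bits introduced
later in this series:

    /* Sketch: check two hypothetical per-vector usage bits at once */
    u64 mask = BIT(LOCK_USED_IN_TIMER_SOFTIRQ) |
               BIT(LOCK_USED_IN_NET_RX_SOFTIRQ);

    root.parent = NULL;
    root.class = hlock_class(this);
    ret = find_usage_forwards(&root, mask, &target_entry);
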
Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
Cc: Mauro Carvalho Chehab <mchehab@...pensource.com>
Cc: Joel Fernandes <joel@...lfernandes.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Pavan Kondeti <pkondeti@...eaurora.org>
Cc: Paul E. McKenney <paulmck@...ux.vnet.ibm.com>
Cc: David S. Miller <davem@...emloft.net>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
---
kernel/locking/lockdep.c | 30 +++++++++++++++---------------
1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 608f74ed8bb9..6127cef4f8fb 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1335,9 +1335,9 @@ check_redundant(struct lock_list *root, struct lock_class *target,
* without creating any illegal irq-safe -> irq-unsafe lock dependency.
*/
-static inline int usage_match(struct lock_list *entry, void *bit)
+static inline int usage_match(struct lock_list *entry, void *mask)
{
- return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
+ return entry->class->usage_mask & *(u64 *)mask;
}
@@ -1353,14 +1353,14 @@ static inline int usage_match(struct lock_list *entry, void *bit)
* Return <0 on error.
*/
static int
-find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
+find_usage_forwards(struct lock_list *root, u64 usage_mask,
struct lock_list **target_entry)
{
int result;
debug_atomic_inc(nr_find_usage_forwards_checks);
- result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+ result = __bfs_forwards(root, &usage_mask, usage_match, target_entry);
return result;
}
@@ -1376,14 +1376,14 @@ find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
* Return <0 on error.
*/
static int
-find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
+find_usage_backwards(struct lock_list *root, u64 usage_mask,
struct lock_list **target_entry)
{
int result;
debug_atomic_inc(nr_find_usage_backwards_checks);
- result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
+ result = __bfs_backwards(root, &usage_mask, usage_match, target_entry);
return result;
}
@@ -1588,7 +1588,7 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
this.parent = NULL;
this.class = hlock_class(prev);
- ret = find_usage_backwards(&this, bit_backwards, &target_entry);
+ ret = find_usage_backwards(&this, BIT(bit_backwards), &target_entry);
if (ret < 0)
return print_bfs_bug(ret);
if (ret == 1)
@@ -1596,7 +1596,7 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
that.parent = NULL;
that.class = hlock_class(next);
- ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
+ ret = find_usage_forwards(&that, BIT(bit_forwards), &target_entry1);
if (ret < 0)
return print_bfs_bug(ret);
if (ret == 1)
@@ -2553,7 +2553,7 @@ print_irq_inversion_bug(struct task_struct *curr,
*/
static int
check_usage_forwards(struct task_struct *curr, struct held_lock *this,
- enum lock_usage_bit bit, const char *irqclass)
+ u64 usage_mask, const char *irqclass)
{
int ret;
struct lock_list root;
@@ -2561,7 +2561,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
root.parent = NULL;
root.class = hlock_class(this);
- ret = find_usage_forwards(&root, bit, &target_entry);
+ ret = find_usage_forwards(&root, usage_mask, &target_entry);
if (ret < 0)
return print_bfs_bug(ret);
if (ret == 1)
@@ -2577,7 +2577,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
*/
static int
check_usage_backwards(struct task_struct *curr, struct held_lock *this,
- enum lock_usage_bit bit, const char *irqclass)
+ u64 usage_mask, const char *irqclass)
{
int ret;
struct lock_list root;
@@ -2585,7 +2585,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
root.parent = NULL;
root.class = hlock_class(this);
- ret = find_usage_backwards(&root, bit, &target_entry);
+ ret = find_usage_backwards(&root, usage_mask, &target_entry);
if (ret < 0)
return print_bfs_bug(ret);
if (ret == 1)
@@ -2644,7 +2644,7 @@ static inline int state_verbose(enum lock_usage_bit bit,
}
typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
- enum lock_usage_bit bit, const char *name);
+ u64 usage_mask, const char *name);
static int
mark_lock_irq(struct task_struct *curr, struct held_lock *this,
@@ -2676,7 +2676,7 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this,
* states.
*/
if ((!read || !dir || STRICT_READ_CHECKS) &&
- !usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
+ !usage(curr, this, BIT(excl_bit), state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
return 0;
/*
@@ -2687,7 +2687,7 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this,
return 0;
if (STRICT_READ_CHECKS &&
- !usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
+ !usage(curr, this, BIT(excl_bit + LOCK_USAGE_READ_MASK),
state_name(new_bit + LOCK_USAGE_READ_MASK)))
return 0;
}
--
2.17.1