Message-Id: <20190228171242.32144-3-frederic@kernel.org>
Date: Thu, 28 Feb 2019 18:12:07 +0100
From: Frederic Weisbecker <frederic@...nel.org>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Frederic Weisbecker <frederic@...nel.org>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Peter Zijlstra <peterz@...radead.org>,
"David S . Miller" <davem@...emloft.net>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Mauro Carvalho Chehab <mchehab+samsung@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
"Paul E . McKenney" <paulmck@...ux.vnet.ibm.com>,
Frederic Weisbecker <fweisbec@...il.com>,
Pavan Kondeti <pkondeti@...eaurora.org>,
Ingo Molnar <mingo@...nel.org>,
Joel Fernandes <joel@...lfernandes.org>
Subject: [PATCH 02/37] locking/lockdep: Use expanded masks on find_usage_*() functions
In order to perform softirq-vector fine-grained locking validation, we'll
need to be able to check multiple vector usages at once. Prepare the
low-level usage-mask check functions for that purpose.
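As an illustration only (not part of this patch), here is a minimal
userspace sketch of the idea: a caller ORs several usage bits into a
single mask and the match helper tests them in one pass, the same shape
as the reworked usage_match(). The enum values and struct below are
simplified stand-ins for lockdep's real definitions.

#include <stdio.h>

#define BIT(nr)	(1UL << (nr))

/* Simplified stand-ins for lockdep's enum lock_usage_bit values. */
enum lock_usage_bit {
	LOCK_USED_IN_HARDIRQ,
	LOCK_USED_IN_SOFTIRQ,
	LOCK_ENABLED_HARDIRQ,
	LOCK_ENABLED_SOFTIRQ,
};

/* Only the field the check cares about. */
struct lock_class {
	unsigned long usage_mask;
};

/* Same shape as the patched usage_match(): test a whole mask at once. */
static int usage_match(const struct lock_class *class, unsigned long mask)
{
	return !!(class->usage_mask & mask);
}

int main(void)
{
	struct lock_class class = { .usage_mask = BIT(LOCK_USED_IN_SOFTIRQ) };

	/* Check two vector usages with a single call. */
	unsigned long mask = BIT(LOCK_USED_IN_HARDIRQ) |
			     BIT(LOCK_USED_IN_SOFTIRQ);

	printf("match: %d\n", usage_match(&class, mask));	/* prints 1 */
	return 0;
}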
Reviewed-by: David S. Miller <davem@...emloft.net>
Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
Cc: Mauro Carvalho Chehab <mchehab+samsung@...nel.org>
Cc: Joel Fernandes <joel@...lfernandes.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Pavan Kondeti <pkondeti@...eaurora.org>
Cc: Paul E . McKenney <paulmck@...ux.vnet.ibm.com>
Cc: David S . Miller <davem@...emloft.net>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
---
kernel/locking/lockdep.c | 30 +++++++++++++++---------------
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a989a3e9ead7..11db7ba29660 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1335,9 +1335,9 @@ check_redundant(struct lock_list *root, struct lock_class *target,
* without creating any illegal irq-safe -> irq-unsafe lock dependency.
*/
-static inline int usage_match(struct lock_list *entry, void *bit)
+static inline int usage_match(struct lock_list *entry, void *mask)
{
- return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
+ return entry->class->usage_mask & *(unsigned long *)mask;
}
@@ -1353,14 +1353,14 @@ static inline int usage_match(struct lock_list *entry, void *bit)
* Return <0 on error.
*/
static int
-find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
+find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
struct lock_list **target_entry)
{
int result;
debug_atomic_inc(nr_find_usage_forwards_checks);
- result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+ result = __bfs_forwards(root, &usage_mask, usage_match, target_entry);
return result;
}
@@ -1376,14 +1376,14 @@ find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
* Return <0 on error.
*/
static int
-find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
+find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
struct lock_list **target_entry)
{
int result;
debug_atomic_inc(nr_find_usage_backwards_checks);
- result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
+ result = __bfs_backwards(root, &usage_mask, usage_match, target_entry);
return result;
}
@@ -1588,7 +1588,7 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
this.parent = NULL;
this.class = hlock_class(prev);
- ret = find_usage_backwards(&this, bit_backwards, &target_entry);
+ ret = find_usage_backwards(&this, BIT(bit_backwards), &target_entry);
if (ret < 0)
return print_bfs_bug(ret);
if (ret == 1)
@@ -1596,7 +1596,7 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
that.parent = NULL;
that.class = hlock_class(next);
- ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
+ ret = find_usage_forwards(&that, BIT(bit_forwards), &target_entry1);
if (ret < 0)
return print_bfs_bug(ret);
if (ret == 1)
@@ -2555,7 +2555,7 @@ print_irq_inversion_bug(struct task_struct *curr,
*/
static int
check_usage_forwards(struct task_struct *curr, struct held_lock *this,
- enum lock_usage_bit bit, const char *irqclass)
+ unsigned long usage_mask, const char *irqclass)
{
int ret;
struct lock_list root;
@@ -2563,7 +2563,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
root.parent = NULL;
root.class = hlock_class(this);
- ret = find_usage_forwards(&root, bit, &target_entry);
+ ret = find_usage_forwards(&root, usage_mask, &target_entry);
if (ret < 0)
return print_bfs_bug(ret);
if (ret == 1)
@@ -2579,7 +2579,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
*/
static int
check_usage_backwards(struct task_struct *curr, struct held_lock *this,
- enum lock_usage_bit bit, const char *irqclass)
+ unsigned long usage_mask, const char *irqclass)
{
int ret;
struct lock_list root;
@@ -2587,7 +2587,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
root.parent = NULL;
root.class = hlock_class(this);
- ret = find_usage_backwards(&root, bit, &target_entry);
+ ret = find_usage_backwards(&root, usage_mask, &target_entry);
if (ret < 0)
return print_bfs_bug(ret);
if (ret == 1)
@@ -2646,7 +2646,7 @@ static inline int state_verbose(enum lock_usage_bit bit,
}
typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
- enum lock_usage_bit bit, const char *name);
+ unsigned long usage_mask, const char *name);
static int
mark_lock_irq(struct task_struct *curr, struct held_lock *this,
@@ -2678,7 +2678,7 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this,
* states.
*/
if ((!read || !dir || STRICT_READ_CHECKS) &&
- !usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
+ !usage(curr, this, BIT(excl_bit), state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
return 0;
/*
@@ -2689,7 +2689,7 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this,
return 0;
if (STRICT_READ_CHECKS &&
- !usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
+ !usage(curr, this, BIT(excl_bit + LOCK_USAGE_READ_MASK),
state_name(new_bit + LOCK_USAGE_READ_MASK)))
return 0;
}
--
2.21.0