Message-ID: <tip-627f364d24c009b61c9199b2c75006e35c294675@git.kernel.org>
Date: Thu, 18 Apr 2019 04:27:34 -0700
From: tip-bot for Frederic Weisbecker <tipbot@...or.com>
To: linux-tip-commits@...r.kernel.org
Cc: will.deacon@....com, torvalds@...ux-foundation.org,
mingo@...nel.org, akpm@...ux-foundation.org,
linux-kernel@...r.kernel.org, hpa@...or.com, frederic@...nel.org,
peterz@...radead.org, tglx@...utronix.de,
paulmck@...ux.vnet.ibm.com
Subject: [tip:locking/core] locking/lockdep: Use expanded masks on
find_usage_*() functions
Commit-ID: 627f364d24c009b61c9199b2c75006e35c294675
Gitweb: https://git.kernel.org/tip/627f364d24c009b61c9199b2c75006e35c294675
Author: Frederic Weisbecker <frederic@...nel.org>
AuthorDate: Tue, 2 Apr 2019 18:02:43 +0200
Committer: Ingo Molnar <mingo@...nel.org>
CommitDate: Thu, 18 Apr 2019 12:50:17 +0200
locking/lockdep: Use expanded masks on find_usage_*() functions
In order to optimize check_irq_usage() and factor out all the IRQ usage
validations, we'll need to be able to check multiple lock usage bits at
once. Prepare the low-level usage mask check functions for that purpose.
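
For readers following along: the point is that a single BFS walk can now
test several usage bits at once, because the match callback compares the
class usage mask against an arbitrary mask instead of a single
(1 << bit) value. A minimal standalone sketch of that pattern follows;
the enum values and the toy caller are illustrative, not the exact
lockdep definitions, and lock_flag() here only approximates the helper
used in the hunks below:

	/*
	 * Minimal sketch: query several usage bits in one pass by OR'ing
	 * their mask bits together.  The enum values and the caller are
	 * illustrative; only the masking pattern mirrors the patch.
	 */
	#include <stdio.h>

	enum lock_usage_bit {
		LOCK_USED_IN_HARDIRQ,
		LOCK_USED_IN_SOFTIRQ,
	};

	/* roughly what lock_flag() does: map a usage bit to its mask bit */
	static unsigned long lock_flag(enum lock_usage_bit bit)
	{
		return 1UL << bit;
	}

	/* new-style check: any number of bits may be set in query_mask */
	static int usage_match(unsigned long class_usage_mask,
			       unsigned long query_mask)
	{
		return (class_usage_mask & query_mask) != 0;
	}

	int main(void)
	{
		unsigned long class_mask = lock_flag(LOCK_USED_IN_SOFTIRQ);
		unsigned long query = lock_flag(LOCK_USED_IN_HARDIRQ) |
				      lock_flag(LOCK_USED_IN_SOFTIRQ);

		printf("%d\n", usage_match(class_mask, query)); /* prints 1 */
		return 0;
	}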
Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Paul E. McKenney <paulmck@...ux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Will Deacon <will.deacon@....com>
Link: https://lkml.kernel.org/r/20190402160244.32434-4-frederic@kernel.org
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
kernel/locking/lockdep.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 2288aa2fa4c6..5e149dd78298 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1682,9 +1682,9 @@ check_redundant(struct lock_list *root, struct lock_class *target,
* without creating any illegal irq-safe -> irq-unsafe lock dependency.
*/
-static inline int usage_match(struct lock_list *entry, void *bit)
+static inline int usage_match(struct lock_list *entry, void *mask)
{
- return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
+ return entry->class->usage_mask & *(unsigned long *)mask;
}
@@ -1700,14 +1700,14 @@ static inline int usage_match(struct lock_list *entry, void *bit)
* Return <0 on error.
*/
static int
-find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
+find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
struct lock_list **target_entry)
{
int result;
debug_atomic_inc(nr_find_usage_forwards_checks);
- result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+ result = __bfs_forwards(root, &usage_mask, usage_match, target_entry);
return result;
}
@@ -1723,14 +1723,14 @@ find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
* Return <0 on error.
*/
static int
-find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
+find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
struct lock_list **target_entry)
{
int result;
debug_atomic_inc(nr_find_usage_backwards_checks);
- result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
+ result = __bfs_backwards(root, &usage_mask, usage_match, target_entry);
return result;
}
@@ -1935,7 +1935,7 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
this.parent = NULL;
this.class = hlock_class(prev);
- ret = find_usage_backwards(&this, bit_backwards, &target_entry);
+ ret = find_usage_backwards(&this, lock_flag(bit_backwards), &target_entry);
if (ret < 0)
return print_bfs_bug(ret);
if (ret == 1)
@@ -1943,7 +1943,7 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
that.parent = NULL;
that.class = hlock_class(next);
- ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
+ ret = find_usage_forwards(&that, lock_flag(bit_forwards), &target_entry1);
if (ret < 0)
return print_bfs_bug(ret);
if (ret == 1)
@@ -2941,7 +2941,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
root.parent = NULL;
root.class = hlock_class(this);
- ret = find_usage_forwards(&root, bit, &target_entry);
+ ret = find_usage_forwards(&root, lock_flag(bit), &target_entry);
if (ret < 0)
return print_bfs_bug(ret);
if (ret == 1)
@@ -2965,7 +2965,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
root.parent = NULL;
root.class = hlock_class(this);
- ret = find_usage_backwards(&root, bit, &target_entry);
+ ret = find_usage_backwards(&root, lock_flag(bit), &target_entry);
if (ret < 0)
return print_bfs_bug(ret);
if (ret == 1)
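
Note that at this point every caller still passes a single bit, wrapped
in lock_flag(), so the observable behaviour is unchanged; the switch
from casting the bit into the void * cookie to passing a pointer to an
on-stack mask is what makes multi-bit queries possible in the later
check_irq_usage() rework. A minimal sketch of that callback convention,
with illustrative names standing in for __bfs_forwards() and
__bfs_backwards():

	/*
	 * Sketch of the void * cookie convention: before the patch the
	 * usage bit was cast into the pointer value itself, after it a
	 * pointer to the caller's unsigned long mask is passed and
	 * dereferenced by the match callback.  walk() is a stand-in for
	 * the BFS helpers.
	 */
	#include <stdio.h>

	static int match_mask(unsigned long node_mask, void *data)
	{
		return (node_mask & *(unsigned long *)data) != 0;
	}

	static int walk(unsigned long node_mask,
			int (*match)(unsigned long, void *), void *data)
	{
		return match(node_mask, data);
	}

	int main(void)
	{
		unsigned long node_mask = (1UL << 2) | (1UL << 5);
		unsigned long usage_mask = 1UL << 5;

		printf("%d\n", walk(node_mask, match_mask, &usage_mask)); /* prints 1 */
		return 0;
	}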