Message-Id: <20190516080015.16033-7-duyuyang@gmail.com>
Date: Thu, 16 May 2019 16:00:04 +0800
From: Yuyang Du <duyuyang@...il.com>
To: peterz@...radead.org, will.deacon@....com, mingo@...nel.org
Cc: bvanassche@....org, ming.lei@...hat.com, frederic@...nel.org,
tglx@...utronix.de, boqun.feng@...il.com, paulmck@...ux.ibm.com,
linux-kernel@...r.kernel.org, Yuyang Du <duyuyang@...il.com>
Subject: [PATCH v2 06/17] locking/lockdep: Adjust BFS algorithm to support multiple matches
With read locks, a circle in the dependency graph is not a sufficient
condition for a deadlock. As a result, we need to be able to continue the
search after a match when that match is not wanted. __bfs() is adjusted to
that end: the basic idea is to enqueue a node's children before matching
the node itself.
No functional change.
Signed-off-by: Yuyang Du <duyuyang@...il.com>
---
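Note (illustration only, below the fold so it stays out of the changelog):
a stand-alone toy sketch of the reordered, resumable BFS. The adjacency
array, the is_target() predicate, and the init/resume calling pattern are
hypothetical stand-ins for lockdep's lock_list graph, match() callback and
static circular_queue; this is not lockdep code.

#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 8

/* Toy dependency graph: 0 -> 1 -> 3. */
static const bool adj[MAX_NODES][MAX_NODES] = {
	[0] = { [1] = true },
	[1] = { [3] = true },
};

/*
 * Both 1 and 3 belong to the target class; pretend 1 turns out to be a
 * benign hit (e.g. a read-read dependency) that the caller rejects,
 * while 3 is the real deadlock candidate.
 */
static bool is_target(int node)
{
	return node == 1 || node == 3;
}

/* Persistent BFS state, standing in for lockdep's static circular_queue. */
static int queue[MAX_NODES], head, tail;
static bool accessed[MAX_NODES];

static int bfs(int src, bool init)
{
	if (init) {
		head = tail = 0;
		for (int i = 0; i < MAX_NODES; i++)
			accessed[i] = false;
		accessed[src] = true;
		queue[tail++] = src;
	}

	while (head < tail) {
		int node = queue[head++];

		/* Enqueue the node's children before matching it... */
		for (int c = 0; c < MAX_NODES; c++) {
			if (adj[node][c] && !accessed[c]) {
				accessed[c] = true;
				queue[tail++] = c;
			}
		}

		/*
		 * ...so that when the caller rejects this match, a second
		 * call with init=false resumes the search from the
		 * already-queued children.
		 */
		if (is_target(node))
			return node;
	}
	return -1;
}

int main(void)
{
	printf("first match: %d\n", bfs(0, true));   /* prints 1 */
	printf("resumed match: %d\n", bfs(0, false)); /* prints 3 */
	return 0;
}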
kernel/locking/lockdep.c | 54 +++++++++++++++++++++++++-----------------------
1 file changed, 28 insertions(+), 26 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index f4982ad..54ddf85 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1417,7 +1417,7 @@ static int __bfs(struct lock_list *source_entry,
void *data,
int (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry,
- int offset)
+ int offset, bool init)
{
struct lock_list *entry;
struct lock_list *lock;
@@ -1425,19 +1425,11 @@ static int __bfs(struct lock_list *source_entry,
struct circular_queue *cq = &lock_cq;
int ret = 1;
- if (match(source_entry, data)) {
- *target_entry = source_entry;
- ret = 0;
- goto exit;
+ if (init) {
+ __cq_init(cq);
+ __cq_enqueue(cq, source_entry);
}
- head = get_dep_list(source_entry, offset);
- if (list_empty(head))
- goto exit;
-
- __cq_init(cq);
- __cq_enqueue(cq, source_entry);
-
while ((lock = __cq_dequeue(cq))) {
if (!lock->class) {
@@ -1449,25 +1441,34 @@ static int __bfs(struct lock_list *source_entry,
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+ /*
+ * Enqueue a node's children before match() so that even if
+ * this node matches but is not wanted, we can continue
+ * the search.
+ */
list_for_each_entry_rcu(entry, head, entry) {
if (!lock_accessed(entry)) {
unsigned int cq_depth;
+
mark_lock_accessed(entry, lock);
- if (match(entry, data)) {
- *target_entry = entry;
- ret = 0;
- goto exit;
- }
if (__cq_enqueue(cq, entry)) {
ret = -1;
goto exit;
}
+
cq_depth = __cq_get_elem_count(cq);
if (max_bfs_queue_depth < cq_depth)
max_bfs_queue_depth = cq_depth;
}
}
+
+ if (match(lock, data)) {
+ *target_entry = lock;
+ ret = 0;
+ goto exit;
+ }
+
}
exit:
return ret;
@@ -1476,10 +1477,10 @@ static int __bfs(struct lock_list *source_entry,
static inline int __bfs_forwards(struct lock_list *src_entry,
void *data,
int (*match)(struct lock_list *entry, void *data),
- struct lock_list **target_entry)
+ struct lock_list **target_entry, bool init)
{
return __bfs(src_entry, data, match, target_entry,
- offsetof(struct lock_class, locks_after));
+ offsetof(struct lock_class, locks_after), init);
}
@@ -1489,7 +1490,7 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
struct lock_list **target_entry)
{
return __bfs(src_entry, data, match, target_entry,
- offsetof(struct lock_class, locks_before));
+ offsetof(struct lock_class, locks_before), true);
}
@@ -1662,7 +1663,7 @@ static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
unsigned long count = 0;
struct lock_list *uninitialized_var(target_entry);
- __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
+ __bfs_forwards(this, (void *)&count, noop_count, &target_entry, true);
return count;
}
@@ -1750,12 +1751,12 @@ static inline void set_lock_type2(struct lock_list *lock, int read)
*/
static noinline int
check_path(struct lock_class *target, struct lock_list *src_entry,
- struct lock_list **target_entry)
+ struct lock_list **target_entry, bool init)
{
int ret;
ret = __bfs_forwards(src_entry, (void *)target, class_equal,
- target_entry);
+ target_entry, init);
if (unlikely(ret < 0))
print_bfs_bug(ret);
@@ -1783,7 +1784,7 @@ static inline void set_lock_type2(struct lock_list *lock, int read)
debug_atomic_inc(nr_cyclic_checks);
- ret = check_path(hlock_class(target), &src_entry, &target_entry);
+ ret = check_path(hlock_class(target), &src_entry, &target_entry, true);
if (unlikely(!ret)) {
if (!trace->nr_entries) {
@@ -1821,7 +1822,7 @@ static inline void set_lock_type2(struct lock_list *lock, int read)
debug_atomic_inc(nr_redundant_checks);
- ret = check_path(hlock_class(target), &src_entry, &target_entry);
+ ret = check_path(hlock_class(target), &src_entry, &target_entry, true);
if (!ret) {
struct lock_list *parent;
@@ -1897,7 +1898,8 @@ static inline int usage_match(struct lock_list *entry, void *mask)
debug_atomic_inc(nr_find_usage_forwards_checks);
- result = __bfs_forwards(root, &usage_mask, usage_match, target_entry);
+ result = __bfs_forwards(root, &usage_mask, usage_match,
+ target_entry, true);
return result;
}
--
1.8.3.1