Message-Id: <20190829083132.22394-24-duyuyang@gmail.com>
Date:   Thu, 29 Aug 2019 16:31:25 +0800
From:   Yuyang Du <duyuyang@...il.com>
To:     peterz@...radead.org, will.deacon@....com, mingo@...nel.org
Cc:     bvanassche@....org, ming.lei@...hat.com, frederic@...nel.org,
        tglx@...utronix.de, linux-kernel@...r.kernel.org,
        longman@...hat.com, paulmck@...ux.vnet.ibm.com,
        boqun.feng@...il.com, Yuyang Du <duyuyang@...il.com>
Subject: [PATCH v4 23/30] locking/lockdep: Adjust BFS algorithm to support multiple matches

With recursive-read locks, a cycle is not a sufficient condition for a
deadlock. As a result, the search needs to be able to continue after a
match when that match turns out not to be the one wanted. __bfs() is
adjusted to that end: the basic idea is to enqueue a node's children
before matching the node itself, and a new init argument lets a caller
skip the queue initialization so that a previous search can be resumed.

No functional change.
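
To make the reordering concrete, here is a minimal, self-contained
sketch (the toy graph, array queue, and helper names are simplified
stand-ins for lock_list, circular_queue and friends, not the kernel
code): children are enqueued first, and the dequeued node itself is
only then tested with match(), so a search that later decides a match
was unwanted still has that node's children queued.

#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 8

/* Toy dependency graph as an adjacency matrix (illustrative only). */
static const bool edge[MAX_NODES][MAX_NODES] = {
	[0][1] = true, [0][2] = true,
	[1][3] = true,
	[2][3] = true,
};

static bool visited[MAX_NODES];

/*
 * BFS in the order this patch establishes: enqueue a node's children
 * before applying match() to the node itself. The return convention
 * mirrors __bfs(): 1 = no match, 0 = match (*target set), -1 = error.
 */
static int bfs(int src, bool (*match)(int node), int *target)
{
	int queue[MAX_NODES];
	int head = 0, tail = 0;

	visited[src] = true;
	queue[tail++] = src;	/* the source, too, is matched at dequeue */

	while (head < tail) {
		int node = queue[head++];
		int child;

		/* Children first, so the search can outlive a match. */
		for (child = 0; child < MAX_NODES; child++) {
			if (edge[node][child] && !visited[child]) {
				visited[child] = true;
				if (tail == MAX_NODES)
					return -1;	/* queue overflow */
				queue[tail++] = child;
			}
		}

		if (match(node)) {
			*target = node;
			return 0;
		}
	}
	return 1;
}

static bool is_three(int node)
{
	return node == 3;
}

int main(void)
{
	int target;

	if (bfs(0, is_three, &target) == 0)
		printf("matched node %d\n", target);
	return 0;
}

The sketch still stops at the first match; in the kernel, the point of
the reordering (together with the init argument) is that a caller can
keep searching when a match is judged not to be a deadlock.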

Signed-off-by: Yuyang Du <duyuyang@...il.com>
---
 kernel/locking/lockdep.c | 51 ++++++++++++++++++++++++------------------------
 1 file changed, 26 insertions(+), 25 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index d13b6b7..3ad97bc 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1623,7 +1623,7 @@ static inline void set_lock_type2(struct lock_list *lock, int read)
 static int __bfs(struct lock_list *source_entry,
 		 void *data,
 		 int (*match)(struct lock_list *entry, void *data),
-		 struct lock_list **target_entry, int forward)
+		 struct lock_list **target_entry, int forward, bool init)
 {
 	struct lock_list *entry;
 	struct lock_list *lock;
@@ -1631,19 +1631,11 @@ static int __bfs(struct lock_list *source_entry,
 	struct circular_queue *cq = &lock_cq;
 	int ret = 1;
 
-	if (match(source_entry, data)) {
-		*target_entry = source_entry;
-		ret = 0;
-		goto exit;
+	if (init) {
+		__cq_init(cq);
+		__cq_enqueue(cq, source_entry);
 	}
 
-	head = get_dep_list(source_entry, forward);
-	if (list_empty(head))
-		goto exit;
-
-	__cq_init(cq);
-	__cq_enqueue(cq, source_entry);
-
 	while ((lock = __cq_dequeue(cq))) {
 
 		if (!lock->class[forward]) {
@@ -1655,25 +1647,34 @@ static int __bfs(struct lock_list *source_entry,
 
 		DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
+		/*
+		 * Enqueue a node's children before match() so that even
+		 * if this node matches but is not wanted, the search can
+		 * continue.
+		 */
 		list_for_each_entry_rcu(entry, head, entry[forward]) {
 			if (!lock_accessed(entry, forward)) {
 				unsigned int cq_depth;
+
 				mark_lock_accessed(entry, lock, forward);
-				if (match(entry, data)) {
-					*target_entry = entry;
-					ret = 0;
-					goto exit;
-				}
 
 				if (__cq_enqueue(cq, entry)) {
 					ret = -1;
 					goto exit;
 				}
+
 				cq_depth = __cq_get_elem_count(cq);
 				if (max_bfs_queue_depth < cq_depth)
 					max_bfs_queue_depth = cq_depth;
 			}
 		}
+
+		if (match(lock, data)) {
+			*target_entry = lock;
+			ret = 0;
+			goto exit;
+		}
+
 	}
 exit:
 	return ret;
@@ -1682,9 +1683,9 @@ static int __bfs(struct lock_list *source_entry,
 static inline int __bfs_forwards(struct lock_list *src_entry,
 			void *data,
 			int (*match)(struct lock_list *entry, void *data),
-			struct lock_list **target_entry)
+			struct lock_list **target_entry, bool init)
 {
-	return __bfs(src_entry, data, match, target_entry, 1);
+	return __bfs(src_entry, data, match, target_entry, 1, init);
 }
 
 static inline int __bfs_backwards(struct lock_list *src_entry,
@@ -1692,7 +1693,7 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
 			int (*match)(struct lock_list *entry, void *data),
 			struct lock_list **target_entry)
 {
-	return __bfs(src_entry, data, match, target_entry, 0);
+	return __bfs(src_entry, data, match, target_entry, 0, true);
 }
 
 static void print_lock_trace(const struct lock_trace *trace,
@@ -1864,7 +1865,7 @@ static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
 	unsigned long  count = 0;
 	struct lock_list *uninitialized_var(target_entry);
 
-	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);
+	__bfs_forwards(this, (void *)&count, noop_count, &target_entry, true);
 
 	return count;
 }
@@ -1918,12 +1919,12 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
  */
 static noinline int
 check_path(struct lock_class *target, struct lock_list *src_entry,
-	   struct lock_list **target_entry)
+	   struct lock_list **target_entry, bool init)
 {
 	int ret;
 
 	ret = __bfs_forwards(src_entry, (void *)target, class_equal,
-			     target_entry);
+			     target_entry, init);
 
 	if (unlikely(ret < 0))
 		print_bfs_bug(ret);
@@ -1951,7 +1952,7 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
 
 	debug_atomic_inc(nr_cyclic_checks);
 
-	ret = check_path(hlock_class(target), &src_entry, &target_entry);
+	ret = check_path(hlock_class(target), &src_entry, &target_entry, true);
 
 	if (unlikely(!ret)) {
 		if (!*trace) {
@@ -2012,7 +2013,7 @@ static inline int usage_match_backward(struct lock_list *entry, void *mask)
 	debug_atomic_inc(nr_find_usage_forwards_checks);
 
 	result = __bfs_forwards(root, &usage_mask, usage_match_forward,
-				target_entry);
+				target_entry, true);
 
 	return result;
 }
-- 
1.8.3.1
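
As far as this patch goes, every caller visible in the diff passes
init=true; check_path() merely forwards the flag. What the interface
enables, presumably for later patches in the series, is resuming a
search: with init=false the circular queue is not reset, so the nodes
enqueued before the previous match are still there. A hypothetical
caller (is_wanted_match() is an invented placeholder, not a function
in this series) might look like:

	bool init = true;

	/*
	 * Hypothetical resume loop: re-enter the BFS via check_path()
	 * with init=false so it picks up the queue contents left over
	 * from the call that returned the unwanted match.
	 */
	while (!check_path(hlock_class(target), &src_entry,
			   &target_entry, init)) {
		if (is_wanted_match(target_entry))	/* placeholder */
			break;
		init = false;	/* continue, do not reinitialize */
	}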
