Date:   Wed, 24 Apr 2019 18:19:33 +0800
From:   Yuyang Du <duyuyang@...il.com>
To:     peterz@...radead.org, will.deacon@....com, mingo@...nel.org
Cc:     bvanassche@....org, ming.lei@...hat.com, frederic@...nel.org,
        tglx@...utronix.de, linux-kernel@...r.kernel.org,
        Yuyang Du <duyuyang@...il.com>
Subject: [PATCH 27/28] locking/lockdep: Remove locks_before

Since the backward dependency lists are always empty, remove the
locks_before field from struct lock_class, along with all of its uses.

Signed-off-by: Yuyang Du <duyuyang@...il.com>
---
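Below the tearline, a minimal userspace sketch (made-up names, not the
kernel's lock_class / lock_list types, and not part of the patch itself)
of the general idea that forward adjacency lists alone are enough to
answer "is B reachable from A"; the patch's own rationale is simply that
the earlier patches in this series leave the backward lists always empty:

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_EDGES 4

	/* Toy stand-in for a lock class with forward ("held after") edges only. */
	struct class {
		const char *name;
		struct class *after[MAX_EDGES];
		int nr_after;
	};

	/* Follow forward edges only; the toy graph is acyclic, so no visited set. */
	static bool reaches(struct class *src, struct class *dst)
	{
		int i;

		if (src == dst)
			return true;
		for (i = 0; i < src->nr_after; i++)
			if (reaches(src->after[i], dst))
				return true;
		return false;
	}

	int main(void)
	{
		struct class a = { .name = "A" }, b = { .name = "B" }, c = { .name = "C" };

		a.after[a.nr_after++] = &b;	/* A taken before B */
		b.after[b.nr_after++] = &c;	/* B taken before C */

		/* A reaches C through forward edges alone; no backward list needed. */
		printf("A reaches C: %s\n", reaches(&a, &c) ? "yes" : "no");
		return 0;
	}

This sketch only illustrates reachability over forward edges; it does not
reproduce lockdep's actual graph walk.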
 include/linux/lockdep.h  |  5 ++---
 kernel/locking/lockdep.c | 15 +++------------
 2 files changed, 5 insertions(+), 15 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 0e209b8..d0a587c 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -97,10 +97,9 @@ struct lock_class {
 
 	/*
 	 * These fields represent a directed graph of lock dependencies,
-	 * to every node we attach a list of "forward" and a list of
-	 * "backward" graph nodes.
+	 * to every node we attach a list of "forward" graph nodes.
 	 */
-	struct list_head		locks_after, locks_before;
+	struct list_head		locks_after;
 
 	struct lockdep_subclass_key	*key;
 	unsigned int			subclass;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index acaa3b3..fa6611e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -823,8 +823,7 @@ static bool in_list(struct list_head *e, struct list_head *h)
 }
 
 /*
- * Check whether entry @e occurs in any of the locks_after or locks_before
- * lists.
+ * Check whether entry @e occurs in any of the locks_after lists.
  */
 static bool in_any_class_list(struct list_head *e)
 {
@@ -833,8 +832,7 @@ static bool in_any_class_list(struct list_head *e)
 
 	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
 		class = &lock_classes[i];
-		if (in_list(e, &class->locks_after) ||
-		    in_list(e, &class->locks_before))
+		if (in_list(e, &class->locks_after))
 			return true;
 	}
 	return false;
@@ -922,8 +920,6 @@ static bool __check_data_structures(void)
 	/* Check whether all classes have valid lock lists. */
 	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
 		class = &lock_classes[i];
-		if (!class_lock_list_valid(class, &class->locks_before))
-			return false;
 		if (!class_lock_list_valid(class, &class->locks_after))
 			return false;
 	}
@@ -1021,7 +1017,6 @@ static void init_data_structures_once(void)
 	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
 		list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
 		INIT_LIST_HEAD(&lock_classes[i].locks_after);
-		INIT_LIST_HEAD(&lock_classes[i].locks_before);
 	}
 }
 
@@ -1151,7 +1146,6 @@ static bool is_dynamic_key(const struct lock_class_key *key)
 	class->key = key;
 	class->name = lock->name;
 	class->subclass = subclass;
-	WARN_ON_ONCE(!list_empty(&class->locks_before));
 	WARN_ON_ONCE(!list_empty(&class->locks_after));
 	class->name_version = count_matching_names(class);
 	for (i = 0; i < ARRAY_SIZE(class->irqsafe_distance); i++)
@@ -4798,8 +4792,7 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
 		nr_list_entries--;
 		list_del_rcu(&entry->entry);
 	}
-	if (list_empty(&class->locks_after) &&
-	    list_empty(&class->locks_before)) {
+	if (list_empty(&class->locks_after)) {
 		list_move_tail(&class->lock_entry, &pf->zapped);
 		hlist_del_rcu(&class->hash_entry);
 		WRITE_ONCE(class->key, NULL);
@@ -4822,11 +4815,9 @@ static void reinit_class(struct lock_class *class)
 
 	WARN_ON_ONCE(!class->lock_entry.next);
 	WARN_ON_ONCE(!list_empty(&class->locks_after));
-	WARN_ON_ONCE(!list_empty(&class->locks_before));
 	memset(p + offset, 0, sizeof(*class) - offset);
 	WARN_ON_ONCE(!class->lock_entry.next);
 	WARN_ON_ONCE(!list_empty(&class->locks_after));
-	WARN_ON_ONCE(!list_empty(&class->locks_before));
 }
 
 static inline int within(const void *addr, void *start, unsigned long size)
-- 
1.8.3.1
