Message-ID: <20200216074636.GB14025@workstation-portable>
Date:   Sun, 16 Feb 2020 13:16:36 +0530
From:   Amol Grover <frextrite@...il.com>
To:     Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>, Will Deacon <will@...nel.org>
Cc:     linux-kernel@...r.kernel.org,
        linux-kernel-mentees@...ts.linuxfoundation.org,
        Joel Fernandes <joel@...lfernandes.org>,
        Madhuparna Bhowmik <madhuparnabhowmik04@...il.com>,
        "Paul E . McKenney" <paulmck@...nel.org>,
        Amol Grover <frextrite@...il.com>
Subject: [PATCH RESEND] lockdep: Pass lockdep expression to RCU lists

Data is traversed using hlist_for_each_entry_rcu() outside an
RCU read-side critical section, but under the protection of
either lockdep_lock or disabled irqs.

Hence, pass the corresponding lockdep expression to silence
false-positive lockdep warnings and harden the RCU lists (see the
sketch below). Also add a helper macro, graph_lock_held(), for the
lockdep expression that checks lockdep_lock.

Two things to note:
- RCU traversals protected by both disabled irqs and the graph lock
carry both checks in the lockdep expression.
- RCU traversals protected only by disabled irqs get no explicit
lockdep expression, since that condition is already checked
implicitly.
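
For illustration only (not part of this patch), a minimal sketch of the
general pattern, using a hypothetical foo_list protected by a
hypothetical update-side foo_lock: the optional fourth argument of
hlist_for_each_entry_rcu() is a lockdep condition which, when true,
suppresses the false-positive warning for traversals done outside
rcu_read_lock():

#include <linux/rculist.h>
#include <linux/spinlock.h>

struct foo {
	int key;
	struct hlist_node node;
};

static DEFINE_SPINLOCK(foo_lock);	/* update-side lock (hypothetical) */
static HLIST_HEAD(foo_list);

/* Called with foo_lock held, outside an RCU read-side critical section. */
static struct foo *foo_lookup_locked(int key)
{
	struct foo *f;

	hlist_for_each_entry_rcu(f, &foo_list, node,
				 lockdep_is_held(&foo_lock)) {
		if (f->key == key)
			return f;
	}
	return NULL;
}

In lockdep.c itself the lock is an arch_spinlock_t (lockdep cannot
track its own lock), so the patch checks it with arch_spin_is_locked()
via the new graph_lock_held() macro rather than lockdep_is_held().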

Signed-off-by: Amol Grover <frextrite@...il.com>
---
 kernel/locking/lockdep.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 32282e7112d3..696ad5d4daed 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -85,6 +85,8 @@ module_param(lock_stat, int, 0644);
  * code to recurse back into the lockdep code...
  */
 static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+#define graph_lock_held() \
+	arch_spin_is_locked(&lockdep_lock)
 static struct task_struct *lockdep_selftest_task_struct;
 
 static int graph_lock(void)
@@ -1009,7 +1011,7 @@ static bool __check_data_structures(void)
 	/* Check the chain_key of all lock chains. */
 	for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
 		head = chainhash_table + i;
-		hlist_for_each_entry_rcu(chain, head, entry) {
+		hlist_for_each_entry_rcu(chain, head, entry, graph_lock_held()) {
 			if (!check_lock_chain_key(chain))
 				return false;
 		}
@@ -1124,7 +1126,8 @@ void lockdep_register_key(struct lock_class_key *key)
 	raw_local_irq_save(flags);
 	if (!graph_lock())
 		goto restore_irqs;
-	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+	hlist_for_each_entry_rcu(k, hash_head, hash_entry,
+				 irqs_disabled() && graph_lock_held()) {
 		if (WARN_ON_ONCE(k == key))
 			goto out_unlock;
 	}
@@ -1203,7 +1206,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	 * We have to do the hash-walk again, to avoid races
 	 * with another CPU:
 	 */
-	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
+	hlist_for_each_entry_rcu(class, hash_head, hash_entry,
+				 irqs_disabled() && graph_lock_held()) {
 		if (class->key == key)
 			goto out_unlock_set;
 	}
@@ -2858,7 +2862,7 @@ static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
 	struct hlist_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
 
-	hlist_for_each_entry_rcu(chain, hash_head, entry) {
+	hlist_for_each_entry_rcu(chain, hash_head, entry, graph_lock_held()) {
 		if (READ_ONCE(chain->chain_key) == chain_key) {
 			debug_atomic_inc(chain_lookup_hits);
 			return chain;
@@ -4833,7 +4837,7 @@ static void remove_class_from_lock_chains(struct pending_free *pf,
 
 	for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
 		head = chainhash_table + i;
-		hlist_for_each_entry_rcu(chain, head, entry) {
+		hlist_for_each_entry_rcu(chain, head, entry, graph_lock_held()) {
 			remove_class_from_lock_chain(pf, chain, class);
 		}
 	}
@@ -4993,7 +4997,7 @@ static void __lockdep_free_key_range(struct pending_free *pf, void *start,
 	/* Unhash all classes that were created by a module. */
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
 		head = classhash_table + i;
-		hlist_for_each_entry_rcu(class, head, hash_entry) {
+		hlist_for_each_entry_rcu(class, head, hash_entry, graph_lock_held()) {
 			if (!within(class->key, start, size) &&
 			    !within(class->name, start, size))
 				continue;
@@ -5076,7 +5080,7 @@ static bool lock_class_cache_is_registered(struct lockdep_map *lock)
 
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
 		head = classhash_table + i;
-		hlist_for_each_entry_rcu(class, head, hash_entry) {
+		hlist_for_each_entry_rcu(class, head, hash_entry, graph_lock_held()) {
 			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
 				if (lock->class_cache[j] == class)
 					return true;
@@ -5181,7 +5185,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
 		goto out_irq;
 
 	pf = get_pending_free();
-	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+	hlist_for_each_entry_rcu(k, hash_head, hash_entry,
+				 irqs_disabled() && graph_lock_held()) {
 		if (k == key) {
 			hlist_del_rcu(&k->hash_entry);
 			found = true;
-- 
2.24.1
