lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-Id: <20250320143120.3119975-3-sashal@kernel.org>
Date: Thu, 20 Mar 2025 10:31:19 -0400
From: Sasha Levin <sashal@...nel.org>
To: peterz@...radead.org,
	mingo@...hat.com,
	will@...nel.org,
	boqun.feng@...il.com
Cc: longman@...hat.com,
	linux-kernel@...r.kernel.org,
	Sasha Levin <sashal@...nel.org>
Subject: [PATCH 3/4] locking/lockdep: Use hashtable.h for chainhash_table

Convert chainhash_table in lockdep.c to use the generic hashtable
implementation from hashtable.h instead of the manual hlist_head array
implementation.

This simplifies the code and makes it more maintainable: using the
standard hashtable API removes the need for the custom hash lookup
macros.

Signed-off-by: Sasha Levin <sashal@...nel.org>
---
 kernel/locking/lockdep.c | 26 +++++++++-----------------
 1 file changed, 9 insertions(+), 17 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index b071bbf0d955c..151fec00f1c2c 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -412,11 +412,7 @@ static DEFINE_HASHTABLE(classhash_table, CLASSHASH_BITS);
  * their existence:
  */
 #define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
-#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
-#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
-#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))
-
-static struct hlist_head chainhash_table[CHAINHASH_SIZE];
+DEFINE_HASHTABLE(chainhash_table, CHAINHASH_BITS);
 
 /*
  * the id of held_lock
@@ -3716,7 +3712,6 @@ static inline int add_chain_cache(struct task_struct *curr,
 				  struct held_lock *hlock,
 				  u64 chain_key)
 {
-	struct hlist_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
 	int i, j;
 
@@ -3767,7 +3762,7 @@ static inline int add_chain_cache(struct task_struct *curr,
 		chain_hlocks[chain->base + j] = lock_id;
 	}
 	chain_hlocks[chain->base + j] = hlock_id(hlock);
-	hlist_add_head_rcu(&chain->entry, hash_head);
+	hash_add_rcu(chainhash_table, &chain->entry, chain_key);
 	debug_atomic_inc(chain_lookup_misses);
 	inc_chains(chain->irq_context);
 
@@ -3780,10 +3775,9 @@ static inline int add_chain_cache(struct task_struct *curr,
  */
 static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
 {
-	struct hlist_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
 
-	hlist_for_each_entry_rcu(chain, hash_head, entry) {
+	hash_for_each_possible_rcu(chainhash_table, chain, entry, chain_key) {
 		if (READ_ONCE(chain->chain_key) == chain_key) {
 			debug_atomic_inc(chain_lookup_hits);
 			return chain;
@@ -6142,7 +6136,6 @@ EXPORT_SYMBOL_GPL(lock_acquired);
 void lockdep_reset(void)
 {
 	unsigned long flags;
-	int i;
 
 	raw_local_irq_save(flags);
 	lockdep_init_task(current);
@@ -6151,8 +6144,7 @@ void lockdep_reset(void)
 	nr_softirq_chains = 0;
 	nr_process_chains = 0;
 	debug_locks = 1;
-	for (i = 0; i < CHAINHASH_SIZE; i++)
-		INIT_HLIST_HEAD(chainhash_table + i);
+	hash_init(chainhash_table);
 	raw_local_irq_restore(flags);
 }
 
@@ -6183,10 +6175,10 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
 	dec_chains(chain->irq_context);
 
 	/*
-	 * Note: calling hlist_del_rcu() from inside a
-	 * hlist_for_each_entry_rcu() loop is safe.
+	 * Note: calling hash_del_rcu() from inside a
+	 * hash_for_each_rcu() loop is safe.
 	 */
-	hlist_del_rcu(&chain->entry);
+	hash_del_rcu(&chain->entry);
 	__set_bit(chain - lock_chains, pf->lock_chains_being_freed);
 	nr_zapped_lock_chains++;
 #endif
@@ -6229,7 +6221,7 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
 	if (list_empty(&class->locks_after) &&
 	    list_empty(&class->locks_before)) {
 		list_move_tail(&class->lock_entry, &pf->zapped);
-		hlist_del_rcu(&class->hash_entry);
+		hash_del_rcu(&class->hash_entry);
 		WRITE_ONCE(class->key, NULL);
 		WRITE_ONCE(class->name, NULL);
 		nr_lock_classes--;
@@ -6587,7 +6579,7 @@ void __init lockdep_init(void)
 	pr_info("... CLASSHASH_SIZE:          %lu\n", HASH_SIZE(classhash_table));
 	pr_info("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
 	pr_info("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
-	pr_info("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
+	pr_info("... CHAINHASH_SIZE:          %lu\n", HASH_SIZE(chainhash_table));
 
 	pr_info(" memory used by lock dependency info: %zu kB\n",
 	       (sizeof(lock_classes) +
-- 
2.39.5


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ