Message-Id: <20250320143120.3119975-4-sashal@kernel.org>
Date: Thu, 20 Mar 2025 10:31:20 -0400
From: Sasha Levin <sashal@...nel.org>
To: peterz@...radead.org,
	mingo@...hat.com,
	will@...nel.org,
	boqun.feng@...il.com
Cc: longman@...hat.com,
	linux-kernel@...r.kernel.org,
	Sasha Levin <sashal@...nel.org>
Subject: [PATCH 4/4] locking/lockdep: Use hashtable.h for stack_trace_hash

Convert stack_trace_hash in lockdep.c to use the generic hashtable
implementation from hashtable.h instead of a manually managed
hlist_head array.

This simplifies the code and makes it more maintainable: the standard
hashtable API replaces the open-coded bucket indexing, insertion and
iteration, so no custom hash lookup code is needed.

Signed-off-by: Sasha Levin <sashal@...nel.org>
---
 kernel/locking/lockdep.c | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)
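
For reviewers unfamiliar with the generic API, a minimal sketch of the
hashtable.h pattern this patch switches to is below (the example struct
and function names are hypothetical and not part of the patch):

#include <linux/hashtable.h>

/* DEFINE_HASHTABLE() takes the number of bits, so 4 means 2^4 = 16 buckets. */
static DEFINE_HASHTABLE(example_hash, 4);

struct example_entry {
	u32 key;
	struct hlist_node node;
};

static void example_insert(struct example_entry *e)
{
	/* hash_add() derives the bucket from the key internally. */
	hash_add(example_hash, &e->node, e->key);
}

static struct example_entry *example_lookup(u32 key)
{
	struct example_entry *e;

	/* Walk only the bucket that could contain 'key'. */
	hash_for_each_possible(example_hash, e, node, key)
		if (e->key == key)
			return e;

	return NULL;
}

static unsigned int example_count(void)
{
	struct example_entry *e;
	unsigned int c = 0;
	int bkt;

	/* hash_for_each() visits every entry in every bucket. */
	hash_for_each(example_hash, bkt, e, node)
		c++;

	return c;
}

HASH_SIZE() evaluates to the table's bucket count, which is why the
ARRAY_SIZE(stack_trace_hash) loop in lockdep_stack_hash_count() can be
converted to HASH_SIZE(stack_trace_hash) while still indexing the
buckets directly.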

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 151fec00f1c2c..987b81f2ddb77 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -541,7 +541,8 @@ struct lock_trace {
  * Stack-trace: sequence of lock_trace structures. Protected by the graph_lock.
  */
 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
-static struct hlist_head stack_trace_hash[STACK_TRACE_HASH_SIZE];
+#define STACK_TRACE_HASH_BITS (ilog2(STACK_TRACE_HASH_SIZE))
+static DEFINE_HASHTABLE(stack_trace_hash, STACK_TRACE_HASH_BITS);
 
 static bool traces_identical(struct lock_trace *t1, struct lock_trace *t2)
 {
@@ -553,7 +554,6 @@ static bool traces_identical(struct lock_trace *t1, struct lock_trace *t2)
 static struct lock_trace *save_trace(void)
 {
 	struct lock_trace *trace, *t2;
-	struct hlist_head *hash_head;
 	u32 hash;
 	int max_entries;
 
@@ -580,13 +580,13 @@ static struct lock_trace *save_trace(void)
 	hash = jhash(trace->entries, trace->nr_entries *
 		     sizeof(trace->entries[0]), 0);
 	trace->hash = hash;
-	hash_head = stack_trace_hash + (hash & (STACK_TRACE_HASH_SIZE - 1));
-	hlist_for_each_entry(t2, hash_head, hash_entry) {
+
+	hash_for_each_possible(stack_trace_hash, t2, hash_entry, hash)
 		if (traces_identical(trace, t2))
 			return t2;
-	}
+
 	nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries;
-	hlist_add_head(&trace->hash_entry, hash_head);
+	hash_add(stack_trace_hash, &trace->hash_entry, hash);
 
 	return trace;
 }
@@ -598,11 +598,8 @@ u64 lockdep_stack_trace_count(void)
 	u64 c = 0;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++) {
-		hlist_for_each_entry(trace, &stack_trace_hash[i], hash_entry) {
-			c++;
-		}
-	}
+	hash_for_each(stack_trace_hash, i, trace, hash_entry)
+		c++;
 
 	return c;
 }
@@ -613,7 +610,7 @@ u64 lockdep_stack_hash_count(void)
 	u64 c = 0;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++)
+	for (i = 0; i < HASH_SIZE(stack_trace_hash); i++)
 		if (!hlist_empty(&stack_trace_hash[i]))
 			c++;
 
-- 
2.39.5

