Message-ID: <68111ac5.810a0220.2b20a.1d32@mx.google.com>
Date: Tue, 29 Apr 2025 11:30:26 -0700
From: Boqun Feng <boqun.feng@...il.com>
To: Sasha Levin <sashal@...nel.org>
Cc: peterz@...radead.org, mingo@...hat.com, will@...nel.org,
longman@...hat.com, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/4] locking/lockdep: Use hashtable.h for lock_keys_hash
Hi Sasha,
Thanks for the patches. I want to let you know that we are currently
making some changes to the hash list usage in lockdep:

  https://lore.kernel.org/lkml/20250414060055.341516-1-boqun.feng@gmail.com/

so it may take a while before I can get back to looking into this (until
that gets sorted out).
Regards,
Boqun
On Thu, Mar 20, 2025 at 10:31:17AM -0400, Sasha Levin wrote:
> Convert the lock_keys_hash array in lockdep.c to use the generic
> hashtable implementation from hashtable.h instead of the manual
> hlist_head array implementation.
>
> This simplifies the code and makes it more maintainable by using the
> standard hashtable API defined in hashtable.h, while preserving the
> RCU-safe behavior with proper RCU variants of the hashtable functions.
>
> Signed-off-by: Sasha Levin <sashal@...nel.org>
> ---
> kernel/locking/lockdep.c | 34 ++++++++++++++++------------------
> 1 file changed, 16 insertions(+), 18 deletions(-)
>
> diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
> index 4470680f02269..160cf8310eda0 100644
> --- a/kernel/locking/lockdep.c
> +++ b/kernel/locking/lockdep.c
> @@ -57,6 +57,7 @@
> #include <linux/lockdep.h>
> #include <linux/context_tracking.h>
> #include <linux/console.h>
> +#include <linux/hashtable.h>
>
> #include <asm/sections.h>
>
> @@ -212,8 +213,7 @@ static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
> * in use.
> */
> #define KEYHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
> -#define KEYHASH_SIZE (1UL << KEYHASH_BITS)
> -static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
> +static DEFINE_HASHTABLE(lock_keys_hash, KEYHASH_BITS);
> unsigned long nr_lock_classes;
> unsigned long nr_zapped_classes;
> unsigned long max_lock_class_idx;
> @@ -1209,32 +1209,28 @@ static void init_data_structures_once(void)
> 	init_chain_block_buckets();
> }
>
> -static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
> -{
> -	unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);
> -
> -	return lock_keys_hash + hash;
> -}
>
> /* Register a dynamically allocated key. */
> void lockdep_register_key(struct lock_class_key *key)
> {
> -	struct hlist_head *hash_head;
> 	struct lock_class_key *k;
> +	unsigned long hash;
> 	unsigned long flags;
>
> 	if (WARN_ON_ONCE(static_obj(key)))
> 		return;
> -	hash_head = keyhashentry(key);
> +
> +	hash = hash_long((uintptr_t)key, KEYHASH_BITS);
>
> 	raw_local_irq_save(flags);
> 	if (!graph_lock())
> 		goto restore_irqs;
> -	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
> +
> +	hash_for_each_possible_rcu(lock_keys_hash, k, hash_entry, hash) {
> 		if (WARN_ON_ONCE(k == key))
> 			goto out_unlock;
> 	}
> -	hlist_add_head_rcu(&key->hash_entry, hash_head);
> +	hash_add_rcu(lock_keys_hash, &key->hash_entry, hash);
> out_unlock:
> 	graph_unlock();
> restore_irqs:
> @@ -1245,8 +1241,8 @@ EXPORT_SYMBOL_GPL(lockdep_register_key);
> /* Check whether a key has been registered as a dynamic key. */
> static bool is_dynamic_key(const struct lock_class_key *key)
> {
> -	struct hlist_head *hash_head;
> 	struct lock_class_key *k;
> +	unsigned long hash;
> 	bool found = false;
>
> 	if (WARN_ON_ONCE(static_obj(key)))
> @@ -1260,10 +1256,10 @@ static bool is_dynamic_key(const struct lock_class_key *key)
> 	if (!debug_locks)
> 		return true;
>
> -	hash_head = keyhashentry(key);
> +	hash = hash_long((uintptr_t)key, KEYHASH_BITS);
>
> 	rcu_read_lock();
> -	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
> +	hash_for_each_possible_rcu(lock_keys_hash, k, hash_entry, hash) {
> 		if (k == key) {
> 			found = true;
> 			break;
> @@ -6561,9 +6557,9 @@ void lockdep_reset_lock(struct lockdep_map *lock)
>  */
> void lockdep_unregister_key(struct lock_class_key *key)
> {
> -	struct hlist_head *hash_head = keyhashentry(key);
> 	struct lock_class_key *k;
> 	struct pending_free *pf;
> +	unsigned long hash;
> 	unsigned long flags;
> 	bool found = false;
> 	bool need_callback = false;
> @@ -6573,12 +6569,14 @@ void lockdep_unregister_key(struct lock_class_key *key)
> 	if (WARN_ON_ONCE(static_obj(key)))
> 		return;
>
> +	hash = hash_long((uintptr_t)key, KEYHASH_BITS);
> +
> 	raw_local_irq_save(flags);
> 	lockdep_lock();
>
> -	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
> +	hash_for_each_possible(lock_keys_hash, k, hash_entry, hash) {
> 		if (k == key) {
> -			hlist_del_rcu(&k->hash_entry);
> +			hash_del_rcu(&k->hash_entry);
> 			found = true;
> 			break;
> 		}
> --
> 2.39.5
>
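For reference, here is a minimal, self-contained sketch of the
<linux/hashtable.h> pattern the patch adopts. It is not taken from the
patch: my_hash, my_key, MY_HASH_BITS and the my_*() helpers are made-up
names used only to illustrate DEFINE_HASHTABLE(), hash_add_rcu(),
hash_for_each_possible_rcu() and hash_del_rcu().

#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

#define MY_HASH_BITS	6

/* 2^MY_HASH_BITS hlist_head buckets, statically initialized to empty. */
static DEFINE_HASHTABLE(my_hash, MY_HASH_BITS);

struct my_key {
	struct hlist_node hash_entry;
};

/* Publish a key; the caller must serialize against other writers. */
static void my_register(struct my_key *key)
{
	/* Hash the pointer value, as the patch does for lock_class_key. */
	unsigned long hash = hash_long((uintptr_t)key, MY_HASH_BITS);

	/*
	 * hash_add_rcu() picks the bucket from its last argument (reducing
	 * it again with hash_min() internally); add and lookup only have
	 * to agree on the value they pass.
	 */
	hash_add_rcu(my_hash, &key->hash_entry, hash);
}

/* RCU-protected lookup, mirroring is_dynamic_key() in the patch. */
static bool my_is_registered(const struct my_key *key)
{
	unsigned long hash = hash_long((uintptr_t)key, MY_HASH_BITS);
	struct my_key *k;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(my_hash, k, hash_entry, hash) {
		if (k == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

/* Unpublish a key; readers may still see it until a grace period ends. */
static void my_unregister(struct my_key *key)
{
	hash_del_rcu(&key->hash_entry);
}

As in lockdep itself, RCU only covers the readers here: concurrent
updaters still need their own serialization (graph_lock()/lockdep_lock()
in the patch), and memory removed with hash_del_rcu() must not be freed
until a grace period has elapsed.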