[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250203175939.3133477-2-bvanassche@acm.org>
Date: Mon, 3 Feb 2025 09:59:35 -0800
From: Bart Van Assche <bvanassche@....org>
To: Thomas Gleixner <tglx@...utronix.de>
Cc: Peter Zijlstra <peterz@...radead.org>,
linux-kernel@...r.kernel.org,
Bart Van Assche <bvanassche@....org>,
Ingo Molnar <mingo@...hat.com>,
Will Deacon <will@...nel.org>,
Waiman Long <longman@...hat.com>
Subject: [PATCH 1/5] lockdep: Introduce lockdep_unregister_key_nosync()
Add a variant of lockdep_unregister_key() that does not sleep and hence
may be called from inside RCU callbacks.
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Will Deacon <will@...nel.org>
Cc: Waiman Long <longman@...hat.com>
Signed-off-by: Bart Van Assche <bvanassche@....org>
---
include/linux/lockdep.h | 5 +++++
kernel/locking/lockdep.c | 22 +++++++++++++++++++---
2 files changed, 24 insertions(+), 3 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 67964dc4db95..afb3f0ec7304 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -117,6 +117,7 @@ do { \
} while (0)
extern void lockdep_register_key(struct lock_class_key *key);
+extern void lockdep_unregister_key_nosync(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);
/*
@@ -372,6 +373,10 @@ static inline void lockdep_register_key(struct lock_class_key *key)
{
}
+static inline void lockdep_unregister_key_nosync(struct lock_class_key *key)
+{
+}
+
static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 4470680f0226..6e0423df9ebe 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -6558,8 +6558,11 @@ void lockdep_reset_lock(struct lockdep_map *lock)
* Unlike lockdep_register_key(), a search is always done to find a matching
* key irrespective of debug_locks to avoid potential invalid access to freed
* memory in lock_class entry.
+ *
+ * Does not call synchronize_rcu(). The caller is responsible for making sure
+ * that memory is only freed after concurrent accesses have finished.
*/
-void lockdep_unregister_key(struct lock_class_key *key)
+void lockdep_unregister_key_nosync(struct lock_class_key *key)
{
struct hlist_head *hash_head = keyhashentry(key);
struct lock_class_key *k;
@@ -6568,8 +6571,6 @@ void lockdep_unregister_key(struct lock_class_key *key)
bool found = false;
bool need_callback = false;
- might_sleep();
-
if (WARN_ON_ONCE(static_obj(key)))
return;
@@ -6594,6 +6595,21 @@ void lockdep_unregister_key(struct lock_class_key *key)
if (need_callback)
call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+}
+EXPORT_SYMBOL_GPL(lockdep_unregister_key_nosync);
+
+/*
+ * Unregister a dynamically allocated key.
+ *
+ * Unlike lockdep_register_key(), a search is always done to find a matching
+ * key irrespective of debug_locks to avoid potential invalid access to freed
+ * memory in lock_class entry.
+ */
+void lockdep_unregister_key(struct lock_class_key *key)
+{
+ might_sleep();
+
+ lockdep_unregister_key_nosync(key);
/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
synchronize_rcu();
Powered by blists - more mailing lists