Message-ID: <20251123010157.2076095-1-longman@redhat.com>
Date: Sat, 22 Nov 2025 20:01:57 -0500
From: Waiman Long <longman@...hat.com>
To: Peter Zijlstra <peterz@...radead.org>,
	Ingo Molnar <mingo@...hat.com>,
	Will Deacon <will@...nel.org>,
	Boqun Feng <boqun.feng@...il.com>
Cc: linux-kernel@...r.kernel.org,
	Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
	Waiman Long <longman@...hat.com>
Subject: [PATCH] locking/rwsem: Redo __init_rwsem()

This is the rwsem counterpart of Sebastian's commit 3572e2edc7b6
("locking/mutex: Redo __mutex_init()"). The aim is to avoid storing
the lock name strings, which consume memory but are never used when
lockdep is disabled.

__init_rwsem() is split into a _generic version, used when lockdep
(CONFIG_DEBUG_LOCK_ALLOC) is disabled, and a _lockdep version, used
when it is enabled. This makes the rwsem code behave like the
mutex code.
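
For illustration only (not part of the patch), here is a minimal sketch
of how a typical caller expands after this change; "my_dev" and
my_dev_setup() are hypothetical names:

	/* hypothetical caller, sketch only */
	struct my_dev {
		struct rw_semaphore lock;
	};

	static void my_dev_setup(struct my_dev *dev)
	{
		/*
		 * With CONFIG_DEBUG_LOCK_ALLOC=n, init_rwsem() now expands to
		 * a plain rwsem_init_generic(&dev->lock) call (or
		 * rwsem_rt_init_generic() on PREEMPT_RT), so no "#sem" name
		 * string and no static lock_class_key are emitted for this
		 * call site.
		 *
		 * With CONFIG_DEBUG_LOCK_ALLOC=y, it still expands to
		 *
		 *	static struct lock_class_key __key;
		 *	rwsem_init_lockdep(&dev->lock, "&dev->lock", &__key);
		 *
		 * so the lockdep class name and key are preserved as before.
		 */
		init_rwsem(&dev->lock);
	}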

This results in the following kernel size reduction with defconfig
based config files:

   text	      data      bss       dec    filename
  30200741  8165526   1188464  39554731  vmlinux
  30200725  8161430   1188464  39550619  vmlinux (patched)

  32417939  8475218  12946524  53839681  vmlinux.lockdep
  32417945  8475218  12946524  53839687  vmlinux.lockdep (patched)

  27119989  7195918   2075852  36391759  vmlinux.preempt_rt
  27119996  7195918   2075852  36391766  vmlinux.preempt_rt (patched)

  29349572  7509862  13792416  50651850  vmlinux.preempt_rt.lockdep

The kernel size reduction for the lockdep-disabled cases isn't as
noticeable as in the mutex case because there are an order of
magnitude more mutex_init() calls than init_rwsem() calls in the
kernel.

Signed-off-by: Waiman Long <longman@...hat.com>
---
 include/linux/rwsem.h  | 45 +++++++++++++++++++++++++++++++++------
 kernel/locking/rwsem.c | 48 ++++++++++++++++++++++++++++--------------
 2 files changed, 71 insertions(+), 22 deletions(-)

diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index f1aaf676a874..4045ddf2b698 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -111,15 +111,32 @@ static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
 
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-			 struct lock_class_key *key);
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
+extern void rwsem_init_generic(struct rw_semaphore *sem);
+
+static inline void __init_rwsem(struct rw_semaphore *sem, const char *name,
+				struct lock_class_key *key)
+{
+	rwsem_init_generic(sem);
+}
+
+#define init_rwsem(sem)	rwsem_init_generic(sem)
+#else
+extern void rwsem_init_lockdep(struct rw_semaphore *sem, const char *name,
+			       struct lock_class_key *key);
+static inline void __init_rwsem(struct rw_semaphore *sem, const char *name,
+				struct lock_class_key *key)
+{
+	rwsem_init_lockdep(sem, name, key);
+}
 
 #define init_rwsem(sem)						\
 do {								\
 	static struct lock_class_key __key;			\
 								\
-	__init_rwsem((sem), #sem, &__key);			\
+	rwsem_init_lockdep((sem), #sem, &__key);		\
 } while (0)
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
 
 /*
  * This is the same regardless of which rwsem implementation that is being used.
@@ -164,15 +181,31 @@ struct rw_semaphore {
 #define DECLARE_RWSEM(lockname) \
 	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
 
-extern void  __init_rwsem(struct rw_semaphore *rwsem, const char *name,
-			  struct lock_class_key *key);
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
+extern void  rwsem_rt_init_generic(struct rw_semaphore *rwsem);
+static inline void  __init_rwsem(struct rw_semaphore *rwsem, const char *name,
+				 struct lock_class_key *key)
+{
+	rwsem_rt_init_generic(rwsem);
+}
+
+#define init_rwsem(sem)	rwsem_rt_init_generic(sem)
+#else
+extern void rwsem_rt_init_lockdep(struct rw_semaphore *rwsem, const char *name,
+				  struct lock_class_key *key);
+static inline void  __init_rwsem(struct rw_semaphore *rwsem, const char *name,
+				 struct lock_class_key *key)
+{
+	rwsem_rt_init_lockdep(rwsem, name, key);
+}
 
 #define init_rwsem(sem)						\
 do {								\
 	static struct lock_class_key __key;			\
 								\
-	__init_rwsem((sem), #sem, &__key);			\
+	rwsem_rt_init_lockdep((sem), #sem, &__key);		\
 } while (0)
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
 
 static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
 {
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 24df4d98f7d2..ffb09e0520c9 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -306,16 +306,8 @@ rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
 /*
  * Initialize an rwsem:
  */
-void __init_rwsem(struct rw_semaphore *sem, const char *name,
-		  struct lock_class_key *key)
+static inline void __rwsem_init_generic(struct rw_semaphore *sem)
 {
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	/*
-	 * Make sure we are not reinitializing a held semaphore:
-	 */
-	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
-	lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
-#endif
 #ifdef CONFIG_DEBUG_RWSEMS
 	sem->magic = sem;
 #endif
@@ -327,7 +319,26 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	osq_lock_init(&sem->osq);
 #endif
 }
-EXPORT_SYMBOL(__init_rwsem);
+
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
+void rwsem_init_generic(struct rw_semaphore *sem)
+{
+	__rwsem_init_generic(sem);
+}
+EXPORT_SYMBOL(rwsem_init_generic);
+#else
+void rwsem_init_lockdep(struct rw_semaphore *sem, const char *name,
+			struct lock_class_key *key)
+{
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+	lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
+	__rwsem_init_generic(sem);
+}
+EXPORT_SYMBOL(rwsem_init_lockdep);
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
 
 enum rwsem_waiter_type {
 	RWSEM_WAITING_FOR_WRITE,
@@ -1449,17 +1460,22 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 
 #include "rwbase_rt.c"
 
-void __init_rwsem(struct rw_semaphore *sem, const char *name,
-		  struct lock_class_key *key)
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
+void rwsem_rt_init_generic(struct rw_semaphore *sem)
+{
+	init_rwbase_rt(&(sem)->rwbase);
+}
+EXPORT_SYMBOL(rwsem_rt_init_generic);
+#else
+void rwsem_rt_init_lockdep(struct rw_semaphore *sem, const char *name,
+			   struct lock_class_key *key)
 {
 	init_rwbase_rt(&(sem)->rwbase);
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
 	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
 	lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
-#endif
 }
-EXPORT_SYMBOL(__init_rwsem);
+EXPORT_SYMBOL(rwsem_rt_init_lockdep);
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
 
 static inline void __down_read(struct rw_semaphore *sem)
 {
-- 
2.51.1

