Message-Id: <20220628081707.1997728-5-guoren@kernel.org>
Date:   Tue, 28 Jun 2022 04:17:06 -0400
From:   guoren@...nel.org
To:     palmer@...osinc.com, arnd@...db.de, mingo@...hat.com,
        will@...nel.org, longman@...hat.com, boqun.feng@...il.com
Cc:     linux-riscv@...ts.infradead.org, linux-arch@...r.kernel.org,
        linux-kernel@...r.kernel.org, Guo Ren <guoren@...ux.alibaba.com>,
        Guo Ren <guoren@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>
Subject: [PATCH V7 4/5] asm-generic: spinlock: Add combo spinlock (ticket & queued)

From: Guo Ren <guoren@...ux.alibaba.com>

Some architectures have flexible requirements on the type of spinlock.
Some LL/SC ISAs do not force the micro-architecture to provide a strong
forward-progress guarantee, so micro-architectures with different
memory models can implement the same ISA. The ticket lock suits LL/SC
micro-architectures built around an exclusive monitor, with a limited
number of cores and no NUMA. The queued spinlock handles NUMA and
large-scale scenarios on LL/SC micro-architectures designed with a
strong forward-progress guarantee.

So, make the spinlock a combo of both, selected through a static key.
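
As an illustrative sketch only (not part of this patch): an
architecture selecting ARCH_USE_QUEUED_SPINLOCKS could flip the key in
early boot, before the __ro_after_init key is sealed, to fall back to
ticket locks when the micro-arch lacks the needed forward-progress
guarantee. The predicate below is hypothetical:

static void __init setup_spinlock_flavor(void)
{
	/*
	 * cpu_has_strong_forward_progress() is a made-up predicate
	 * for this sketch; a real port would derive it from ISA or
	 * errata detection. use_qspinlock_key defaults to true, so
	 * qspinlock is used unless the key is disabled here.
	 */
	if (!cpu_has_strong_forward_progress())
		static_branch_disable(&use_qspinlock_key);
}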

Signed-off-by: Guo Ren <guoren@...ux.alibaba.com>
Signed-off-by: Guo Ren <guoren@...nel.org>
Cc: Peter Zijlstra (Intel) <peterz@...radead.org>
Cc: Arnd Bergmann <arnd@...db.de>
Cc: Palmer Dabbelt <palmer@...osinc.com>
---
 include/asm-generic/spinlock.h | 43 ++++++++++++++++++++++++++++++++--
 kernel/locking/qspinlock.c     |  2 ++
 2 files changed, 43 insertions(+), 2 deletions(-)

diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h
index f41dc7c2b900..a9b43089bf99 100644
--- a/include/asm-generic/spinlock.h
+++ b/include/asm-generic/spinlock.h
@@ -28,34 +28,73 @@
 #define __ASM_GENERIC_SPINLOCK_H
 
 #include <asm-generic/ticket_spinlock.h>
+#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
+#include <linux/jump_label.h>
+#include <asm-generic/qspinlock.h>
+
+DECLARE_STATIC_KEY_TRUE(use_qspinlock_key);
+#endif
+
+#undef arch_spin_is_locked
+#undef arch_spin_is_contended
+#undef arch_spin_value_unlocked
+#undef arch_spin_lock
+#undef arch_spin_trylock
+#undef arch_spin_unlock
 
 static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
-	ticket_spin_lock(lock);
+#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
+	if (static_branch_likely(&use_qspinlock_key))
+		queued_spin_lock(lock);
+	else
+#endif
+		ticket_spin_lock(lock);
 }
 
 static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock)
 {
+#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
+	if (static_branch_likely(&use_qspinlock_key))
+		return queued_spin_trylock(lock);
+#endif
 	return ticket_spin_trylock(lock);
 }
 
 static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	ticket_spin_unlock(lock);
+#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
+	if (static_branch_likely(&use_qspinlock_key))
+		queued_spin_unlock(lock);
+	else
+#endif
+		ticket_spin_unlock(lock);
 }
 
 static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
+#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
+	if (static_branch_likely(&use_qspinlock_key))
+		return queued_spin_is_locked(lock);
+#endif
 	return ticket_spin_is_locked(lock);
 }
 
 static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
+#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
+	if (static_branch_likely(&use_qspinlock_key))
+		return queued_spin_is_contended(lock);
+#endif
 	return ticket_spin_is_contended(lock);
 }
 
 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
+#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
+	if (static_branch_likely(&use_qspinlock_key))
+		return queued_spin_value_unlocked(lock);
+#endif
 	return ticket_spin_value_unlocked(lock);
 }
 
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 65a9a10caa6f..b7f7436f42f6 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -566,6 +566,8 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 }
 EXPORT_SYMBOL(queued_spin_lock_slowpath);
 
+DEFINE_STATIC_KEY_TRUE_RO(use_qspinlock_key);
+
 /*
  * Generate the paravirt code for queued_spin_unlock_slowpath().
  */
-- 
2.36.1
