Message-Id: <20210409025131.4114078-4-willy@infradead.org>
Date: Fri, 9 Apr 2021 03:51:17 +0100
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: neilb@...e.de, peterz@...radead.org, mingo@...hat.com,
will@...nel.org, longman@...hat.com, boqun.feng@...il.com,
tglx@...utronix.de, bigeasy@...utronix.de
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-kernel@...r.kernel.org
Subject: [PATCH 03/17] bit_spinlock: Prepare for split_locks

Make bit_spin_lock() and variants variadic to help with the transition.
The split_lock parameter will become mandatory at the end of the series.
Also add bit_spin_lock_nested() and bit_spin_unlock_assign(), both of
which will be used by the rhashtable code later in this series.

Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
---
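For review context, not part of the patch: a caller-side sketch of the
transition. The variadic prototypes let existing two-argument callers
keep compiling while converted callers pass a struct split_lock. The
static definition of "buckets_lock" below is a hypothetical stand-in
for whatever the earlier patches in this series provide, and at this
point in the series the extra arguments are accepted but not yet
consumed.

#include <linux/bit_spinlock.h>
#include <linux/lockdep.h>	/* SINGLE_DEPTH_NESTING */

static struct split_lock buckets_lock;	/* hypothetical initialisation */

static void example(unsigned long *word)
{
	bit_spin_lock(0, word);			/* old form, still accepted */
	bit_spin_unlock(0, word);

	bit_spin_lock(0, word, &buckets_lock);	/* new form, mandatory later */
	bit_spin_unlock(0, word, &buckets_lock);

	bit_spin_lock_nested(0, word, &buckets_lock, SINGLE_DEPTH_NESTING);
	bit_spin_unlock(0, word, &buckets_lock);
}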
 include/linux/bit_spinlock.h | 43 ++++++++++++++++++++++++++++++++----
 1 file changed, 39 insertions(+), 4 deletions(-)
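
Also for context, not part of the patch: the rhashtable-style use that
bit_spin_unlock_assign() is aimed at. When bit 0 of a bucket head
pointer doubles as the lock bit, the unlock can publish a new head in
the same release store; the function and names below are illustrative
only.

/* Sketch only: bit 0 of *bucket is the lock bit, so storing a new
 * pointer value (aligned, hence bit 0 clear) both updates the head
 * and releases the lock in a single smp_store_release().
 */
static void set_bucket_head(unsigned long *bucket, void *new_head,
			    struct split_lock *sl)
{
	bit_spin_lock(0, bucket, sl);
	/* ... relink the chain while the bucket is locked ... */
	bit_spin_unlock_assign(bucket, (unsigned long)new_head, sl);
}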
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index bbc4730a6505..6c5bbb55b334 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -6,6 +6,7 @@
 #include <linux/preempt.h>
 #include <linux/atomic.h>
 #include <linux/bug.h>
+#include <linux/split_lock.h>
 
 /*
  * bit-based spin_lock()
@@ -13,7 +14,8 @@
  * Don't use this unless you really need to: spin_lock() and spin_unlock()
  * are significantly faster.
  */
-static inline void bit_spin_lock(int bitnum, unsigned long *addr)
+static inline void bit_spin_lock_nested(int bitnum, unsigned long *addr,
+		struct split_lock *lock, unsigned int subclass)
 {
 	/*
 	 * Assuming the lock is uncontended, this never enters
@@ -35,10 +37,27 @@ static inline void bit_spin_lock(int bitnum, unsigned long *addr)
 	__acquire(bitlock);
 }
 
+static inline void bit_spin_lock(int bitnum, unsigned long *addr,
+		...)
+{
+	preempt_disable();
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
+		preempt_enable();
+		do {
+			cpu_relax();
+		} while (test_bit(bitnum, addr));
+		preempt_disable();
+	}
+#endif
+	__acquire(bitlock);
+}
+
 /*
  * Return true if it was acquired
  */
-static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+static inline int bit_spin_trylock(int bitnum, unsigned long *addr,
+		...)
 {
 	preempt_disable();
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -54,7 +73,8 @@ static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
 /*
  * bit-based spin_unlock()
  */
-static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+static inline void bit_spin_unlock(int bitnum, unsigned long *addr,
+		...)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
 	BUG_ON(!test_bit(bitnum, addr));
@@ -71,7 +91,8 @@ static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
  * non-atomic version, which can be used eg. if the bit lock itself is
  * protecting the rest of the flags in the word.
  */
-static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+static inline void __bit_spin_unlock(int bitnum, unsigned long *addr,
+		...)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
 	BUG_ON(!test_bit(bitnum, addr));
@@ -83,6 +104,20 @@ static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
 	__release(bitlock);
 }
 
+/**
+ * bit_spin_unlock_assign - Unlock a bitlock by assignment of new value.
+ * @addr: Address to assign the value to.
+ * @val: New value to assign.
+ * @lock: Split lock that this bitlock is part of.
+ */
+static inline void bit_spin_unlock_assign(unsigned long *addr,
+		unsigned long val, struct split_lock *lock)
+{
+	smp_store_release(addr, val);
+	preempt_enable();
+	__release(bitlock);
+}
+
 /*
  * Return true if the lock is held.
  */
--
2.30.2