Date:   Wed, 13 Jul 2022 17:06:56 +1000
From:   Nicholas Piggin <npiggin@...il.com>
To:     Peter Zijlstra <peterz@...radead.org>
Cc:     Nicholas Piggin <npiggin@...il.com>,
        Ingo Molnar <mingo@...hat.com>, Will Deacon <will@...nel.org>,
        Waiman Long <longman@...hat.com>,
        Boqun Feng <boqun.feng@...il.com>,
        "linux-kernel @ vger . kernel . org" <linux-kernel@...r.kernel.org>
Subject: [PATCH v2 04/12] locking/qspinlock: move pv lock word helpers into qspinlock.c

There is no real reason not to keep all of the lock word bit manipulation
helpers together in qspinlock.c.

Signed-off-by: Nicholas Piggin <npiggin@...il.com>
---
 kernel/locking/qspinlock.c          | 109 +++++++++++++++++-----------
 kernel/locking/qspinlock_paravirt.h |  51 -------------
 2 files changed, 65 insertions(+), 95 deletions(-)

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 7360d643de29..95bf24d276c3 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -141,7 +141,26 @@ struct qnode *grab_qnode(struct qnode *base, int idx)
 
 #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
 
+/**
+ * set_pending - set the pending bit.
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,0,* -> *,1,*
+ *
+ * For paravirt, the pending bit is used by the queue head vCPU to indicate
+ * that it is actively spinning on the lock and no lock stealing is allowed.
+ * For non-paravirt, the pending bit is used to avoid loading the extra node
+ * cacheline in the likely contended case.
+ */
+static __always_inline void set_pending(struct qspinlock *lock)
+{
 #if _Q_PENDING_BITS == 8
+	WRITE_ONCE(lock->pending, 1);
+#else
+	atomic_or(_Q_PENDING_VAL, &lock->val);
+#endif
+}
+
 /**
  * clear_pending - clear the pending bit.
  * @lock: Pointer to queued spinlock structure
@@ -150,7 +169,11 @@ struct qnode *grab_qnode(struct qnode *base, int idx)
  */
 static __always_inline void clear_pending(struct qspinlock *lock)
 {
+#if _Q_PENDING_BITS == 8
 	WRITE_ONCE(lock->pending, 0);
+#else
+	atomic_andnot(_Q_PENDING_VAL, &lock->val);
+#endif
 }
 
 /**
@@ -163,74 +186,72 @@ static __always_inline void clear_pending(struct qspinlock *lock)
  */
 static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
 {
+#if _Q_PENDING_BITS == 8
 	WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
+#else
+	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
+#endif
 }
 
-/*
- * xchg_tail - Put in the new queue tail code word & retrieve previous one
- * @lock : Pointer to queued spinlock structure
- * @tail : The new queue tail code word
- * Return: The previous queue tail code word
- *
- * xchg(lock, tail), which heads an address dependency
- *
- * p,*,* -> n,*,* ; prev = xchg(lock, node)
- */
-static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
-{
-	/*
-	 * We can use relaxed semantics since the caller ensures that the
-	 * MCS node is properly initialized before updating the tail.
-	 */
-	return (u32)xchg_relaxed(&lock->tail,
-				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
-}
-
-#else /* _Q_PENDING_BITS == 8 */
-
 /**
- * clear_pending - clear the pending bit.
+ * trylock_clear_pending - try to take ownership and clear the pending bit
  * @lock: Pointer to queued spinlock structure
  *
- * *,1,* -> *,0,*
+ * 0,1,0 -> 0,0,1
  */
-static __always_inline void clear_pending(struct qspinlock *lock)
+static __always_inline int trylock_clear_pending(struct qspinlock *lock)
 {
-	atomic_andnot(_Q_PENDING_VAL, &lock->val);
-}
+#if _Q_PENDING_BITS == 8
+	return !READ_ONCE(lock->locked) &&
+	       (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
+				_Q_LOCKED_VAL) == _Q_PENDING_VAL);
+#else
+	int val = atomic_read(&lock->val);
 
-/**
- * clear_pending_set_locked - take ownership and clear the pending bit.
- * @lock: Pointer to queued spinlock structure
- *
- * *,1,0 -> *,0,1
- */
-static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
-{
-	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
+	for (;;) {
+		int old, new;
+
+		if (val  & _Q_LOCKED_MASK)
+			break;
+
+		/*
+		 * Try to clear pending bit & set locked bit
+		 */
+		old = val;
+		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
+		val = atomic_cmpxchg_acquire(&lock->val, old, new);
+
+		if (val == old)
+			return 1;
+	}
+	return 0;
+#endif
 }
 
-/**
+/*
  * xchg_tail - Put in the new queue tail code word & retrieve previous one
  * @lock : Pointer to queued spinlock structure
  * @tail : The new queue tail code word
  * Return: The previous queue tail code word
  *
- * xchg(lock, tail)
+ * xchg(lock, tail), which heads an address dependency
  *
  * p,*,* -> n,*,* ; prev = xchg(lock, node)
  */
 static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
 {
+	/*
+	 * We can use relaxed semantics since the caller ensures that the
+	 * MCS node is properly initialized before updating the tail.
+	 */
+#if _Q_PENDING_BITS == 8
+	return (u32)xchg_relaxed(&lock->tail,
+				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
+#else
 	u32 old, new, val = atomic_read(&lock->val);
 
 	for (;;) {
 		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
-		/*
-		 * We can use relaxed semantics since the caller ensures that
-		 * the MCS node is properly initialized before updating the
-		 * tail.
-		 */
 		old = atomic_cmpxchg_relaxed(&lock->val, val, new);
 		if (old == val)
 			break;
@@ -238,8 +259,8 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
 		val = old;
 	}
 	return old;
+#endif
 }
-#endif /* _Q_PENDING_BITS == 8 */
 
 /**
  * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index cce3d3dde216..97385861adc2 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -95,57 +95,6 @@ static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
 	return false;
 }
 
-/*
- * The pending bit is used by the queue head vCPU to indicate that it
- * is actively spinning on the lock and no lock stealing is allowed.
- */
-#if _Q_PENDING_BITS == 8
-static __always_inline void set_pending(struct qspinlock *lock)
-{
-	WRITE_ONCE(lock->pending, 1);
-}
-
-/*
- * The pending bit check in pv_queued_spin_steal_lock() isn't a memory
- * barrier. Therefore, an atomic cmpxchg_acquire() is used to acquire the
- * lock just to be sure that it will get it.
- */
-static __always_inline int trylock_clear_pending(struct qspinlock *lock)
-{
-	return !READ_ONCE(lock->locked) &&
-	       (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
-				_Q_LOCKED_VAL) == _Q_PENDING_VAL);
-}
-#else /* _Q_PENDING_BITS == 8 */
-static __always_inline void set_pending(struct qspinlock *lock)
-{
-	atomic_or(_Q_PENDING_VAL, &lock->val);
-}
-
-static __always_inline int trylock_clear_pending(struct qspinlock *lock)
-{
-	int val = atomic_read(&lock->val);
-
-	for (;;) {
-		int old, new;
-
-		if (val  & _Q_LOCKED_MASK)
-			break;
-
-		/*
-		 * Try to clear pending bit & set locked bit
-		 */
-		old = val;
-		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
-		val = atomic_cmpxchg_acquire(&lock->val, old, new);
-
-		if (val == old)
-			return 1;
-	}
-	return 0;
-}
-#endif /* _Q_PENDING_BITS == 8 */
-
 /*
  * Lock and MCS node addresses hash table for fast lookup
  *
-- 
2.35.1
