Date:	Thu, 17 Apr 2014 11:04:03 -0400
From:	Waiman Long <Waiman.Long@...com>
To:	Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...hat.com>,
	"H. Peter Anvin" <hpa@...or.com>,
	Peter Zijlstra <peterz@...radead.org>
Cc:	linux-arch@...r.kernel.org, x86@...nel.org,
	linux-kernel@...r.kernel.org,
	virtualization@...ts.linux-foundation.org,
	xen-devel@...ts.xenproject.org, kvm@...r.kernel.org,
	Paolo Bonzini <paolo.bonzini@...il.com>,
	Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>,
	"Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
	Rik van Riel <riel@...hat.com>,
	Linus Torvalds <torvalds@...ux-foundation.org>,
	Raghavendra K T <raghavendra.kt@...ux.vnet.ibm.com>,
	David Vrabel <david.vrabel@...rix.com>,
	Oleg Nesterov <oleg@...hat.com>,
	Gleb Natapov <gleb@...hat.com>,
	Scott J Norton <scott.norton@...com>,
	Chegu Vinod <chegu_vinod@...com>,
	Waiman Long <Waiman.Long@...com>
Subject: [PATCH v9 11/19] qspinlock: Split the MCS queuing code into a separate slowerpath

With the pending addition of more code to support unfair locks and PV
spinlocks, the complexity of the slowpath function increases to the
point where the scratch-pad (caller-saved) registers on x86-64 are no
longer sufficient, so additional non-scratch-pad (callee-saved)
registers have to be used. This has the downside of requiring those
registers to be saved and restored in the prologue and epilogue of the
slowpath function, slowing down the nominally faster pending bit and
trylock code path at the beginning of the slowpath function.

This patch separates the actual MCS queuing code out into a slowerpath
function. This avoids slowing down the pending bit and trylock code
path, at the expense of a small amount of additional overhead in the
MCS queuing code path.
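
To illustrate the idea outside the kernel, here is a minimal
standalone sketch (not the qspinlock code; the function names and the
dummy "heavy" work are invented for this example, and the GCC
__attribute__((noinline)) spelling is used instead of the kernel's
noinline macro). Keeping a register-hungry slow part in a separate
noinline function confines any callee-saved register saves/restores to
that function's own prologue and epilogue, so the cheap early-exit
path stays lean:

/*
 * Minimal standalone sketch (NOT the kernel code): names and the dummy
 * "heavy" work below are invented purely for illustration.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int lockword;	/* 0 = unlocked, 1 = locked */

/* Cheap early-exit attempt: few locals, so the compiler can keep it in
 * scratch registers and emit no callee-saved register save/restore. */
static bool fast_trylock(void)
{
	int expected = 0;
	return atomic_compare_exchange_strong(&lockword, &expected, 1);
}

/* Register-hungry part kept out of line: with noinline, any
 * callee-saved register pushes/pops happen in this function's own
 * prologue and epilogue, not in the caller's. */
__attribute__((noinline))
static void queue_slowerpath(void)
{
	/* Enough live values to create some register pressure. */
	int a = 1, b = 2, c = 3, d = 4, e = 5, f = 6, g = 7, h = 8;

	while (!fast_trylock()) {
		a += b; b += c; c += d; d += e;
		e += f; f += g; g += h; h += a;
	}
	printf("acquired after spinning (checksum %d)\n", a + h);
}

static void lock_slowpath(void)
{
	if (fast_trylock())
		return;			/* common case stays lightweight */
	queue_slowerpath();		/* rare case pays the full prologue */
}

int main(void)
{
	lock_slowpath();
	atomic_store(&lockword, 0);
	return 0;
}

Whether the compiler actually spills callee-saved registers depends on
the target and optimization level; the point of the split is only to
bound where such spills can occur, which is the effect the slowerpath
split in this patch aims for.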

Signed-off-by: Waiman Long <Waiman.Long@...com>
---
 kernel/locking/qspinlock.c |  111 ++++++++++++++++++++++++++------------------
 1 files changed, 65 insertions(+), 46 deletions(-)

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 20e3fa6..954b8b3 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -340,53 +340,22 @@ static inline int trylock_pending(struct qspinlock *lock, u32 *pval)
 }
 
 /**
- * queue_spin_lock_slowpath - acquire the queue spinlock
+ * queue_spin_lock_slowerpath - a slower path for acquiring queue spinlock
  * @lock: Pointer to queue spinlock structure
- * @val: Current value of the queue spinlock 32-bit word
- *
- * (queue tail, pending bit, lock bit)
- *
- *              fast     :    slow                                  :    unlock
- *                       :                                          :
- * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
- *                       :       | ^--------.------.             /  :
- *                       :       v           \      \            |  :
- * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
- *                       :       | ^--'              |           |  :
- *                       :       v                   |           |  :
- * uncontended           :    (n,x,y) +--> (n,0,0) --'           |  :
- *   queue               :       | ^--'                          |  :
- *                       :       v                               |  :
- * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
- *   queue               :         ^--'                             :
+ * @node: Pointer to the queue node
+ * @tail: The tail code
  *
+ * The slowerpath is split out from the slowpath so that the extra
+ * pushing and popping of non-scratch-pad registers, needed by the more
+ * complex unfair and PV spinlock code, does not slow down the
+ * nominally faster pending bit and trylock code path. For that reason
+ * this function is not inlined.
  */
-void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+static noinline void
+queue_spin_lock_slowerpath(struct qspinlock *lock, struct qnode *node, u32 tail)
 {
-	struct qnode *prev, *next, *node;
-	u32 old, tail;
-	int idx;
-
-	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
-
-	if (trylock_pending(lock, &val))
-		return;	/* Lock acquired */
-
-	node = this_cpu_ptr(&qnodes[0]);
-	idx = node->mcs.count++;
-	tail = encode_tail(smp_processor_id(), idx);
-
-	node += idx;
-	node->qhead = 0;
-	node->mcs.next = NULL;
-
-	/*
-	 * We touched a (possibly) cold cacheline; attempt the trylock once
-	 * more in the hope someone let go while we weren't watching as long
-	 * as no one was queuing.
-	 */
-	if (!(val & _Q_TAIL_MASK) && queue_spin_trylock(lock))
-		goto release;
+	struct qnode *prev, *next;
+	u32 old, val;
 
 	/*
 	 * we already touched the queueing cacheline; don't bother with pending
@@ -443,7 +412,7 @@ retry_queue_wait:
 		}
 		old = atomic_cmpxchg(&lock->val, val, _Q_LOCKED_VAL);
 		if (old == val)
-			goto release;	/* No contention */
+			return;	/* No contention */
 		else if (old & _Q_LOCKED_MASK)
 			goto retry_queue_wait;
 
@@ -451,14 +420,64 @@ retry_queue_wait:
 	}
 
 	/*
-	 * contended path; wait for next, release.
+	 * contended path; wait for next, return.
 	 */
 	while (!(next = (struct qnode *)ACCESS_ONCE(node->mcs.next)))
 		arch_mutex_cpu_relax();
 
 	arch_mcs_spin_unlock_contended(&next->qhead);
+}
+
+/**
+ * queue_spin_lock_slowpath - acquire the queue spinlock
+ * @lock: Pointer to queue spinlock structure
+ * @val: Current value of the queue spinlock 32-bit word
+ *
+ * (queue tail, pending bit, lock bit)
+ *
+ *              fast     :    slow                                  :    unlock
+ *                       :                                          :
+ * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
+ *                       :       | ^--------.------.             /  :
+ *                       :       v           \      \            |  :
+ * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
+ *                       :       | ^--'              |           |  :
+ *                       :       v                   |           |  :
+ * uncontended           :    (n,x,y) +--> (n,0,0) --'           |  :
+ *   queue               :       | ^--'                          |  :
+ *                       :       v                               |  :
+ * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
+ *   queue               :         ^--'                             :
+ *
+ * This slowpath contains only the faster pending bit and trylock code.
+ * The slower MCS queuing code is in the slowerpath function.
+ */
+void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	struct qnode *node;
+	u32 tail, idx;
+
+	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
+
+	if (trylock_pending(lock, &val))
+		return;	/* Lock acquired */
+
+	node = this_cpu_ptr(&qnodes[0]);
+	idx = node->mcs.count++;
+	tail = encode_tail(smp_processor_id(), idx);
+
+	node += idx;
+	node->qhead = 0;
+	node->mcs.next = NULL;
+
+	/*
+	 * We touched a (possibly) cold cacheline; attempt the trylock once
+	 * more in the hope someone let go while we weren't watching as long
+	 * as no one was queuing.
+	 */
+	if ((val & _Q_TAIL_MASK) || !queue_spin_trylock(lock))
+		queue_spin_lock_slowerpath(lock, node, tail);
 
-release:
 	/*
 	 * release the node
 	 */
-- 
1.7.1
