Message-Id: <1401464642-33890-14-git-send-email-Waiman.Long@hp.com>
Date:	Fri, 30 May 2014 11:43:59 -0400
From:	Waiman Long <Waiman.Long@...com>
To:	Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...hat.com>,
	"H. Peter Anvin" <hpa@...or.com>,
	Peter Zijlstra <peterz@...radead.org>
Cc:	linux-arch@...r.kernel.org, x86@...nel.org,
	linux-kernel@...r.kernel.org,
	virtualization@...ts.linux-foundation.org,
	xen-devel@...ts.xenproject.org, kvm@...r.kernel.org,
	Paolo Bonzini <paolo.bonzini@...il.com>,
	Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>,
	Boris Ostrovsky <boris.ostrovsky@...cle.com>,
	"Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
	Rik van Riel <riel@...hat.com>,
	Linus Torvalds <torvalds@...ux-foundation.org>,
	Raghavendra K T <raghavendra.kt@...ux.vnet.ibm.com>,
	David Vrabel <david.vrabel@...rix.com>,
	Oleg Nesterov <oleg@...hat.com>,
	Gleb Natapov <gleb@...hat.com>,
	Scott J Norton <scott.norton@...com>,
	Chegu Vinod <chegu_vinod@...com>,
	Waiman Long <Waiman.Long@...com>
Subject: [PATCH v11 13/16] pvqspinlock: Enable coexistence with the unfair lock

This patch enables the PV qspinlock and the unfair lock to coexist.
When both are enabled, only the lock fastpath will perform lock
stealing; the slowpath will have lock stealing disabled, so that we
get the best of both features.
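
As an illustration only (a rough sketch built around a hypothetical
sketch_spin_lock() wrapper, not the actual queue_spin_lock()
implementation), the intended fastpath/slowpath split looks roughly
like this:

	static inline void sketch_spin_lock(struct qspinlock *lock)
	{
		u32 val;

		/*
		 * Fastpath: try to grab the lock directly.  This is where
		 * lock stealing can happen when the unfair lock is enabled,
		 * as a newly arriving CPU may take the lock ahead of the
		 * queued waiters.
		 */
		val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
		if (likely(val == 0))
			return;

		/*
		 * Slowpath: with PV qspinlock enabled, lock stealing is
		 * disabled here, so waiters queue up fairly and can be
		 * properly halted by the PV code.
		 */
		queue_spin_lock_slowpath(lock, val);
	}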

We also need to transition a CPU that has been spinning too long in
the pending bit code path back to the regular queuing code path, so
that it can be properly halted by the PV qspinlock code.
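
Roughly sketched (illustrative only; clear_pending() stands in for the
cmpxchg loop in the actual hunk below), the pending bit spin becomes a
bounded spin that falls back to queuing:

	int retry = pv_qspinlock_enabled() ? PSPIN_THRESHOLD : INT_MAX;

	while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK) {
		if (retry-- <= 0) {
			/* Spun too long: clear the pending bit and queue */
			clear_pending(lock);
			goto queue;
		}
		arch_mutex_cpu_relax();
	}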

Signed-off-by: Waiman Long <Waiman.Long@...com>
---
 kernel/locking/qspinlock.c |   47 ++++++++++++++++++++++++++++++++++++++++---
 1 files changed, 43 insertions(+), 4 deletions(-)

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 93c663a..8deedcf 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -57,12 +57,24 @@
 #include "mcs_spinlock.h"
 
 /*
+ * Check the pending bit spinning threshold only if PV qspinlock is enabled
+ */
+#define PSPIN_THRESHOLD		(1 << 10)
+#define MAX_NODES		4
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define pv_qspinlock_enabled()	static_key_false(&paravirt_spinlocks_enabled)
+#else
+#define pv_qspinlock_enabled()	false
+#endif
+
+/*
  * Per-CPU queue node structures; we can never have more than 4 nested
  * contexts: task, softirq, hardirq, nmi.
  *
  * Exactly fits one cacheline.
  */
-static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[4]);
+static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
 
 /*
  * We must be able to distinguish between no-tail and the tail at 0:0,
@@ -265,6 +277,9 @@ static noinline void queue_spin_lock_slowerpath(struct qspinlock *lock,
 		ACCESS_ONCE(prev->next) = node;
 
 		arch_mcs_spin_lock_contended(&node->locked);
+	} else {
+		/* Mark it as the queue head */
+		ACCESS_ONCE(node->locked) = true;
 	}
 
 	/*
@@ -344,14 +359,17 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	struct mcs_spinlock *node;
 	u32 new, old, tail;
 	int idx;
+	int retry = INT_MAX;	/* Retry count, queue if <= 0 */
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
 
 #ifdef CONFIG_VIRT_UNFAIR_LOCKS
 	/*
 	 * A simple test and set unfair lock
+	 * Disable waiter lock stealing if PV spinlock is enabled
 	 */
-	if (static_key_false(&virt_unfairlocks_enabled)) {
+	if (!pv_qspinlock_enabled() &&
+	    static_key_false(&virt_unfairlocks_enabled)) {
 		cpu_relax();	/* Relax after a failed lock attempt */
 		while (!queue_spin_trylock(lock))
 			cpu_relax();
@@ -360,6 +378,14 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 #endif /* CONFIG_VIRT_UNFAIR_LOCKS */
 
 	/*
+	 * When PV qspinlock is enabled, exit the pending bit code path and
+	 * go back to the regular queuing path if the lock isn't available
+	 * within a certain threshold.
+	 */
+	if (pv_qspinlock_enabled())
+		retry = PSPIN_THRESHOLD;
+
+	/*
 	 * trylock || pending
 	 *
 	 * 0,0,0 -> 0,0,1 ; trylock
@@ -370,7 +396,7 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 		 * If we observe that the queue is not empty or both
 		 * the pending and lock bits are set, queue
 		 */
-		if ((val & _Q_TAIL_MASK) ||
+		if ((val & _Q_TAIL_MASK) || (retry-- <= 0) ||
 		    (val == (_Q_LOCKED_VAL|_Q_PENDING_VAL)))
 			goto queue;
 
@@ -413,8 +439,21 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 * sequentiality; this because not all clear_pending_set_locked()
 	 * implementations imply full barriers.
 	 */
-	while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK)
+	while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK) {
+		if (pv_qspinlock_enabled() && (retry-- <= 0)) {
+			/*
+			 * Clear the pending bit and queue
+			 */
+			for (;;) {
+				new = val & ~_Q_PENDING_MASK;
+				old = atomic_cmpxchg(&lock->val, val, new);
+				if (old == val)
+					goto queue;
+				val = old;
+			}
+		}
 		arch_mutex_cpu_relax();
+	}
 
 	/*
 	 * take ownership and clear the pending bit.
-- 
1.7.1

