Message-Id: <20190911150537.19527-4-longman@redhat.com>
Date:   Wed, 11 Sep 2019 16:05:35 +0100
From:   Waiman Long <longman@...hat.com>
To:     Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>,
        Will Deacon <will.deacon@....com>,
        Alexander Viro <viro@...iv.linux.org.uk>,
        Mike Kravetz <mike.kravetz@...cle.com>
Cc:     linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org,
        linux-mm@...ck.org, Davidlohr Bueso <dave@...olabs.net>,
        Waiman Long <longman@...hat.com>
Subject: [PATCH 3/5] locking/osq: Allow early break from OSQ

The current osq_lock() function spins until it either acquires the lock
or its time slice is used up. There may be other reasons why a task
wants to back out of the OSQ before getting the lock. This patch
extends osq_lock() with two new arguments - a break function pointer
and its argument. If defined, the break function is called on each
iteration of the spin loop to check whether the task should break out
early.
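
For illustration only (not part of this patch), a caller could use the
new arguments roughly as in the sketch below; the deadline structure
and function names are hypothetical:

/*
 * Hypothetical sketch: give up on the OSQ after a deadline.
 * Assumes <linux/osq_lock.h> and <linux/sched/clock.h>.
 */
struct spin_deadline {
	u64 end;	/* sched_clock() value to give up at */
};

static bool spin_past_deadline(void *arg)
{
	struct spin_deadline *d = arg;

	/* Returning true makes osq_lock() unqueue and return false */
	return sched_clock() >= d->end;
}

static bool osq_lock_with_timeout(struct optimistic_spin_queue *osq,
				  u64 timeout_ns)
{
	struct spin_deadline d = {
		.end = sched_clock() + timeout_ns,
	};

	return osq_lock(osq, spin_past_deadline, &d);
}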

The optimistic_spin_node structure in osq_lock.h isn't needed by callers,
so it is moved into osq_lock.c.

Signed-off-by: Waiman Long <longman@...hat.com>
---
 include/linux/osq_lock.h  | 13 ++-----------
 kernel/locking/mutex.c    |  2 +-
 kernel/locking/osq_lock.c | 12 +++++++++++-
 kernel/locking/rwsem.c    |  2 +-
 4 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
index 5581dbd3bd34..161eb6b26d6d 100644
--- a/include/linux/osq_lock.h
+++ b/include/linux/osq_lock.h
@@ -2,16 +2,6 @@
 #ifndef __LINUX_OSQ_LOCK_H
 #define __LINUX_OSQ_LOCK_H
 
-/*
- * An MCS like lock especially tailored for optimistic spinning for sleeping
- * lock implementations (mutex, rwsem, etc).
- */
-struct optimistic_spin_node {
-	struct optimistic_spin_node *next, *prev;
-	int locked; /* 1 if lock acquired */
-	int cpu; /* encoded CPU # + 1 value */
-};
-
 struct optimistic_spin_queue {
 	/*
 	 * Stores an encoded value of the CPU # of the tail node in the queue.
@@ -30,7 +20,8 @@ static inline void osq_lock_init(struct optimistic_spin_queue *lock)
 	atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
 }
 
-extern bool osq_lock(struct optimistic_spin_queue *lock);
+extern bool osq_lock(struct optimistic_spin_queue *lock,
+		     bool (*break_fn)(void *), void *break_arg);
 extern void osq_unlock(struct optimistic_spin_queue *lock);
 
 static inline bool osq_is_locked(struct optimistic_spin_queue *lock)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 468a9b8422e3..8a1df82fd71a 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -654,7 +654,7 @@ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 		 * acquire the mutex all at once, the spinners need to take a
 		 * MCS (queued) lock first before spinning on the owner field.
 		 */
-		if (!osq_lock(&lock->osq))
+		if (!osq_lock(&lock->osq, NULL, NULL))
 			goto fail;
 	}
 
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 6ef600aa0f47..40c94380a485 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -11,6 +11,12 @@
  * called from interrupt context and we have preemption disabled while
  * spinning.
  */
+struct optimistic_spin_node {
+	struct optimistic_spin_node *next, *prev;
+	int locked; /* 1 if lock acquired */
+	int cpu; /* encoded CPU # + 1 value */
+};
+
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
 
 /*
@@ -87,7 +93,8 @@ osq_wait_next(struct optimistic_spin_queue *lock,
 	return next;
 }
 
-bool osq_lock(struct optimistic_spin_queue *lock)
+bool osq_lock(struct optimistic_spin_queue *lock,
+	      bool (*break_fn)(void *), void *break_arg)
 {
 	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
 	struct optimistic_spin_node *prev, *next;
@@ -143,6 +150,9 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 		if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
 			goto unqueue;
 
+		if (unlikely(break_fn) && break_fn(break_arg))
+			goto unqueue;
+
 		cpu_relax();
 	}
 	return true;
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 49f052d68404..c15926ecb21e 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -807,7 +807,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock,
 	preempt_disable();
 
 	/* sem->wait_lock should not be held when doing optimistic spinning */
-	if (!osq_lock(&sem->osq))
+	if (!osq_lock(&sem->osq, NULL, NULL))
 		goto done;
 
 	curtime = timeout ? sched_clock() : 0;
-- 
2.18.1
