Message-Id: <20190428155755.14267-8-longman@redhat.com>
Date: Sun, 28 Apr 2019 11:57:42 -0400
From: Waiman Long <longman@...hat.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Will Deacon <will.deacon@....com>,
Thomas Gleixner <tglx@...utronix.de>,
Borislav Petkov <bp@...en8.de>,
"H. Peter Anvin" <hpa@...or.com>
Cc: linux-kernel@...r.kernel.org, x86@...nel.org,
Davidlohr Bueso <dave@...olabs.net>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Tim Chen <tim.c.chen@...ux.intel.com>,
huang ying <huang.ying.caritas@...il.com>,
Waiman Long <longman@...hat.com>
Subject: [PATCH-tip v6 07/20] locking/rwsem: Make rwsem_spin_on_owner() return owner state
This patch modifies rwsem_spin_on_owner() to return four possible
values to better reflect the state of the lock holder, which enables
us to make a better decision about what to do next.
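As an illustration only (not part of this patch), here is a minimal
user-space sketch of the bit-flag pattern used by the new owner_state
enum: each state is a distinct power of two, so a set of acceptable
states can be expressed as a mask and tested with a single bitwise AND.

	#include <stdio.h>

	enum owner_state {
		OWNER_NULL         = 1 << 0,
		OWNER_WRITER       = 1 << 1,
		OWNER_READER       = 1 << 2,
		OWNER_NONSPINNABLE = 1 << 3,
	};
	#define OWNER_SPINNABLE    (OWNER_NULL | OWNER_WRITER)

	int main(void)
	{
		/* Pretend the spin loop observed a running writer. */
		enum owner_state state = OWNER_WRITER;

		/* A single AND decides whether spinning should continue. */
		if (state & OWNER_SPINNABLE)
			printf("owner is NULL or a running writer: keep spinning\n");
		else
			printf("reader-owned or not spinnable: stop spinning\n");
		return 0;
	}

This mirrors the new loop condition in rwsem_optimistic_spin(), which
keeps spinning only while rwsem_spin_on_owner() returns a state that
falls within OWNER_SPINNABLE.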
Signed-off-by: Waiman Long <longman@...hat.com>
---
kernel/locking/rwsem.c | 65 ++++++++++++++++++++++++++++++------------
1 file changed, 47 insertions(+), 18 deletions(-)
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 23e3aed2173a..979770be9529 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -415,17 +415,54 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
}
/*
- * Return true only if we can still spin on the owner field of the rwsem.
+ * The rwsem_spin_on_owner() function returns the following 4 values
+ * depending on the lock owner state.
+ * OWNER_NULL : owner is currently NULL
+ * OWNER_WRITER: when owner changes and is a writer
+ * OWNER_READER: when owner changes and the new owner may be a reader.
+ * OWNER_NONSPINNABLE:
+ * when optimistic spinning has to stop because either the
+ * owner stops running, is unknown, or its timeslice has
+ * been used up.
*/
-static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
+enum owner_state {
+ OWNER_NULL = 1 << 0,
+ OWNER_WRITER = 1 << 1,
+ OWNER_READER = 1 << 2,
+ OWNER_NONSPINNABLE = 1 << 3,
+};
+#define OWNER_SPINNABLE (OWNER_NULL | OWNER_WRITER)
+
+static inline enum owner_state rwsem_owner_state(unsigned long owner)
{
- struct task_struct *owner = READ_ONCE(sem->owner);
+ if (!owner)
+ return OWNER_NULL;
- if (!is_rwsem_owner_spinnable(owner))
- return false;
+ if (owner & RWSEM_ANONYMOUSLY_OWNED)
+ return OWNER_NONSPINNABLE;
+
+ if (owner & RWSEM_READER_OWNED)
+ return OWNER_READER;
+
+ return OWNER_WRITER;
+}
+
+static noinline enum owner_state rwsem_spin_on_owner(struct rw_semaphore *sem)
+{
+ struct task_struct *tmp, *owner = READ_ONCE(sem->owner);
+ enum owner_state state = rwsem_owner_state((unsigned long)owner);
+
+ if (state != OWNER_WRITER)
+ return state;
rcu_read_lock();
- while (owner && (READ_ONCE(sem->owner) == owner)) {
+ for (;;) {
+ tmp = READ_ONCE(sem->owner);
+ if (tmp != owner) {
+ state = rwsem_owner_state((unsigned long)tmp);
+ break;
+ }
+
/*
* Ensure we emit the owner->on_cpu, dereference _after_
* checking sem->owner still matches owner, if that fails,
@@ -434,24 +471,16 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
*/
barrier();
- /*
- * abort spinning when need_resched or owner is not running or
- * owner's cpu is preempted.
- */
if (need_resched() || !owner_on_cpu(owner)) {
- rcu_read_unlock();
- return false;
+ state = OWNER_NONSPINNABLE;
+ break;
}
cpu_relax();
}
rcu_read_unlock();
- /*
- * If there is a new owner or the owner is not set, we continue
- * spinning.
- */
- return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
+ return state;
}
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
@@ -474,7 +503,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
* 2) readers own the lock as we can't determine if they are
* actively running or not.
*/
- while (rwsem_spin_on_owner(sem)) {
+ while (rwsem_spin_on_owner(sem) & OWNER_SPINNABLE) {
/*
* Try to acquire the lock
*/
--
2.18.1