Message-Id: <20190418234628.3675-16-longman@redhat.com>
Date: Thu, 18 Apr 2019 19:46:25 -0400
From: Waiman Long <longman@...hat.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Will Deacon <will.deacon@....com>,
Thomas Gleixner <tglx@...utronix.de>,
Borislav Petkov <bp@...en8.de>,
"H. Peter Anvin" <hpa@...or.com>
Cc: linux-kernel@...r.kernel.org, x86@...nel.org,
Davidlohr Bueso <dave@...olabs.net>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Tim Chen <tim.c.chen@...ux.intel.com>,
huang ying <huang.ying.caritas@...il.com>,
Waiman Long <longman@...hat.com>
Subject: [PATCH v5 15/18] locking/rwsem: Add more rwsem owner access helpers

Before combining the owner and count fields, add two new helpers for
accessing the owner value in the rwsem:

 1) struct task_struct *rwsem_get_owner(struct rw_semaphore *sem)
 2) bool is_rwsem_reader_owned(struct rw_semaphore *sem)

Routing all owner accesses through these helpers means that the later
patch which folds the owner into the count word only needs to change
the helpers, not every caller.

Signed-off-by: Waiman Long <longman@...hat.com>
---
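Illustration only, not part of this patch: the value of funneling owner
reads through rwsem_get_owner() is that combining owner and count later
should only require changing the helper body. A rough sketch under one
hypothetical merged encoding (the encoding below is an assumption, not
what this series implements):

	static inline struct task_struct *rwsem_get_owner(struct rw_semaphore *sem)
	{
		/*
		 * Hypothetical merged layout: the owner task pointer is
		 * kept in the count word, with the low flag bits masked
		 * off to recover the pointer.
		 */
		unsigned long cowner = atomic_long_read(&sem->count);

		return (struct task_struct *)(cowner & ~RWSEM_OWNER_FLAGS_MASK);
	}

Callers such as rwsem_spin_on_owner() would not need to change.
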
kernel/locking/rwsem.c | 41 ++++++++++++++++++++++++++++++-----------
1 file changed, 30 insertions(+), 11 deletions(-)

diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 80f6e75c92ad..327bf8295d5d 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -157,6 +157,11 @@ static inline void rwsem_clear_owner(struct rw_semaphore *sem)
WRITE_ONCE(sem->owner, NULL);
}

+static inline struct task_struct *rwsem_get_owner(struct rw_semaphore *sem)
+{
+ return READ_ONCE(sem->owner);
+}
+
/*
* The task_struct pointer of the last owning reader will be left in
* the owner field.
@@ -198,6 +203,23 @@ static inline bool is_rwsem_spinnable(struct rw_semaphore *sem, bool wr)
return is_rwsem_owner_spinnable(READ_ONCE(sem->owner), wr);
}

+/*
+ * Return true if the rwsem is owned by a reader.
+ */
+static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
+{
+#ifdef CONFIG_DEBUG_RWSEMS
+ /*
+ * Check the count to see if it is write-locked.
+ */
+ long count = atomic_long_read(&sem->count);
+
+ if (count & RWSEM_WRITER_MASK)
+ return false;
+#endif
+ return (unsigned long)sem->owner & RWSEM_READER_OWNED;
+}
+
#ifdef CONFIG_DEBUG_RWSEMS
/*
* With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
@@ -210,7 +232,7 @@ static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
unsigned long owner = (unsigned long)READ_ONCE(sem->owner);

if ((owner & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current)
- cmpxchg_relaxed((unsigned long *)&sem->owner, val,
+ cmpxchg_relaxed((unsigned long *)&sem->owner, owner,
owner & RWSEM_OWNER_FLAGS_MASK);
}
#else
@@ -565,7 +587,7 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem, bool wr)
preempt_disable();
rcu_read_lock();
- owner = READ_ONCE(sem->owner);
+ owner = rwsem_get_owner(sem);
if (owner) {
ret = is_rwsem_owner_spinnable(owner, wr) &&
(((unsigned long)owner & RWSEM_READER_OWNED) ||
owner_on_cpu(owner));
@@ -614,7 +636,7 @@ static inline enum owner_state rwsem_owner_state(unsigned long owner, bool wr)
static noinline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem, bool wr)
{
- struct task_struct *tmp, *owner = READ_ONCE(sem->owner);
+ struct task_struct *tmp, *owner = rwsem_get_owner(sem);
enum owner_state state = rwsem_owner_state((unsigned long)owner, wr);

if (state != OWNER_WRITER)
@@ -627,7 +649,7 @@ rwsem_spin_on_owner(struct rw_semaphore *sem, bool wr)
break;
}

- tmp = READ_ONCE(sem->owner);
+ tmp = rwsem_get_owner(sem);
if (tmp != owner) {
state = rwsem_owner_state((unsigned long)tmp, wr);
break;
@@ -1170,8 +1192,7 @@ inline void __down_read(struct rw_semaphore *sem)
if (unlikely(atomic_long_fetch_add_acquire(RWSEM_READER_BIAS,
&sem->count) & RWSEM_READ_FAILED_MASK)) {
rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
- DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
- RWSEM_READER_OWNED), sem);
+ DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
} else {
rwsem_set_reader_owned(sem);
}
@@ -1183,8 +1204,7 @@ static inline int __down_read_killable(struct rw_semaphore *sem)
&sem->count) & RWSEM_READ_FAILED_MASK)) {
if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
return -EINTR;
- DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
- RWSEM_READER_OWNED), sem);
+ DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
} else {
rwsem_set_reader_owned(sem);
}
@@ -1255,7 +1275,7 @@ inline void __up_read(struct rw_semaphore *sem)
{
long tmp;

- DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED), sem);
+ DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
rwsem_clear_reader_owned(sem);
tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
@@ -1469,8 +1489,7 @@ EXPORT_SYMBOL(down_write_killable_nested);

void up_read_non_owner(struct rw_semaphore *sem)
{
- DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED),
- sem);
+ DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
__up_read(sem);
}
EXPORT_SYMBOL(up_read_non_owner);
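
For reference, is_rwsem_reader_owned() builds on the reader-ownership
convention already used in rwsem.c: the last reader to acquire the lock
leaves its task pointer in the owner field with the RWSEM_READER_OWNED
bit set. A simplified sketch of that setter (the in-tree code routes
this through an internal helper, so this is illustrative rather than
exact):

	static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
	{
		/* Record current as the most recent reader to own the lock. */
		WRITE_ONCE(sem->owner, (struct task_struct *)
			   ((unsigned long)current | RWSEM_READER_OWNED));
	}

This convention is why the non-debug path of is_rwsem_reader_owned()
only tests the RWSEM_READER_OWNED bit, while a CONFIG_DEBUG_RWSEMS build
also checks the count for a write lock: a stale reader task pointer can
legitimately remain in the owner field after all the readers are gone.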
--
2.18.1