Message-ID: <160753912803.3364.4437531645113964830.tip-bot2@tip-bot2>
Date: Wed, 09 Dec 2020 18:38:48 -0000
From: "tip-bot2 for Waiman Long" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Waiman Long <longman@...hat.com>,
"Peter Zijlstra (Intel)" <peterz@...radead.org>,
Davidlohr Bueso <dbueso@...e.de>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: locking/core] locking/rwsem: Pass the current atomic count to
rwsem_down_read_slowpath()

The following commit has been merged into the locking/core branch of tip:

Commit-ID: c8fe8b0564388f41147326f31e4587171aacccd4
Gitweb: https://git.kernel.org/tip/c8fe8b0564388f41147326f31e4587171aacccd4
Author: Waiman Long <longman@...hat.com>
AuthorDate: Fri, 20 Nov 2020 23:14:12 -05:00
Committer: Peter Zijlstra <peterz@...radead.org>
CommitterDate: Wed, 09 Dec 2020 17:08:47 +01:00

locking/rwsem: Pass the current atomic count to rwsem_down_read_slowpath()

The atomic count value right after the reader count increment can be
useful for determining the rwsem state at trylock time, so the count
is passed down to rwsem_down_read_slowpath() to be used when
appropriate.

Signed-off-by: Waiman Long <longman@...hat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Reviewed-by: Davidlohr Bueso <dbueso@...e.de>
Link: https://lkml.kernel.org/r/20201121041416.12285-2-longman@redhat.com
---
kernel/locking/rwsem.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
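
The shape of the change is easier to see in a reduced userspace sketch.
The following is a minimal illustration of the pattern only, not kernel
code: C11 atomics stand in for atomic_long_t, and READER_BIAS /
READ_FAILED_MASK are simplified stand-ins for the kernel's RWSEM_*
constants. The point is that the trylock hands the freshly observed
count to the slowpath through an out parameter instead of having the
slowpath re-read sem->count:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define READER_BIAS		(1L << 8)	/* stand-in for RWSEM_READER_BIAS */
#define READ_FAILED_MASK	0x7L		/* stand-in for RWSEM_READ_FAILED_MASK */

struct sem {
	atomic_long count;
};

static bool read_trylock(struct sem *sem, long *cntp)
{
	/* Increment the reader count and capture the value right after it. */
	*cntp = atomic_fetch_add_explicit(&sem->count, READER_BIAS,
					  memory_order_acquire) + READER_BIAS;
	return !(*cntp & READ_FAILED_MASK);
}

static void down_read_slowpath(struct sem *sem, long count)
{
	/* Starts from the count observed at increment time, no re-read. */
	printf("slowpath: count at trylock was %ld\n", count);
	(void)sem;
}

static void down_read(struct sem *sem)
{
	long count;

	if (!read_trylock(sem, &count))
		down_read_slowpath(sem, count);
}

int main(void)
{
	struct sem s = { .count = 0 };

	down_read(&s);	/* uncontended: fast path, slowpath not taken */
	return 0;
}
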
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 67ae366..5768b90 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -270,14 +270,14 @@ static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
 					  owner | RWSEM_NONSPINNABLE));
 }
 
-static inline bool rwsem_read_trylock(struct rw_semaphore *sem)
+static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
 {
-	long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
+	*cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
 
-	if (WARN_ON_ONCE(cnt < 0))
+	if (WARN_ON_ONCE(*cntp < 0))
 		rwsem_set_nonspinnable(sem);
 
-	if (!(cnt & RWSEM_READ_FAILED_MASK)) {
+	if (!(*cntp & RWSEM_READ_FAILED_MASK)) {
 		rwsem_set_reader_owned(sem);
 		return true;
 	}
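
For context on the masks tested above: sem->count packs the reader
count and several state flags into a single word. The definitions
below are paraphrased from kernel/locking/rwsem.c in this era and
should be verified against the tree at this commit; they show why a
negative count triggers the WARN_ON_ONCE() (the read-fail flag is the
sign bit) and which conditions RWSEM_READ_FAILED_MASK covers:

/* Paraphrased; verify against kernel/locking/rwsem.c at this commit. */
#define RWSEM_WRITER_LOCKED	(1UL << 0)
#define RWSEM_FLAG_WAITERS	(1UL << 1)
#define RWSEM_FLAG_HANDOFF	(1UL << 2)
#define RWSEM_FLAG_READFAIL	(1UL << (BITS_PER_LONG - 1))	/* sign bit */

#define RWSEM_READER_SHIFT	8
#define RWSEM_READER_BIAS	(1UL << RWSEM_READER_SHIFT)
#define RWSEM_WRITER_MASK	RWSEM_WRITER_LOCKED
#define RWSEM_READ_FAILED_MASK	(RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
				 RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)
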
@@ -1008,9 +1008,9 @@ rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
  * Wait for the read lock to be granted
  */
 static struct rw_semaphore __sched *
-rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
+rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, int state)
 {
-	long count, adjustment = -RWSEM_READER_BIAS;
+	long adjustment = -RWSEM_READER_BIAS;
 	struct rwsem_waiter waiter;
 	DEFINE_WAKE_Q(wake_q);
 	bool wake = false;
@@ -1356,8 +1356,10 @@ static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
  */
 static inline int __down_read_common(struct rw_semaphore *sem, int state)
 {
-	if (!rwsem_read_trylock(sem)) {
-		if (IS_ERR(rwsem_down_read_slowpath(sem, state)))
+	long count;
+
+	if (!rwsem_read_trylock(sem, &count)) {
+		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
 			return -EINTR;
 		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
 	}
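
Note that nothing in this patch consumes the count inside the slowpath
yet; per the commit message it is passed down "to be used when
appropriate" by follow-up work. A purely hypothetical illustration of
what the captured value enables, reusing the bit names sketched above
(the helper and its use are invented for illustration, not part of
this series):

/* Hypothetical: the slowpath can branch on the lock state it actually
 * raced with at increment time, without issuing another atomic read
 * of sem->count. */
static inline bool saw_writer_at_trylock(long count)
{
	return count & RWSEM_WRITER_LOCKED;
}

Passing the snapshot rather than re-reading also means any such check
sees the state the reader really contended with, not a later one.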