Message-Id: <20230327202413.1955856-6-longman@redhat.com>
Date: Mon, 27 Mar 2023 16:24:10 -0400
From: Waiman Long <longman@...hat.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>, Will Deacon <will@...nel.org>,
Boqun Feng <boqun.feng@...il.com>
Cc: linux-kernel@...r.kernel.org
Subject: [PATCH v2 5/8] locking/rwsem: Split out rwsem_reader_wake()
From: Peter Zijlstra <peterz@...radead.org>
Split the reader wakeup path out of rwsem_mark_wake() into its own
function, to provide symmetry with rwsem_writer_wake().
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
kernel/locking/rwsem.c | 83 +++++++++++++++++++++++-------------------
1 file changed, 46 insertions(+), 37 deletions(-)
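Note for reviewers: the net effect of this patch is that rwsem_mark_wake()
is reduced to a type dispatch, mirroring the existing rwsem_writer_wake()
path. Condensed from the last hunk of the diff below:

static void rwsem_mark_wake(struct rw_semaphore *sem,
			    enum rwsem_wake_type wake_type,
			    struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter = rwsem_first_waiter(sem);

	lockdep_assert_held(&sem->wait_lock);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		/* Writers are only woken on RWSEM_WAKE_ANY. */
		if (wake_type == RWSEM_WAKE_ANY)
			rwsem_writer_wake(sem, waiter, wake_q);
	} else {
		rwsem_reader_wake(sem, wake_type, waiter, wake_q);
	}
}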
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 225e86326ea4..0bc262dc77fd 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -106,9 +106,9 @@
* atomic_long_cmpxchg() will be used to obtain writer lock.
*
* There are three places where the lock handoff bit may be set or cleared.
- * 1) rwsem_mark_wake() for readers -- set, clear
+ * 1) rwsem_reader_wake() for readers -- set, clear
* 2) rwsem_try_write_lock() for writers -- set, clear
- * 3) rwsem_del_waiter() -- clear
+ * 3) rwsem_del_waiter() -- clear
*
* For all the above cases, wait_lock will be held. A writer must also
* be the first one in the wait_list to be eligible for setting the handoff
@@ -377,7 +377,7 @@ rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
/*
* Remove a waiter from the wait_list and clear flags.
*
- * Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of
+ * Both rwsem_reader_wake() and rwsem_try_write_lock() contain a full 'copy' of
* this function. Modify with care.
*
* Return: true if wait_list isn't empty and false otherwise
@@ -483,42 +483,15 @@ static void rwsem_writer_wake(struct rw_semaphore *sem,
rwsem_waiter_wake(waiter, wake_q);
}
-/*
- * handle the lock release when processes blocked on it that can now run
- * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
- * have been set.
- * - there must be someone on the queue
- * - the wait_lock must be held by the caller
- * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
- * to actually wakeup the blocked task(s) and drop the reference count,
- * preferably when the wait_lock is released
- * - woken process blocks are discarded from the list after having task zeroed
- * - writers are only marked woken if downgrading is false
- *
- * Implies rwsem_del_waiter() for all woken readers.
- */
-static void rwsem_mark_wake(struct rw_semaphore *sem,
- enum rwsem_wake_type wake_type,
- struct wake_q_head *wake_q)
+static void rwsem_reader_wake(struct rw_semaphore *sem,
+ enum rwsem_wake_type wake_type,
+ struct rwsem_waiter *waiter,
+ struct wake_q_head *wake_q)
{
- struct rwsem_waiter *waiter, *tmp;
long count, woken = 0, adjustment = 0;
+ struct rwsem_waiter *tmp;
struct list_head wlist;
- lockdep_assert_held(&sem->wait_lock);
-
- /*
- * Take a peek at the queue head waiter such that we can determine
- * the wakeup(s) to perform.
- */
- waiter = rwsem_first_waiter(sem);
-
- if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
- if (wake_type == RWSEM_WAKE_ANY)
- rwsem_writer_wake(sem, waiter, wake_q);
- return;
- }
-
/*
* No reader wakeup if there are too many of them already.
*/
@@ -634,6 +607,42 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
rwsem_waiter_wake(waiter, wake_q);
}
+/*
+ * handle the lock release when processes blocked on it that can now run
+ * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
+ * have been set.
+ * - there must be someone on the queue
+ * - the wait_lock must be held by the caller
+ * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
+ * to actually wakeup the blocked task(s) and drop the reference count,
+ * preferably when the wait_lock is released
+ * - woken process blocks are discarded from the list after having task zeroed
+ * - writers are only marked woken if downgrading is false
+ *
+ * Implies rwsem_del_waiter() for all woken waiters.
+ */
+static void rwsem_mark_wake(struct rw_semaphore *sem,
+ enum rwsem_wake_type wake_type,
+ struct wake_q_head *wake_q)
+{
+ struct rwsem_waiter *waiter;
+
+ lockdep_assert_held(&sem->wait_lock);
+
+ /*
+ * Take a peek at the queue head waiter such that we can determine
+ * the wakeup(s) to perform.
+ */
+ waiter = rwsem_first_waiter(sem);
+
+ if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
+ if (wake_type == RWSEM_WAKE_ANY)
+ rwsem_writer_wake(sem, waiter, wake_q);
+ } else {
+ rwsem_reader_wake(sem, wake_type, waiter, wake_q);
+ }
+}
+
/*
* Remove a waiter and try to wake up other waiters in the wait queue
* This function is called from the out_nolock path of both the reader and
@@ -1022,8 +1031,8 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) {
raw_spin_lock_irq(&sem->wait_lock);
if (!list_empty(&sem->wait_list))
- rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
- &wake_q);
+ rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
+
raw_spin_unlock_irq(&sem->wait_lock);
wake_up_q(&wake_q);
}
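For context, the wake protocol documented in the rwsem_mark_wake() comment
is unchanged by this split: wakeups are only *marked* while wait_lock is
held, and the actual wakeups happen via wake_up_q() after the lock is
dropped. A simplified caller-side sketch (illustrative only; example_up_wake()
is not a real function, it just condenses the pattern visible in
rwsem_down_read_slowpath() above):

static void example_up_wake(struct rw_semaphore *sem)
{
	DEFINE_WAKE_Q(wake_q);
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);
	if (!list_empty(&sem->wait_list))
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* Actual wakeups happen here, without wait_lock held. */
	wake_up_q(&wake_q);
}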
--
2.31.1