[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <Y/tJZXrDYpvdJKMh@hirez.programming.kicks-ass.net>
Date: Sun, 26 Feb 2023 12:58:29 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: Waiman Long <longman@...hat.com>
Cc: mingo@...hat.com, will@...nel.org, linux-kernel@...r.kernel.org,
boqun.feng@...il.com
Subject: Re: [PATCH 3/6] locking/rwsem: Rework writer wakeup
On Thu, Feb 23, 2023 at 04:38:08PM -0500, Waiman Long wrote:
> > +static void rwsem_writer_wake(struct rw_semaphore *sem,
> > + struct rwsem_waiter *waiter,
> > + struct wake_q_head *wake_q)
> > +{
> > + struct rwsem_waiter *first = rwsem_first_waiter(sem);
> > + long count, new;
> > +
> > + lockdep_assert_held(&sem->wait_lock);
> > +
> > + count = atomic_long_read(&sem->count);
> > + do {
> > + bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
> > +
> > + if (has_handoff) {
> > + /*
> > + * Honor handoff bit and yield only when the first
> > + * waiter is the one that set it. Otherwisee, we
> > + * still try to acquire the rwsem.
> > + */
> > + if (first->handoff_set && (waiter != first))
> > + return;
> > + }
> This "if" statement is for a non-first waiter that somehow got woken up to
> have a chance to steal the lock. Now that the handoff is done on the wake side
> for the first waiter, this "if" statement is not applicable and can be
> removed.
Yeah, that can be cleaned up, something like the below. But that doesn't
appear to be the cause of issues.
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -427,25 +427,12 @@ static void rwsem_writer_wake(struct rw_
struct rwsem_waiter *waiter,
struct wake_q_head *wake_q)
{
- struct rwsem_waiter *first = rwsem_first_waiter(sem);
long count, new;
lockdep_assert_held(&sem->wait_lock);
count = atomic_long_read(&sem->count);
do {
- bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
-
- if (has_handoff) {
- /*
- * Honor handoff bit and yield only when the first
- * waiter is the one that set it. Otherwisee, we
- * still try to acquire the rwsem.
- */
- if (first->handoff_set && (waiter != first))
- return;
- }
-
new = count;
if (count & RWSEM_LOCK_MASK) {
@@ -454,8 +441,9 @@ static void rwsem_writer_wake(struct rw_
* if it is an RT task or wait in the wait queue
* for too long.
*/
- if (has_handoff || (!rt_task(waiter->task) &&
- !time_after(jiffies, waiter->timeout)))
+ if ((count & RWSEM_FLAG_HANDOFF) ||
+ (!rt_task(waiter->task) &&
+ !time_after(jiffies, waiter->timeout)))
return;
new |= RWSEM_FLAG_HANDOFF;
@@ -474,7 +462,7 @@ static void rwsem_writer_wake(struct rw_
* set here to enable optimistic spinning in slowpath loop.
*/
if (new & RWSEM_FLAG_HANDOFF) {
- first->handoff_set = true;
+ waiter->handoff_set = true;
lockevent_inc(rwsem_wlock_handoff);
return;
}
Powered by blists - more mailing lists