[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <Y/tJjJMurN1uaC9V@hirez.programming.kicks-ass.net>
Date: Sun, 26 Feb 2023 12:59:08 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: longman@...hat.com, mingo@...hat.com, will@...nel.org
Cc: linux-kernel@...r.kernel.org, boqun.feng@...il.com
Subject: Re: [PATCH 3/6] locking/rwsem: Rework writer wakeup
On Thu, Feb 23, 2023 at 01:26:45PM +0100, Peter Zijlstra wrote:
> +/*
> + * This function must be called with the sem->wait_lock held to prevent
> + * race conditions between checking the rwsem wait list and setting the
> + * sem->count accordingly.
> + *
> + * Implies rwsem_del_waiter() on success.
> + */
> +static void rwsem_writer_wake(struct rw_semaphore *sem,
> + struct rwsem_waiter *waiter,
> + struct wake_q_head *wake_q)
> +{
> + struct rwsem_waiter *first = rwsem_first_waiter(sem);
> + long count, new;
> +
> + lockdep_assert_held(&sem->wait_lock);
> +
> + count = atomic_long_read(&sem->count);
> + do {
> + bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
> +
> + if (has_handoff) {
> + /*
> + * Honor handoff bit and yield only when the first
> +		 * waiter is the one that set it. Otherwise, we
> + * still try to acquire the rwsem.
> + */
> + if (first->handoff_set && (waiter != first))
> + return;
> + }
> +
> + new = count;
> +
> + if (count & RWSEM_LOCK_MASK) {
> + /*
> + * A waiter (first or not) can set the handoff bit
> + * if it is an RT task or wait in the wait queue
> + * for too long.
> + */
> + if (has_handoff || (!rt_task(waiter->task) &&
> + !time_after(jiffies, waiter->timeout)))
> + return;
> +
> + new |= RWSEM_FLAG_HANDOFF;
> + } else {
> + new |= RWSEM_WRITER_LOCKED;
> + new &= ~RWSEM_FLAG_HANDOFF;
> +
> + if (list_is_singular(&sem->wait_list))
> + new &= ~RWSEM_FLAG_WAITERS;
> + }
> + } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
> +
> + /*
> + * We have either acquired the lock with handoff bit cleared or set
> + * the handoff bit. Only the first waiter can have its handoff_set
> + * set here to enable optimistic spinning in slowpath loop.
> + */
> + if (new & RWSEM_FLAG_HANDOFF) {
> + first->handoff_set = true;
> + lockevent_inc(rwsem_wlock_handoff);
> + return;
> + }
> +
> + /*
> + * Have rwsem_writer_wake() fully imply rwsem_del_waiter() on
> + * success.
> + */
> + list_del(&waiter->list);
> + rwsem_set_owner(sem);
At the very least this needs to be:
atomic_long_set(&sem->owner, (long)waiter->task);
> + rwsem_waiter_wake(waiter, wake_q);
> +}
Powered by blists - more mailing lists