Message-ID: <Y+otv+QGyMpHAFO1@hirez.programming.kicks-ass.net>
Date: Mon, 13 Feb 2023 13:31:59 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: Waiman Long <longman@...hat.com>
Cc: Ingo Molnar <mingo@...hat.com>, Will Deacon <will@...nel.org>,
Boqun Feng <boqun.feng@...il.com>,
linux-kernel@...r.kernel.org, john.p.donnelly@...cle.com,
Hillf Danton <hdanton@...a.com>,
Mukesh Ojha <quic_mojha@...cinc.com>,
Ting11 Wang 王婷 <wangting11@...omi.com>
Subject: Re: [PATCH v7 4/4] locking/rwsem: Enable direct rwsem lock handoff

On Wed, Jan 25, 2023 at 07:36:28PM -0500, Waiman Long wrote:
> @@ -609,6 +618,12 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
>
> lockdep_assert_held(&sem->wait_lock);
>
> + if (!waiter->task) {
> + /* Write lock handed off */
> + smp_acquire__after_ctrl_dep();
> + return true;
> + }
> +
> count = atomic_long_read(&sem->count);
> do {
> bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
> @@ -754,6 +769,10 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
>
> owner = rwsem_owner_flags(sem, &flags);
> state = rwsem_owner_state(owner, flags);
> +
> + if (owner == current)
> + return OWNER_NONSPINNABLE; /* Handoff granted */
> +
> if (state != OWNER_WRITER)
> return state;
>
> @@ -1168,21 +1186,23 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
> * without sleeping.
> */
> if (waiter.handoff_set) {
> - enum owner_state owner_state;
> -
> - owner_state = rwsem_spin_on_owner(sem);
> - if (owner_state == OWNER_NULL)
> - goto trylock_again;
> + rwsem_spin_on_owner(sem);
> + if (!READ_ONCE(waiter.task)) {
> + /* Write lock handed off */
> + smp_acquire__after_ctrl_dep();
> + set_current_state(TASK_RUNNING);
> + goto out;
> + }
> }
>
> schedule_preempt_disabled();
> lockevent_inc(rwsem_sleep_writer);
> set_current_state(state);
> -trylock_again:
> raw_spin_lock_irq(&sem->wait_lock);
> }
> __set_current_state(TASK_RUNNING);
> raw_spin_unlock_irq(&sem->wait_lock);
> +out:
> lockevent_inc(rwsem_wlock);
> trace_contention_end(sem, 0);
> return sem;
> @@ -1190,6 +1210,11 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
> out_nolock:
> __set_current_state(TASK_RUNNING);
> raw_spin_lock_irq(&sem->wait_lock);
> + if (!waiter.task) {
> + smp_acquire__after_ctrl_dep();
> + raw_spin_unlock_irq(&sem->wait_lock);
> + goto out;
> + }
> rwsem_del_wake_waiter(sem, &waiter, &wake_q);
> lockevent_inc(rwsem_wlock_fail);
> trace_contention_end(sem, -EINTR);
> @@ -1202,14 +1227,41 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
> */
> static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
> {
> - unsigned long flags;
> DEFINE_WAKE_Q(wake_q);
> + unsigned long flags;
> + unsigned long count;
>
> raw_spin_lock_irqsave(&sem->wait_lock, flags);
>
> - if (!list_empty(&sem->wait_list))
> - rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
> + if (list_empty(&sem->wait_list))
> + goto unlock_out;
> +
> + /*
> + * If the rwsem is free and handoff flag is set with wait_lock held,
> + * no other CPUs can take an active lock.
> + */
> + count = atomic_long_read(&sem->count);
> + if (!(count & RWSEM_LOCK_MASK) && (count & RWSEM_FLAG_HANDOFF)) {
> + /*
> + * Since rwsem_mark_wake() will handle the handoff to reader
> + * properly, we don't need to do anything extra for reader.
> + * Special handoff processing will only be needed for writer.
> + */
> + struct rwsem_waiter *waiter = rwsem_first_waiter(sem);
> + long adj = RWSEM_WRITER_LOCKED - RWSEM_FLAG_HANDOFF;
> +
> + if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
> + atomic_long_set(&sem->owner, (long)waiter->task);
> + atomic_long_add(adj, &sem->count);
> + wake_q_add(&wake_q, waiter->task);
> + rwsem_del_waiter(sem, waiter);
> + waiter->task = NULL; /* Signal the handoff */
> + goto unlock_out;
> + }
> + }
> + rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
>
> +unlock_out:
> raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
> wake_up_q(&wake_q);
>

I am once again confused...

*WHY* are you changing the writer wake-up path? The comments added here
don't clarify anything.

If we set handoff, we terminate/disallow the spinning/stealing. The
direct consequence is that the slowpath/wait-list becomes the only way
forward.
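
To illustrate with a throwaway userspace model (emphatically not
rwsem.c: readers are left out and the bit names only loosely mirror the
real ones), an optimistic spinner's steal attempt bails out the moment
HANDOFF appears in the count word, which leaves the wait-list as the
only path to the lock:

/* toy-model.c -- cc -std=c11 toy-model.c; illustration only */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define WRITER_LOCKED	0x1UL	/* loosely mirrors RWSEM_WRITER_LOCKED */
#define FLAG_WAITERS	0x2UL	/* loosely mirrors RWSEM_FLAG_WAITERS  */
#define FLAG_HANDOFF	0x4UL	/* loosely mirrors RWSEM_FLAG_HANDOFF  */
#define LOCK_MASK	WRITER_LOCKED	/* readers omitted from this model */

static atomic_ulong count;

/* Spinner's steal attempt: give up as soon as the lock or HANDOFF is set. */
static bool write_steal(void)
{
	unsigned long c = atomic_load(&count);

	while (!(c & (LOCK_MASK | FLAG_HANDOFF))) {
		if (atomic_compare_exchange_weak(&count, &c, c | WRITER_LOCKED))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_store(&count, FLAG_WAITERS);
	printf("steal, WAITERS only: %d\n", write_steal());	/* 1: still steals */

	atomic_store(&count, FLAG_WAITERS | FLAG_HANDOFF);
	printf("steal, HANDOFF set:  %d\n", write_steal());	/* 0: must queue  */
	return 0;
}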

Since we don't take wait_lock on up, we fundamentally have a race
condition. But *WHY* do you insist on handling that in rwsem_wake()
instead of delaying it all until rwsem_try_write_lock()? Doing so would
render pretty much all of the above pointless, no?
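
Something like the below is what I mean by delaying it (same toy model
as above, completely untested, and ignoring the owner field, readers
and the waiter bookkeeping): the woken first writer, which already
holds wait_lock, converts HANDOFF into the writer bit itself, so the
waker never has to touch the count:

/* Toy-model continuation: the waiter-side claim, done where
 * rwsem_try_write_lock() already runs with wait_lock held.
 */
static bool try_write_lock_handoff(void)
{
	unsigned long c = atomic_load(&count);

	do {
		/* lock must be free and a handoff must be pending */
		if ((c & LOCK_MASK) || !(c & FLAG_HANDOFF))
			return false;
	} while (!atomic_compare_exchange_weak(&count, &c,
			(c | WRITER_LOCKED) & ~FLAG_HANDOFF));

	return true;
}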

After all, rwsem_mark_wake() already wakes the writer if it is first,
no? Why invent yet another special way to wake up the writer?

Also; and I asked this last time around; why do we care about the
handoff to writer *at all*? It is the readers that set HANDOFF.