Message-ID: <98456253-47e9-a9a3-0afd-22a6c2286704@oracle.com>
Date: Wed, 27 Apr 2022 18:20:00 -0500
From: John Donnelly <John.p.donnelly@...cle.com>
To: Linux Kernel Mailing List <linux-kernel@...r.kernel.org>
Cc: peterz@...radead.org, mingo@...hat.com, will@...nel.org,
boqun.feng@...il.com, Waiman Long <longman@...hat.com>
Subject: Re: [PATCH 5.15 1/1] Revert "locking/rwsem: Make handoff bit handling
more consistent"
On 4/27/22 5:56 AM, john.p.donnelly@...cle.com wrote:
Hi,

Since Waiman followed up today with:

  [PATCH] locking/rwsem: Allow slowpath writer to ignore handoff bit if
  not set by first waiter

I am not sure whether this revert is still needed if that patch is
applied to 5.15.y.

Please advise.
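
In case it helps with reproduction: the failing workload is direct I/O
into preallocated files, per the dio/dm-0 completion traces quoted
below. A fio job along these lines exercises the same
unwritten-extent-conversion path (the parameters and the /mnt/test
mount point are illustrative placeholders, not the exact settings from
our suite; the target should be an ext4 or xfs filesystem on an LVM
volume striped over NVMe):

  [global]
  # async O_DIRECT writes; completions then run from a dio/<dev> worker
  ioengine=libaio
  direct=1
  rw=randwrite
  bs=4k
  iodepth=32
  time_based=1
  runtime=300

  [unwritten-extents]
  # preallocate so each write converts unwritten extents, i.e. the
  # ext4_convert_unwritten_extents / xfs_iomap_write_unwritten path
  directory=/mnt/test
  fallocate=native
  size=8G
  numjobs=16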
> This reverts commit 76723ed1fb8922ee94089e7432b8a262e3a06ed7.
>
> This commit introduced frequent system hangs when running the fio test
> suite on a variety of filesystem types on LVM volumes composed of four
> NVMe devices.
>
> This issue was first encountered in Linux 5.15.30, and later in Linux
> 5.17.y.
>
> Each panic backtrace includes rwsem_down_write_slowpath():
>
> ext4:
>
> PID: 3682 TASK: ffff8f489ae34bc0 CPU: 2 COMMAND: "dio/dm-0"
> #0 [fffffe0000083e50] crash_nmi_callback at ffffffff828772b3
> #1 [fffffe0000083e58] nmi_handle at ffffffff82840778
> #2 [fffffe0000083ea0] default_do_nmi at ffffffff8337a1e2
> #3 [fffffe0000083ec8] exc_nmi at ffffffff8337a48d
> #4 [fffffe0000083ef0] end_repeat_nmi at ffffffff8340153b
> [exception RIP: _raw_spin_lock_irq+23]
> RIP: ffffffff8338b2e7 RSP: ffff9c4409b47c78 RFLAGS: 00000046
> RAX: 0000000000000000 RBX: ffff8f489ae34bc0 RCX: 0000000000000000
> RDX: 0000000000000001 RSI: 0000000000000000 RDI: ffff8f47f7b90104
> RBP: ffff9c4409b47d20 R8: 0000000000000000 R9: 0000000000000000
> R10: 0000000000000000 R11: 0000000000000000 R12: ffff8f47f7b90104
> R13: ffff9c4409b47cb0 R14: ffff8f47f7b900f0 R15: 0000000000000000
> ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0018
> #5 [ffff9c4409b47c78] _raw_spin_lock_irq at ffffffff8338b2e7
> #6 [ffff9c4409b47c78] rwsem_down_write_slowpath at ffffffff82925be9
> #7 [ffff9c4409b47d28] ext4_map_blocks at ffffffffc11c26dc [ext4]
> #8 [ffff9c4409b47d98] ext4_convert_unwritten_extents at ffffffffc11ad9e0 [ext4]
> #9 [ffff9c4409b47df0] ext4_dio_write_end_io at ffffffffc11b22aa [ext4]
>
> xfs:
>
> PID: 3719 TASK: ffff9f81d2d74bc0 CPU: 37 COMMAND: "dio/dm-0"
> #0 [fffffe0000894e50] crash_nmi_callback at ffffffffad6772b3
> #1 [fffffe0000894e58] nmi_handle at ffffffffad640778
> #2 [fffffe0000894ea0] default_do_nmi at ffffffffae17a1e2
> #3 [fffffe0000894ec8] exc_nmi at ffffffffae17a48d
> #4 [fffffe0000894ef0] end_repeat_nmi at ffffffffae20153b
> [exception RIP: _raw_spin_lock_irq+23]
> RIP: ffffffffae18b2e7 RSP: ffffbb7ec9637c48 RFLAGS: 00000046
> RAX: 0000000000000000 RBX: ffff9f81d2d74bc0 RCX: 0000000000000000
> RDX: 0000000000000001 RSI: 0000000000000000 RDI: ffff9f81c04a918c
> RBP: ffffbb7ec9637ce8 R8: 0000000000000000 R9: 0000000000000000
> R10: 0000000000000000 R11: 0000000000000000 R12: ffff9f81c04a918c
> R13: ffffbb7ec9637c80 R14: ffff9f81c04a9178 R15: 0000000000000000
> ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0018
> --- <NMI exception stack> ---
> #5 [ffffbb7ec9637c48] _raw_spin_lock_irq at ffffffffae18b2e7
> #6 [ffffbb7ec9637c48] rwsem_down_write_slowpath at ffffffffad725be9
> #7 [ffffbb7ec9637cf0] xfs_trans_alloc_inode at ffffffffc074f2bd [xfs]
> #8 [ffffbb7ec9637d50] xfs_iomap_write_unwritten at ffffffffc073ad15
>
> Reported-by: Jorge Lopez <jorge.jo.lopez@...cle.com>
> Tested-by: Jorge Lopez <jorge.jo.lopez@...cle.com>
> Signed-off-by: John Donnelly <john.p.donnelly@...cle.com>
> Reviewed-by: Jack Vogel <jack.vogel@...cle.com>
> ---
> kernel/locking/rwsem.c | 171 +++++++++++++++++++++--------------------
> 1 file changed, 86 insertions(+), 85 deletions(-)
>
> diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
> index e63f740c2cc8..29eea50a3e67 100644
> --- a/kernel/locking/rwsem.c
> +++ b/kernel/locking/rwsem.c
> @@ -106,9 +106,9 @@
> * atomic_long_cmpxchg() will be used to obtain writer lock.
> *
> * There are three places where the lock handoff bit may be set or cleared.
> - * 1) rwsem_mark_wake() for readers -- set, clear
> - * 2) rwsem_try_write_lock() for writers -- set, clear
> - * 3) rwsem_del_waiter() -- clear
> + * 1) rwsem_mark_wake() for readers.
> + * 2) rwsem_try_write_lock() for writers.
> + * 3) Error path of rwsem_down_write_slowpath().
> *
> * For all the above cases, wait_lock will be held. A writer must also
> * be the first one in the wait_list to be eligible for setting the handoff
> @@ -335,9 +335,6 @@ struct rwsem_waiter {
> struct task_struct *task;
> enum rwsem_waiter_type type;
> unsigned long timeout;
> -
> - /* Writer only, not initialized in reader */
> - bool handoff_set;
> };
> #define rwsem_first_waiter(sem) \
> list_first_entry(&sem->wait_list, struct rwsem_waiter, list)
> @@ -348,6 +345,12 @@ enum rwsem_wake_type {
> RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */
> };
> +enum writer_wait_state {
> + WRITER_NOT_FIRST, /* Writer is not first in wait list */
> + WRITER_FIRST, /* Writer is first in wait list */
> + WRITER_HANDOFF /* Writer is first & handoff needed */
> +};
> +
> /*
> * The typical HZ value is either 250 or 1000. So set the minimum waiting
> * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
> @@ -363,31 +366,6 @@ enum rwsem_wake_type {
> */
> #define MAX_READERS_WAKEUP 0x100
> -static inline void
> -rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
> -{
> - lockdep_assert_held(&sem->wait_lock);
> - list_add_tail(&waiter->list, &sem->wait_list);
> - /* caller will set RWSEM_FLAG_WAITERS */
> -}
> -
> -/*
> - * Remove a waiter from the wait_list and clear flags.
> - *
> - * Both rwsem_mark_wake() and rwsem_try_write_lock() contain a full 'copy' of
> - * this function. Modify with care.
> - */
> -static inline void
> -rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
> -{
> - lockdep_assert_held(&sem->wait_lock);
> - list_del(&waiter->list);
> - if (likely(!list_empty(&sem->wait_list)))
> - return;
> -
> - atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count);
> -}
> -
> /*
> * handle the lock release when processes blocked on it that can now run
> * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
> @@ -399,8 +377,6 @@ rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
> * preferably when the wait_lock is released
> * - woken process blocks are discarded from the list after having task zeroed
> * - writers are only marked woken if downgrading is false
> - *
> - * Implies rwsem_del_waiter() for all woken readers.
> */
> static void rwsem_mark_wake(struct rw_semaphore *sem,
> enum rwsem_wake_type wake_type,
> @@ -515,25 +491,18 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
> adjustment = woken * RWSEM_READER_BIAS - adjustment;
> lockevent_cond_inc(rwsem_wake_reader, woken);
> -
> - oldcount = atomic_long_read(&sem->count);
> if (list_empty(&sem->wait_list)) {
> - /*
> - * Combined with list_move_tail() above, this implies
> - * rwsem_del_waiter().
> - */
> + /* hit end of list above */
> adjustment -= RWSEM_FLAG_WAITERS;
> - if (oldcount & RWSEM_FLAG_HANDOFF)
> - adjustment -= RWSEM_FLAG_HANDOFF;
> - } else if (woken) {
> - /*
> - * When we've woken a reader, we no longer need to force
> - * writers to give up the lock and we can clear HANDOFF.
> - */
> - if (oldcount & RWSEM_FLAG_HANDOFF)
> - adjustment -= RWSEM_FLAG_HANDOFF;
> }
> + /*
> + * When we've woken a reader, we no longer need to force writers
> + * to give up the lock and we can clear HANDOFF.
> + */
> + if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))
> + adjustment -= RWSEM_FLAG_HANDOFF;
> +
> if (adjustment)
> atomic_long_add(adjustment, &sem->count);
> @@ -564,12 +533,12 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
> * race conditions between checking the rwsem wait list and setting the
> * sem->count accordingly.
> *
> - * Implies rwsem_del_waiter() on success.
> + * If wstate is WRITER_HANDOFF, it will make sure that either the handoff
> + * bit is set or the lock is acquired with handoff bit cleared.
> */
> static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
> - struct rwsem_waiter *waiter)
> + enum writer_wait_state wstate)
> {
> - bool first = rwsem_first_waiter(sem) == waiter;
> long count, new;
> lockdep_assert_held(&sem->wait_lock);
> @@ -578,19 +547,13 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
> do {
> bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
> - if (has_handoff) {
> - if (!first)
> - return false;
> -
> - /* First waiter inherits a previously set handoff bit */
> - waiter->handoff_set = true;
> - }
> + if (has_handoff && wstate == WRITER_NOT_FIRST)
> + return false;
> new = count;
> if (count & RWSEM_LOCK_MASK) {
> - if (has_handoff || (!rt_task(waiter->task) &&
> - !time_after(jiffies, waiter->timeout)))
> + if (has_handoff || (wstate != WRITER_HANDOFF))
> return false;
> new |= RWSEM_FLAG_HANDOFF;
> @@ -607,17 +570,9 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
> * We have either acquired the lock with handoff bit cleared or
> * set the handoff bit.
> */
> - if (new & RWSEM_FLAG_HANDOFF) {
> - waiter->handoff_set = true;
> - lockevent_inc(rwsem_wlock_handoff);
> + if (new & RWSEM_FLAG_HANDOFF)
> return false;
> - }
> - /*
> - * Have rwsem_try_write_lock() fully imply rwsem_del_waiter() on
> - * success.
> - */
> - list_del(&waiter->list);
> rwsem_set_owner(sem);
> return true;
> }
> @@ -998,7 +953,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
> }
> adjustment += RWSEM_FLAG_WAITERS;
> }
> - rwsem_add_waiter(sem, &waiter);
> + list_add_tail(&waiter.list, &sem->wait_list);
> /* we're now waiting on the lock, but no longer actively locking */
> count = atomic_long_add_return(adjustment, &sem->count);
> @@ -1044,7 +999,11 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
> return sem;
> out_nolock:
> - rwsem_del_waiter(sem, &waiter);
> + list_del(&waiter.list);
> + if (list_empty(&sem->wait_list)) {
> + atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF,
> + &sem->count);
> + }
> raw_spin_unlock_irq(&sem->wait_lock);
> __set_current_state(TASK_RUNNING);
> lockevent_inc(rwsem_rlock_fail);
> @@ -1058,7 +1017,9 @@ static struct rw_semaphore *
> rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
> {
> long count;
> + enum writer_wait_state wstate;
> struct rwsem_waiter waiter;
> + struct rw_semaphore *ret = sem;
> DEFINE_WAKE_Q(wake_q);
> /* do optimistic spinning and steal lock if possible */
> @@ -1074,13 +1035,16 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
> waiter.task = current;
> waiter.type = RWSEM_WAITING_FOR_WRITE;
> waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
> - waiter.handoff_set = false;
> raw_spin_lock_irq(&sem->wait_lock);
> - rwsem_add_waiter(sem, &waiter);
> +
> + /* account for this before adding a new element to the list */
> + wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;
> +
> + list_add_tail(&waiter.list, &sem->wait_list);
> /* we're now waiting on the lock */
> - if (rwsem_first_waiter(sem) != &waiter) {
> + if (wstate == WRITER_NOT_FIRST) {
> count = atomic_long_read(&sem->count);
> /*
> @@ -1116,16 +1080,13 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
> /* wait until we successfully acquire the lock */
> set_current_state(state);
> for (;;) {
> - if (rwsem_try_write_lock(sem, &waiter)) {
> + if (rwsem_try_write_lock(sem, wstate)) {
> /* rwsem_try_write_lock() implies ACQUIRE on success */
> break;
> }
> raw_spin_unlock_irq(&sem->wait_lock);
> - if (signal_pending_state(state, current))
> - goto out_nolock;
> -
> /*
> * After setting the handoff bit and failing to acquire
> * the lock, attempt to spin on owner to accelerate lock
> @@ -1134,7 +1095,7 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
> * In this case, we attempt to acquire the lock again
> * without sleeping.
> */
> - if (waiter.handoff_set) {
> + if (wstate == WRITER_HANDOFF) {
> enum owner_state owner_state;
> preempt_disable();
> @@ -1145,26 +1106,66 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
> goto trylock_again;
> }
> - schedule();
> - lockevent_inc(rwsem_sleep_writer);
> - set_current_state(state);
> + /* Block until there are no active lockers. */
> + for (;;) {
> + if (signal_pending_state(state, current))
> + goto out_nolock;
> +
> + schedule();
> + lockevent_inc(rwsem_sleep_writer);
> + set_current_state(state);
> + /*
> + * If HANDOFF bit is set, unconditionally do
> + * a trylock.
> + */
> + if (wstate == WRITER_HANDOFF)
> + break;
> +
> + if ((wstate == WRITER_NOT_FIRST) &&
> + (rwsem_first_waiter(sem) == &waiter))
> + wstate = WRITER_FIRST;
> +
> + count = atomic_long_read(&sem->count);
> + if (!(count & RWSEM_LOCK_MASK))
> + break;
> +
> + /*
> + * The setting of the handoff bit is deferred
> + * until rwsem_try_write_lock() is called.
> + */
> + if ((wstate == WRITER_FIRST) && (rt_task(current) ||
> + time_after(jiffies, waiter.timeout))) {
> + wstate = WRITER_HANDOFF;
> + lockevent_inc(rwsem_wlock_handoff);
> + break;
> + }
> + }
> trylock_again:
> raw_spin_lock_irq(&sem->wait_lock);
> }
> __set_current_state(TASK_RUNNING);
> + list_del(&waiter.list);
> raw_spin_unlock_irq(&sem->wait_lock);
> lockevent_inc(rwsem_wlock);
> - return sem;
> +
> + return ret;
> out_nolock:
> __set_current_state(TASK_RUNNING);
> raw_spin_lock_irq(&sem->wait_lock);
> - rwsem_del_waiter(sem, &waiter);
> - if (!list_empty(&sem->wait_list))
> + list_del(&waiter.list);
> +
> + if (unlikely(wstate == WRITER_HANDOFF))
> + atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count);
> +
> + if (list_empty(&sem->wait_list))
> + atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
> + else
> rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
> raw_spin_unlock_irq(&sem->wait_lock);
> wake_up_q(&wake_q);
> lockevent_inc(rwsem_wlock_fail);
> +
> return ERR_PTR(-EINTR);
> }
> --
> 2.31.1
>
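
P.S. For anyone reviewing the quoted diff: the logic this revert
restores is a small per-waiter state machine, WRITER_NOT_FIRST ->
WRITER_FIRST -> WRITER_HANDOFF, re-evaluated each time the sleeping
writer wakes. Below is a simplified userspace sketch of those
transitions as I read the restored code; it is an illustration of the
flow, not the kernel source itself.

/* Model of the writer wait-state transitions restored by this revert.
 * Illustrative only -- the kernel evaluates these inside the
 * wait_lock-protected slowpath in kernel/locking/rwsem.c.
 */
#include <stdbool.h>
#include <stdio.h>

enum writer_wait_state { WRITER_NOT_FIRST, WRITER_FIRST, WRITER_HANDOFF };

/* One wakeup of the sleeping writer: promote the state based on the
 * waiter's position in the wait list and on "rt_or_timeout", which
 * stands in for rt_task(current) || time_after(jiffies, waiter.timeout).
 * Once WRITER_HANDOFF is reached, rwsem_try_write_lock() will either
 * set the handoff bit or acquire the lock with the bit cleared.
 */
static enum writer_wait_state
next_state(enum writer_wait_state ws, bool first_in_list, bool rt_or_timeout)
{
	if (ws == WRITER_NOT_FIRST && first_in_list)
		ws = WRITER_FIRST;
	if (ws == WRITER_FIRST && rt_or_timeout)
		ws = WRITER_HANDOFF;
	return ws;
}

int main(void)
{
	enum writer_wait_state ws = WRITER_NOT_FIRST;

	ws = next_state(ws, true, false); /* we became the first waiter */
	ws = next_state(ws, true, true);  /* wait timed out: ask for handoff */
	printf("final state: %d (2 == WRITER_HANDOFF)\n", ws);
	return 0;
}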