[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20210316185547.4mu6zj2bwjjs2c62@offworld>
Date: Tue, 16 Mar 2021 11:55:47 -0700
From: Davidlohr Bueso <dave@...olabs.net>
To: Waiman Long <longman@...hat.com>
Cc: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>, Will Deacon <will@...nel.org>,
Boqun Feng <boqun.feng@...il.com>,
"Paul E. McKenney" <paulmck@...nel.org>,
linux-kernel@...r.kernel.org, Juri Lelli <juri.lelli@...hat.com>
Subject: Re: [PATCH 1/4] locking/ww_mutex: Simplify use_ww_ctx & ww_ctx
handling
On Tue, 16 Mar 2021, Waiman Long wrote:
>The use_ww_ctx flag is passed to mutex_optimistic_spin(), but the
>function doesn't use it. The frequent use of the (use_ww_ctx && ww_ctx)
>combination is repetitive.
I always found that very fugly.
>
>In fact, ww_ctx should not be used at all if !use_ww_ctx. Simplify
>ww_mutex code by dropping use_ww_ctx from mutex_optimistic_spin() and
>clear ww_ctx if !use_ww_ctx. In this way, we can replace (use_ww_ctx &&
>ww_ctx) by just (ww_ctx).
>
>Signed-off-by: Waiman Long <longman@...hat.com>
Acked-by: Davidlohr Bueso <dbueso@...e.de>
>---
> kernel/locking/mutex.c | 25 ++++++++++++++-----------
> 1 file changed, 14 insertions(+), 11 deletions(-)
>
>diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
>index adb935090768..622ebdfcd083 100644
>--- a/kernel/locking/mutex.c
>+++ b/kernel/locking/mutex.c
>@@ -626,7 +626,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
> */
> static __always_inline bool
> mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
>- const bool use_ww_ctx, struct mutex_waiter *waiter)
>+ struct mutex_waiter *waiter)
> {
> if (!waiter) {
> /*
>@@ -702,7 +702,7 @@ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
> #else
> static __always_inline bool
> mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
>- const bool use_ww_ctx, struct mutex_waiter *waiter)
>+ struct mutex_waiter *waiter)
> {
> return false;
> }
>@@ -922,6 +922,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> struct ww_mutex *ww;
> int ret;
>
>+ if (!use_ww_ctx)
>+ ww_ctx = NULL;
>+
> might_sleep();
>
> #ifdef CONFIG_DEBUG_MUTEXES
>@@ -929,7 +932,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> #endif
>
> ww = container_of(lock, struct ww_mutex, base);
>- if (use_ww_ctx && ww_ctx) {
>+ if (ww_ctx) {
> if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
> return -EALREADY;
>
>@@ -946,10 +949,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
>
> if (__mutex_trylock(lock) ||
>- mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
>+ mutex_optimistic_spin(lock, ww_ctx, NULL)) {
> /* got the lock, yay! */
> lock_acquired(&lock->dep_map, ip);
>- if (use_ww_ctx && ww_ctx)
>+ if (ww_ctx)
> ww_mutex_set_context_fastpath(ww, ww_ctx);
> preempt_enable();
> return 0;
>@@ -960,7 +963,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> * After waiting to acquire the wait_lock, try again.
> */
> if (__mutex_trylock(lock)) {
>- if (use_ww_ctx && ww_ctx)
>+ if (ww_ctx)
> __ww_mutex_check_waiters(lock, ww_ctx);
>
> goto skip_wait;
>@@ -1013,7 +1016,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> goto err;
> }
>
>- if (use_ww_ctx && ww_ctx) {
>+ if (ww_ctx) {
> ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
> if (ret)
> goto err;
>@@ -1026,7 +1029,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> * ww_mutex needs to always recheck its position since its waiter
> * list is not FIFO ordered.
> */
>- if ((use_ww_ctx && ww_ctx) || !first) {
>+ if (ww_ctx || !first) {
> first = __mutex_waiter_is_first(lock, &waiter);
> if (first)
> __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
>@@ -1039,7 +1042,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> * or we must see its unlock and acquire.
> */
> if (__mutex_trylock(lock) ||
>- (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
>+ (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
> break;
>
> spin_lock(&lock->wait_lock);
>@@ -1048,7 +1051,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> acquired:
> __set_current_state(TASK_RUNNING);
>
>- if (use_ww_ctx && ww_ctx) {
>+ if (ww_ctx) {
> /*
> * Wound-Wait; we stole the lock (!first_waiter), check the
> * waiters as anyone might want to wound us.
>@@ -1068,7 +1071,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
> /* got the lock - cleanup and rejoice! */
> lock_acquired(&lock->dep_map, ip);
>
>- if (use_ww_ctx && ww_ctx)
>+ if (ww_ctx)
> ww_mutex_lock_acquired(ww, ww_ctx);
>
> spin_unlock(&lock->wait_lock);
>--
>2.18.1
>
Powered by blists - more mailing lists