Message-ID: <CAM9d7ciQQEypvv2a2zQLHNc7p3NNxF59kASxHoFMCqiQicKwBA@mail.gmail.com>
Date: Mon, 28 Mar 2022 10:48:59 -0700
From: Namhyung Kim <namhyung@...nel.org>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Ingo Molnar <mingo@...nel.org>, Will Deacon <will@...nel.org>,
Waiman Long <longman@...hat.com>,
Boqun Feng <boqun.feng@...il.com>,
LKML <linux-kernel@...r.kernel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Steven Rostedt <rostedt@...dmis.org>,
Byungchul Park <byungchul.park@....com>,
"Paul E. McKenney" <paulmck@...nel.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Arnd Bergmann <arnd@...db.de>,
Radoslaw Burny <rburny@...gle.com>,
linux-arch <linux-arch@...r.kernel.org>,
bpf <bpf@...r.kernel.org>, Hyeonggon Yoo <42.hyeyoo@...il.com>
Subject: Re: [PATCH 2/2] locking: Apply contention tracepoints in the slow path
On Mon, Mar 28, 2022 at 4:39 AM Peter Zijlstra <peterz@...radead.org> wrote:
>
> On Tue, Mar 22, 2022 at 11:57:09AM -0700, Namhyung Kim wrote:
> > diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
> > index ee2fd7614a93..c88deda77cf2 100644
> > --- a/kernel/locking/mutex.c
> > +++ b/kernel/locking/mutex.c
> > @@ -644,6 +644,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
> > }
> >
> > set_current_state(state);
> > + trace_contention_begin(lock, 0);
> > for (;;) {
> > bool first;
> >
> > @@ -710,6 +711,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
> > skip_wait:
> > /* got the lock - cleanup and rejoice! */
> > lock_acquired(&lock->dep_map, ip);
> > + trace_contention_end(lock, 0);
> >
> > if (ww_ctx)
> > ww_mutex_lock_acquired(ww, ww_ctx);
>
> (note: it's possible to get to this trace_contention_end() without ever
> having passed a _begin -- fixed in the below)
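(For reference, the path in question appears to be the trylock retry
right after taking wait_lock, before the waiter is queued: it jumps
straight to skip_wait and so never runs the set_current_state() /
trace_contention_begin() pair above. Roughly, paraphrased from
kernel/locking/mutex.c of that era:

	raw_spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		/* lands on trace_contention_end() with no _begin */
		goto skip_wait;
	}
)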
>
> > @@ -721,6 +723,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
> > err:
> > __set_current_state(TASK_RUNNING);
> > __mutex_remove_waiter(lock, &waiter);
> > + trace_contention_end(lock, ret);
> > err_early_kill:
> > raw_spin_unlock(&lock->wait_lock);
> > debug_mutex_free_waiter(&waiter);
>
>
> So there was one thing here, that might or might not be important, but
> is somewhat inconsistent with the whole thing. That is, do you want to
> include optimistic spinning in the contention time or not?
Yes, this was in a grey area and would create a begin -> begin -> end
path for mutexes. But I think tools can handle it with the flags.
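For example, a consumer could treat a second contention_begin from the
same task on the same lock as a flags update rather than a new event,
so the spinning window stays inside the measured contention time. A
minimal sketch of that pairing logic (the struct and helpers are
hypothetical tool-side code, not part of the series):

	#include <stdint.h>
	#include <stddef.h>

	/* Hypothetical per-task state kept by a tracing tool. */
	struct task_lock_state {
		void     *lock;   /* lock address from the tracepoint */
		uint64_t  start;  /* timestamp of the first _begin    */
		uint32_t  flags;  /* latest flags, e.g. LCB_F_SPIN    */
	};

	static void on_contention_begin(struct task_lock_state *st,
					void *lock, uint32_t flags,
					uint64_t now)
	{
		if (st->lock != lock) {      /* first begin for this lock */
			st->lock  = lock;
			st->start = now;
		}
		st->flags = flags;           /* begin -> begin: flags only */
	}

	static uint64_t on_contention_end(struct task_lock_state *st,
					  uint64_t now)
	{
		uint64_t wait = now - st->start;

		st->lock = NULL;             /* close the contention window */
		return wait;
	}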
>
> Because currently you do it sometimes.
>
> Also, if you were to add LCB_F_MUTEX then you could have something like:
Yep, I'm ok with having the mutex flag. Do you want me to send
v5 with this change, or would you like to do it yourself?
Thanks,
Namhyung
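(For reference, the existing LCB_F_* bits come from patch 1 of the
series; a mutex flag would presumably slot in after them, something
like the sketch below. The exact bit position of LCB_F_MUTEX is an
assumption here, not settled in this thread:

	/* include/trace/events/lock.h, patch 1 of the series */
	#define LCB_F_SPIN	(1U << 0)
	#define LCB_F_READ	(1U << 1)
	#define LCB_F_WRITE	(1U << 2)
	#define LCB_F_RT	(1U << 3)
	#define LCB_F_PERCPU	(1U << 4)

	/* proposed here; bit position is an assumption */
	#define LCB_F_MUTEX	(1U << 5)
)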
>
>
> --- a/kernel/locking/mutex.c
> +++ b/kernel/locking/mutex.c
> @@ -602,12 +602,14 @@ __mutex_lock_common(struct mutex *lock,
> preempt_disable();
> mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
>
> + trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
> if (__mutex_trylock(lock) ||
> mutex_optimistic_spin(lock, ww_ctx, NULL)) {
> /* got the lock, yay! */
> lock_acquired(&lock->dep_map, ip);
> if (ww_ctx)
> ww_mutex_set_context_fastpath(ww, ww_ctx);
> + trace_contention_end(lock, 0);
> preempt_enable();
> return 0;
> }
> @@ -644,7 +646,7 @@ __mutex_lock_common(struct mutex *lock,
> }
>
> set_current_state(state);
> - trace_contention_begin(lock, 0);
> + trace_contention_begin(lock, LCB_F_MUTEX);
> for (;;) {
> bool first;
>
> @@ -684,10 +686,16 @@ __mutex_lock_common(struct mutex *lock,
> * state back to RUNNING and fall through the next schedule(),
> * or we must see its unlock and acquire.
> */
> - if (__mutex_trylock_or_handoff(lock, first) ||
> - (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
> + if (__mutex_trylock_or_handoff(lock, first))
> break;
>
> + if (first) {
> + trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
> + if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
> + break;
> + trace_contention_begin(lock, LCB_F_MUTEX);
> + }
> +
> raw_spin_lock(&lock->wait_lock);
> }
> raw_spin_lock(&lock->wait_lock);
> @@ -723,8 +731,8 @@ __mutex_lock_common(struct mutex *lock,
> err:
> __set_current_state(TASK_RUNNING);
> __mutex_remove_waiter(lock, &waiter);
> - trace_contention_end(lock, ret);
> err_early_kill:
> + trace_contention_end(lock, ret);
> raw_spin_unlock(&lock->wait_lock);
> debug_mutex_free_waiter(&waiter);
> mutex_release(&lock->dep_map, ip);
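Putting it together, a fully contended acquire under this version would
emit a sequence like the following (notation mine, not actual
tracepoint output):

	contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN)  /* fast-path spin fails */
	contention_begin(lock, LCB_F_MUTEX)               /* queued as a waiter   */
	contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN)  /* first waiter spins   */
	contention_begin(lock, LCB_F_MUTEX)               /* spin fails, sleeps   */
	contention_end(lock, 0)                           /* lock acquired        */

which is the begin -> begin -> end shape mentioned above.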