Message-ID: <47c3b0df-9f11-4e14-97e2-0f3ba3b09855@paulmck-laptop>
Date: Wed, 16 Jul 2025 21:43:47 -0700
From: "Paul E. McKenney" <paulmck@...nel.org>
To: Steven Rostedt <rostedt@...nel.org>
Cc: linux-kernel@...r.kernel.org, linux-trace-kernel@...r.kernel.org,
bpf@...r.kernel.org, x86@...nel.org,
Masami Hiramatsu <mhiramat@...nel.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Josh Poimboeuf <jpoimboe@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>, Jiri Olsa <jolsa@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Andrii Nakryiko <andrii@...nel.org>,
Indu Bhagat <indu.bhagat@...cle.com>,
"Jose E. Marchesi" <jemarch@....org>,
Beau Belgrave <beaub@...ux.microsoft.com>,
Jens Remus <jremus@...ux.ibm.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Jens Axboe <axboe@...nel.dk>, Florian Weimer <fweimer@...hat.com>,
Sam James <sam@...too.org>
Subject: Re: [PATCH v14 09/12] unwind deferred: Use SRCU unwind_deferred_task_work()
On Wed, Jul 16, 2025 at 08:49:19PM -0400, Steven Rostedt wrote:
> From: Steven Rostedt <rostedt@...dmis.org>
>
> Instead of using the callback_mutex to protect the linked list of callbacks
> in unwind_deferred_task_work(), use SRCU. This function is called every time
> a task that has to record a requested stack trace exits. This can happen for
> many tasks on several CPUs at the same time. A mutex is a bottleneck and can
> cause contention and slow down performance.
>
> As the callbacks themselves are allowed to sleep, regular RCU cannot be
> used to protect the list. Instead, use SRCU, which still allows the
> callbacks to sleep while the list is read without needing to hold the
> callback_mutex.
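
[ For context, the read-side pattern the patch adopts is roughly the
  following untested sketch, with the callback invocation being the part
  that may sleep:

	int idx;

	idx = srcu_read_lock(&unwind_srcu);
	list_for_each_entry_srcu(work, &callbacks, list,
				 srcu_read_lock_held(&unwind_srcu))
		work->func(work, &trace, cookie);	/* may sleep */
	srcu_read_unlock(&unwind_srcu, idx);

  Sleeping like this is legal inside an SRCU read-side critical section,
  but would be a bug under plain rcu_read_lock(). ]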
>
> Link: https://lore.kernel.org/all/ca9bd83a-6c80-4ee0-a83c-224b9d60b755@efficios.com/
>
> Also added a new guard (srcu_lite) written by Peter Zijlstra
>
> Link: https://lore.kernel.org/all/20250715102912.GQ1613200@noisy.programming.kicks-ass.net/
>
> Cc: "Paul E. McKenney" <paulmck@...nel.org>
> Suggested-by: Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
> Signed-off-by: Steven Rostedt (Google) <rostedt@...dmis.org>
> ---
> Changes since v13: https://lore.kernel.org/20250708012359.172959778@kernel.org
>
> - Have the locking of the linked-list walk use guard(srcu_lite)
> (Peter Zijlstra)
>
> - Fixed up due to the new atomic_long logic.
>
> include/linux/srcu.h | 4 ++++
> kernel/unwind/deferred.c | 27 +++++++++++++++++++++------
> 2 files changed, 25 insertions(+), 6 deletions(-)
>
> diff --git a/include/linux/srcu.h b/include/linux/srcu.h
> index 900b0d5c05f5..879054b8bf87 100644
> --- a/include/linux/srcu.h
> +++ b/include/linux/srcu.h
> @@ -524,4 +524,8 @@ DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
> srcu_read_unlock(_T->lock, _T->idx),
> int idx)
>
> +DEFINE_LOCK_GUARD_1(srcu_lite, struct srcu_struct,
You need srcu_fast because srcu_lite is being removed. They are quite
similar, but srcu_fast is faster and is NMI-safe. (This last might or
might not matter here.)
See https://lore.kernel.org/all/20250716225418.3014815-3-paulmck@kernel.org/
for a srcu_fast_notrace, so something like this:
DEFINE_LOCK_GUARD_1(srcu_fast, struct srcu_struct,
_T->scp = srcu_read_lock_fast(_T->lock),
srcu_read_unlock_fast(_T->lock, _T->scp),
struct srcu_ctr __percpu *scp)
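
The call site would then just switch guard types, something like this
(untested sketch, same list walk as in the patch below):

	guard(srcu_fast)(&unwind_srcu);
	list_for_each_entry_srcu(work, &callbacks, list,
				 srcu_read_lock_held(&unwind_srcu)) {
		...
	}

And synchronize_srcu() waits for srcu_fast readers just as it does for
srcu_lite readers, so unwind_deferred_cancel() should not need changing.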
Other than that, it looks plausible.
Thanx, Paul
> + _T->idx = srcu_read_lock_lite(_T->lock),
> + srcu_read_unlock_lite(_T->lock, _T->idx),
> + int idx)
> #endif
> diff --git a/kernel/unwind/deferred.c b/kernel/unwind/deferred.c
> index 2311b725d691..353f7af610bf 100644
> --- a/kernel/unwind/deferred.c
> +++ b/kernel/unwind/deferred.c
> @@ -41,7 +41,7 @@ static inline bool try_assign_cnt(struct unwind_task_info *info, u32 cnt)
> #define UNWIND_MAX_ENTRIES \
> ((SZ_4K - sizeof(struct unwind_cache)) / sizeof(long))
>
> -/* Guards adding to and reading the list of callbacks */
> +/* Guards adding to or removing from the list of callbacks */
> static DEFINE_MUTEX(callback_mutex);
> static LIST_HEAD(callbacks);
>
> @@ -49,6 +49,7 @@ static LIST_HEAD(callbacks);
>
> /* Zero'd bits are available for assigning callback users */
> static unsigned long unwind_mask = RESERVED_BITS;
> +DEFINE_STATIC_SRCU(unwind_srcu);
>
> static inline bool unwind_pending(struct unwind_task_info *info)
> {
> @@ -174,8 +175,9 @@ static void unwind_deferred_task_work(struct callback_head *head)
>
> cookie = info->id.id;
>
> - guard(mutex)(&callback_mutex);
> - list_for_each_entry(work, &callbacks, list) {
> + guard(srcu_lite)(&unwind_srcu);
> + list_for_each_entry_srcu(work, &callbacks, list,
> + srcu_read_lock_held(&unwind_srcu)) {
> if (test_bit(work->bit, &bits)) {
> work->func(work, &trace, cookie);
> if (info->cache)
> @@ -213,7 +215,7 @@ int unwind_deferred_request(struct unwind_work *work, u64 *cookie)
> {
> struct unwind_task_info *info = &current->unwind_info;
> unsigned long old, bits;
> - unsigned long bit = BIT(work->bit);
> + unsigned long bit;
> int ret;
>
> *cookie = 0;
> @@ -230,6 +232,14 @@ int unwind_deferred_request(struct unwind_work *work, u64 *cookie)
> if (WARN_ON_ONCE(!CAN_USE_IN_NMI && in_nmi()))
> return -EINVAL;
>
> + /* Do not allow cancelled works to request again */
> + bit = READ_ONCE(work->bit);
> + if (WARN_ON_ONCE(bit < 0))
> + return -EINVAL;
> +
> + /* Only need the mask now */
> + bit = BIT(bit);
> +
> guard(irqsave)();
>
> *cookie = get_cookie(info);
> @@ -281,10 +291,15 @@ void unwind_deferred_cancel(struct unwind_work *work)
> return;
>
> guard(mutex)(&callback_mutex);
> - list_del(&work->list);
> + list_del_rcu(&work->list);
> +
> + /* Do not allow any more requests and prevent callbacks */
> + work->bit = -1;
>
> __clear_bit(bit, &unwind_mask);
>
> + synchronize_srcu(&unwind_srcu);
> +
> guard(rcu)();
> /* Clear this bit from all threads */
> for_each_process_thread(g, t) {
> @@ -307,7 +322,7 @@ int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
> work->bit = ffz(unwind_mask);
> __set_bit(work->bit, &unwind_mask);
>
> - list_add(&work->list, &callbacks);
> + list_add_rcu(&work->list, &callbacks);
> work->func = func;
> return 0;
> }
> --
> 2.47.2
>
>