Message-ID: <Zfr0XVpgugTK8MMi@pc636>
Date: Wed, 20 Mar 2024 15:36:13 +0100
From: Uladzislau Rezki <urezki@...il.com>
To: "Joel Fernandes (Google)" <joel@...lfernandes.org>
Cc: linux-kernel@...r.kernel.org, "Paul E. McKenney" <paulmck@...nel.org>,
Frederic Weisbecker <frederic@...nel.org>,
Neeraj Upadhyay <neeraj.upadhyay@...nel.org>,
Josh Triplett <josh@...htriplett.org>,
Boqun Feng <boqun.feng@...il.com>,
Steven Rostedt <rostedt@...dmis.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Lai Jiangshan <jiangshanlai@...il.com>,
Zqiang <qiang.zhang1211@...il.com>, urezki@...il.com,
neeraj.iitr10@...il.com, rcu@...r.kernel.org
Subject: Re: [PATCH v4] rcu/tree: Reduce wake up for synchronize_rcu() common case

On Tue, Mar 19, 2024 at 02:54:57PM -0400, Joel Fernandes (Google) wrote:
> In the synchronize_rcu() common case, we will have fewer than
> SR_MAX_USERS_WAKE_FROM_GP users per GP. Waking up the kworker just to
> free the last injected wait head is pointless, since at that point all
> the users have already been awakened.
>
> Introduce a new counter to track this and prevent the wakeup in the
> common case.
>
> Signed-off-by: Joel Fernandes (Google) <joel@...lfernandes.org>
> ---
> v1->v2: Rebase on paul/dev
> v2->v3: Additional optimization for wait_tail->next == NULL case.
> v3->v4: Apply clean ups from Vlad. Tested rcutorture all scenarios.
> ---
> kernel/rcu/tree.c | 35 ++++++++++++++++++++++++++++++-----
> kernel/rcu/tree.h | 1 +
> 2 files changed, 31 insertions(+), 5 deletions(-)
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 9fbb5ab57c84..f3193670fe42 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -96,6 +96,7 @@ static struct rcu_state rcu_state = {
> .ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
> .srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
> rcu_sr_normal_gp_cleanup_work),
> + .srs_cleanups_pending = ATOMIC_INIT(0),
> };
>
> /* Dump rcu_node combining tree at boot to verify correct setup. */
> @@ -1642,8 +1643,11 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
> * the done tail list manipulations are protected here.
> */
> done = smp_load_acquire(&rcu_state.srs_done_tail);
> - if (!done)
> + if (!done) {
> + /* See comments below. */
> + atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
> return;
> + }
>
> WARN_ON_ONCE(!rcu_sr_is_wait_head(done));
> head = done->next;
> @@ -1666,6 +1670,9 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
>
> rcu_sr_put_wait_head(rcu);
> }
> +
> + /* Order list manipulations with atomic access. */
> + atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
> }
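
The release/acquire pairing here took me a moment to map out, so for the
record, a minimal user-space sketch of the protocol as I read it (C11
atomics, invented names, not kernel code): the worker's release-decrement
is ordered after its list manipulations, so a fast path that
acquire-loads a zero counter also observes the completed list updates.

#include <stdatomic.h>
#include <stddef.h>

struct node { struct node *next; };

static atomic_int cleanups_pending;
static struct node *done_list;

/* Worker side: finish touching the list, then release-decrement. */
static void cleanup_worker(void)
{
	done_list = NULL;	/* stands in for the list manipulations */
	atomic_fetch_sub_explicit(&cleanups_pending, 1,
				  memory_order_release);
}

/*
 * Fast-path side: an acquire load that reads zero guarantees the
 * worker's list updates above are visible here as well.
 */
static int no_workers_in_flight(void)
{
	return atomic_load_explicit(&cleanups_pending,
				    memory_order_acquire) == 0;
}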
>
> /*
> @@ -1673,7 +1680,7 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
> */
> static void rcu_sr_normal_gp_cleanup(void)
> {
> - struct llist_node *wait_tail, *next, *rcu;
> + struct llist_node *wait_tail, *next = NULL, *rcu = NULL;
> int done = 0;
>
> wait_tail = rcu_state.srs_wait_tail;
> @@ -1699,16 +1706,34 @@ static void rcu_sr_normal_gp_cleanup(void)
> break;
> }
>
> - // concurrent sr_normal_gp_cleanup work might observe this update.
> - smp_store_release(&rcu_state.srs_done_tail, wait_tail);
> +	/*
> +	 * Fast path: no more users to process except putting the second-last
> +	 * wait head, and only when there are no workers in flight. If workers
> +	 * are in flight, they will remove the last wait head themselves.
> +	 *
> +	 * Note that the ACQUIRE orders the atomic access against the list
> +	 * manipulations done by the cleanup worker.
> +	 */
> + if (wait_tail->next && wait_tail->next->next == NULL &&
> + rcu_sr_is_wait_head(wait_tail->next) &&
> + !atomic_read_acquire(&rcu_state.srs_cleanups_pending)) {
> + rcu_sr_put_wait_head(wait_tail->next);
> + wait_tail->next = NULL;
> + }
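
To double-check my reading of the condition: it matches exactly the
layout where a single wait head, and nothing else, trails wait_tail:

	wait_tail -> WAIT_HEAD -> NULL

so the head can be put directly instead of bouncing it to the kworker,
provided no earlier cleanup worker is still in flight.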
> +
> + /* Concurrent sr_normal_gp_cleanup work might observe this update. */
> ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
> + smp_store_release(&rcu_state.srs_done_tail, wait_tail);
>
>  	/*
>  	 * We schedule a work item in order to perform final processing of
>  	 * outstanding users (if any are left) and to release the wait heads
>  	 * added by the rcu_sr_normal_gp_init() call.
>  	 */
> - queue_work(sync_wq, &rcu_state.srs_cleanup_work);
> + if (wait_tail->next) {
> + atomic_inc(&rcu_state.srs_cleanups_pending);
> + if (!queue_work(sync_wq, &rcu_state.srs_cleanup_work))
> + atomic_dec(&rcu_state.srs_cleanups_pending);
> + }
> }
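
One more note on the queueing pattern at the end: queue_work() returns
false when the work item is already pending, so the counter has to be
bumped optimistically and rolled back on that path. A stand-alone
analogue with invented names (fake_queue_work() standing in for
queue_work()):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int pending;
static atomic_bool queued;

/* Stand-in for queue_work(): returns false if already queued. */
static bool fake_queue_work(void)
{
	bool expected = false;
	return atomic_compare_exchange_strong(&queued, &expected, true);
}

static void maybe_queue_cleanup(bool work_left)
{
	if (!work_left)
		return;	/* common case: no kworker wakeup at all */

	atomic_fetch_add(&pending, 1);
	if (!fake_queue_work())
		atomic_fetch_sub(&pending, 1);	/* already queued; undo */
}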
>
> /*
> diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
> index bae7925c497f..affcb92a358c 100644
> --- a/kernel/rcu/tree.h
> +++ b/kernel/rcu/tree.h
> @@ -420,6 +420,7 @@ struct rcu_state {
> struct llist_node *srs_done_tail; /* ready for GP users. */
> struct sr_wait_node srs_wait_nodes[SR_NORMAL_GP_WAIT_HEAD_MAX];
> struct work_struct srs_cleanup_work;
> + atomic_t srs_cleanups_pending; /* srs inflight worker cleanups. */
> };
>
> /* Values for rcu_state structure's gp_flags field. */
> --
> 2.34.1
>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@...il.com>
--
Uladzislau Rezki