Message-ID: <e70c457c-0ac6-174d-fa0f-807d8e563f40@quicinc.com>
Date: Tue, 21 Jun 2022 15:32:36 +0530
From: Neeraj Upadhyay <quic_neeraju@...cinc.com>
To: "Paul E. McKenney" <paulmck@...nel.org>, <rcu@...r.kernel.org>
CC: <linux-kernel@...r.kernel.org>, <kernel-team@...com>,
<rostedt@...dmis.org>,
"Joel Fernandes (Google)" <joel@...lfernandes.org>,
Uladzislau Rezki <urezki@...il.com>
Subject: Re: [PATCH rcu 09/12] rcu/kvfree: Remove useless monitor_todo flag
On 6/21/2022 3:50 AM, Paul E. McKenney wrote:
> From: "Joel Fernandes (Google)" <joel@...lfernandes.org>
>
> monitor_todo is not needed, as the work struct already tracks
> whether work is pending. Just rely on the schedule_delayed_work()
> helper, which does nothing if the work is already pending.
>
> Signed-off-by: Joel Fernandes (Google) <joel@...lfernandes.org>
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@...il.com>
> Signed-off-by: Paul E. McKenney <paulmck@...nel.org>
> ---
Reviewed-by: Neeraj Upadhyay <quic_neeraju@...cinc.com>

Thanks
Neeraj
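
For context on why the flag can go: schedule_delayed_work() internally
tests and sets the work item's pending bit and returns false without
queueing anything if the work is already scheduled, so callers already
get the "at most one pending instance" guarantee that monitor_todo
duplicated. A minimal, hypothetical module sketch of that behavior
(demo_fn/demo_work are illustrative names, not from this patch):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void demo_fn(struct work_struct *work)
{
	pr_info("demo work ran\n");
}

static DECLARE_DELAYED_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
	/* First call queues the work and returns true. */
	bool first = schedule_delayed_work(&demo_work, HZ);
	/*
	 * Second call sees the pending bit already set and returns
	 * false without queueing a duplicate -- no external flag
	 * such as monitor_todo is needed to prevent double-queueing.
	 */
	bool second = schedule_delayed_work(&demo_work, HZ);

	pr_info("first=%d second=%d\n", first, second);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The same property is what lets the patch drop the flag checks in
kvfree_call_rcu() and kfree_rcu_scheduler_running() below.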
> kernel/rcu/tree.c | 33 ++++++++++++++++-----------------
> 1 file changed, 16 insertions(+), 17 deletions(-)
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 5445b19b48408..7919d7b48fa6a 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -3216,7 +3216,6 @@ struct kfree_rcu_cpu_work {
> * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
> * @lock: Synchronize access to this structure
> * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
> - * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
> * @initialized: The @rcu_work fields have been initialized
> * @count: Number of objects for which GP not started
> * @bkvcache:
> @@ -3241,7 +3240,6 @@ struct kfree_rcu_cpu {
> struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
> raw_spinlock_t lock;
> struct delayed_work monitor_work;
> - bool monitor_todo;
> bool initialized;
> int count;
>
> @@ -3421,6 +3419,18 @@ static void kfree_rcu_work(struct work_struct *work)
> }
> }
>
> +static bool
> +need_offload_krc(struct kfree_rcu_cpu *krcp)
> +{
> + int i;
> +
> + for (i = 0; i < FREE_N_CHANNELS; i++)
> + if (krcp->bkvhead[i])
> + return true;
> +
> + return !!krcp->head;
> +}
> +
> /*
> * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
> */
> @@ -3477,9 +3487,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
> // of the channels that is still busy we should rearm the
> // work to repeat an attempt. Because previous batches are
> // still in progress.
> - if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head)
> - krcp->monitor_todo = false;
> - else
> + if (need_offload_krc(krcp))
> schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
>
> raw_spin_unlock_irqrestore(&krcp->lock, flags);
> @@ -3667,11 +3675,8 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
> WRITE_ONCE(krcp->count, krcp->count + 1);
>
> // Set timer to drain after KFREE_DRAIN_JIFFIES.
> - if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
> - !krcp->monitor_todo) {
> - krcp->monitor_todo = true;
> + if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
> schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
> - }
>
> unlock_return:
> krc_this_cpu_unlock(krcp, flags);
> @@ -3746,14 +3751,8 @@ void __init kfree_rcu_scheduler_running(void)
> struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
>
> raw_spin_lock_irqsave(&krcp->lock, flags);
> - if ((!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) ||
> - krcp->monitor_todo) {
> - raw_spin_unlock_irqrestore(&krcp->lock, flags);
> - continue;
> - }
> - krcp->monitor_todo = true;
> - schedule_delayed_work_on(cpu, &krcp->monitor_work,
> - KFREE_DRAIN_JIFFIES);
> + if (need_offload_krc(krcp))
> + schedule_delayed_work_on(cpu, &krcp->monitor_work, KFREE_DRAIN_JIFFIES);
> raw_spin_unlock_irqrestore(&krcp->lock, flags);
> }
> }