Message-ID: <20190816173504.GD10481@google.com>
Date: Fri, 16 Aug 2019 13:35:04 -0400
From: Joel Fernandes <joel@...lfernandes.org>
To: "Paul E. McKenney" <paulmck@...ux.ibm.com>
Cc: linux-kernel@...r.kernel.org, rcu@...r.kernel.org,
frederic@...nel.org
Subject: Re: [PATCH -rcu dev 1/3] rcu/tree: tick_dep_set/clear_cpu should
 accept bits instead of masks

On Fri, Aug 16, 2019 at 10:25:29AM -0700, Paul E. McKenney wrote:
> On Thu, Aug 15, 2019 at 10:53:09PM -0400, Joel Fernandes (Google) wrote:
> > The tick_dep_set_cpu()/tick_dep_clear_cpu() and tick_dep_set_task()/
> > tick_dep_clear_task() APIs take a bit number (TICK_DEP_BIT_*), not a
> > mask (TICK_DEP_MASK_*). This commit fixes the issue by passing
> > TICK_DEP_BIT_RCU instead of TICK_DEP_MASK_RCU at the call sites.
> >
> > Signed-off-by: Joel Fernandes (Google) <joel@...lfernandes.org>
>
> And I am squashing these into their respective commits with attribution.
> Good eyes, thank you very much!!!
Thank you!!
- Joel
> Thanx, Paul
>
> > ---
> > kernel/rcu/tree.c | 29 +++++++++++++++++------------
> > 1 file changed, 17 insertions(+), 12 deletions(-)
> >
> > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> > index 0512de9ead20..322b1b57967c 100644
> > --- a/kernel/rcu/tree.c
> > +++ b/kernel/rcu/tree.c
> > @@ -829,7 +829,7 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
> > !rdp->dynticks_nmi_nesting &&
> > rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
> > rdp->rcu_forced_tick = true;
> > - tick_dep_set_cpu(rdp->cpu, TICK_DEP_MASK_RCU);
> > + tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
> > }
> > trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
> > rdp->dynticks_nmi_nesting,
> > @@ -898,7 +898,7 @@ void rcu_irq_enter_irqson(void)
> > void rcu_disable_tick_upon_qs(struct rcu_data *rdp)
> > {
> > if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
> > - tick_dep_clear_cpu(rdp->cpu, TICK_DEP_MASK_RCU);
> > + tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
> > rdp->rcu_forced_tick = false;
> > }
> > }
> > @@ -2123,8 +2123,9 @@ int rcutree_dead_cpu(unsigned int cpu)
> > do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
> >
> > // Stop-machine done, so allow nohz_full to disable tick.
> > - for_each_online_cpu(c)
> > - tick_dep_clear_cpu(c, TICK_DEP_MASK_RCU);
> > + for_each_online_cpu(c) {
> > + tick_dep_clear_cpu(c, TICK_DEP_BIT_RCU);
> > + }
> > return 0;
> > }
> >
> > @@ -2175,8 +2176,9 @@ static void rcu_do_batch(struct rcu_data *rdp)
> > rcu_nocb_unlock_irqrestore(rdp, flags);
> >
> > /* Invoke callbacks. */
> > - if (IS_ENABLED(CONFIG_NO_HZ_FULL))
> > - tick_dep_set_task(current, TICK_DEP_MASK_RCU);
> > + if (IS_ENABLED(CONFIG_NO_HZ_FULL)) {
> > + tick_dep_set_task(current, TICK_DEP_BIT_RCU);
> > + }
> > rhp = rcu_cblist_dequeue(&rcl);
> > for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
> > debug_rcu_head_unqueue(rhp);
> > @@ -2243,8 +2245,9 @@ static void rcu_do_batch(struct rcu_data *rdp)
> > /* Re-invoke RCU core processing if there are callbacks remaining. */
> > if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
> > invoke_rcu_core();
> > - if (IS_ENABLED(CONFIG_NO_HZ_FULL))
> > - tick_dep_clear_task(current, TICK_DEP_MASK_RCU);
> > + if (IS_ENABLED(CONFIG_NO_HZ_FULL)) {
> > + tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
> > + }
> > }
> >
> > /*
> > @@ -3118,8 +3121,9 @@ int rcutree_online_cpu(unsigned int cpu)
> > rcutree_affinity_setting(cpu, -1);
> >
> > // Stop-machine done, so allow nohz_full to disable tick.
> > - for_each_online_cpu(c)
> > - tick_dep_clear_cpu(c, TICK_DEP_MASK_RCU);
> > + for_each_online_cpu(c) {
> > + tick_dep_clear_cpu(c, TICK_DEP_BIT_RCU);
> > + }
> > return 0;
> > }
> >
> > @@ -3143,8 +3147,9 @@ int rcutree_offline_cpu(unsigned int cpu)
> > rcutree_affinity_setting(cpu, cpu);
> >
> > // nohz_full CPUs need the tick for stop-machine to work quickly
> > - for_each_online_cpu(c)
> > - tick_dep_set_cpu(c, TICK_DEP_MASK_RCU);
> > + for_each_online_cpu(c) {
> > + tick_dep_set_cpu(c, TICK_DEP_BIT_RCU);
> > + }
> > return 0;
> > }
> >
> > --
> > 2.23.0.rc1.153.gdeed80330f-goog
> >
>
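
For anyone reading along, here is a minimal, stand-alone user-space sketch of
why handing a TICK_DEP_MASK_* value to an API that expects a bit index goes
wrong. The fake_tick_dep_set() helper below is a hypothetical stand-in for the
real tick_dep_set_cpu(), and the TICK_DEP_BIT_RCU value is assumed; the only
thing taken from tick.h is the usual relationship
TICK_DEP_MASK_RCU == (1 << TICK_DEP_BIT_RCU).

#include <stdio.h>

/* Assumed bit index; the mask is derived from it as in include/linux/tick.h. */
enum tick_dep_bits { TICK_DEP_BIT_RCU = 4 };
#define TICK_DEP_MASK_RCU (1u << TICK_DEP_BIT_RCU)      /* 0x10 */

static unsigned int tick_dep_mask;                      /* stand-in for the dependency word */

/* Stand-in for tick_dep_set_cpu(): shifts its argument, i.e. expects a bit index. */
static void fake_tick_dep_set(unsigned int bit)
{
	tick_dep_mask |= 1u << bit;
}

int main(void)
{
	fake_tick_dep_set(TICK_DEP_BIT_RCU);             /* sets bit 4  -> 0x10, intended */
	printf("bit  -> %#x\n", tick_dep_mask);

	tick_dep_mask = 0;
	fake_tick_dep_set(TICK_DEP_MASK_RCU);            /* sets bit 16 -> 0x10000, wrong */
	printf("mask -> %#x\n", tick_dep_mask);
	return 0;
}

Running this prints 0x10 for the bit-index call and 0x10000 for the mask call,
i.e. the mask variant ends up setting an unrelated dependency bit, which is why
the patch switches every call site over to TICK_DEP_BIT_RCU.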