Message-ID: <20161115011338.GA12110@tardis.cn.ibm.com>
Date: Tue, 15 Nov 2016 09:13:38 +0800
From: Boqun Feng <boqun.feng@...il.com>
To: "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
Cc: linux-kernel@...r.kernel.org, mingo@...nel.org,
jiangshanlai@...il.com, dipankar@...ibm.com,
akpm@...ux-foundation.org, mathieu.desnoyers@...icios.com,
josh@...htriplett.org, tglx@...utronix.de, peterz@...radead.org,
rostedt@...dmis.org, dhowells@...hat.com, edumazet@...gle.com,
dvhart@...ux.intel.com, fweisbec@...il.com, oleg@...hat.com,
bobby.prani@...il.com
Subject: Re: [PATCH tip/core/rcu 3/5] rcu: Abstract dynticks extended
quiescent state enter/exit operations

Hi Paul,

On Mon, Nov 14, 2016 at 10:30:19AM -0800, Paul E. McKenney wrote:
> This commit is the third step towards full abstraction of all accesses
> to the ->dynticks counter, implementing the previously open-coded atomic
> add of 1 and entry checks in a new rcu_dynticks_eqs_enter() function, and
> the same but with exit checks in a new rcu_dynticks_eqs_exit() function.
> This abstraction will ease changes to the ->dynticks counter operation.
>
> Signed-off-by: Paul E. McKenney <paulmck@...ux.vnet.ibm.com>
> ---
> kernel/rcu/tree.c | 92 +++++++++++++++++++++++++++++++++++++++----------------
> 1 file changed, 66 insertions(+), 26 deletions(-)
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 9bc60b29ea5c..52e844902a43 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -279,6 +279,65 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
> };
>
> /*
> + * Record entry into an extended quiescent state. This is only to be
> + * called when not already in an extended quiescent state.
> + */
> +static void rcu_dynticks_eqs_enter(void)
> +{
> + struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
> +
> + /*
> + * CPUs seeing atomic_inc() must see prior RCU read-side critical
> + * sections, and we also must force ordering with the next idle
> + * sojourn.
> + */
> + smp_mb__before_atomic(); /* See above. */
> + atomic_inc(&rdtp->dynticks);
> + smp_mb__after_atomic(); /* See above. */
How about replacing the three lines above with the following?

	(void)atomic_inc_return(&rdtp->dynticks);

The *_return atomics provide full barrier semantics, so the explicit
smp_mb__{before,after}_atomic() calls would no longer be needed.
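
Just to make the suggestion concrete, here is a rough sketch of what
rcu_dynticks_eqs_enter() might look like with that change (untested,
purely illustrative; the debug check is kept as in your patch):

	static void rcu_dynticks_eqs_enter(void)
	{
		struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

		/*
		 * atomic_inc_return() is fully ordered, so CPUs seeing
		 * the increment must still see prior RCU read-side
		 * critical sections, and ordering with the next idle
		 * sojourn is still enforced.
		 */
		(void)atomic_inc_return(&rdtp->dynticks);
		WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
			     atomic_read(&rdtp->dynticks) & 0x1);
	}
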
> + WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
> + atomic_read(&rdtp->dynticks) & 0x1);
> +}
> +
> +/*
> + * Record exit from an extended quiescent state. This is only to be
> + * called from an extended quiescent state.
> + */
> +static void rcu_dynticks_eqs_exit(void)
> +{
> + struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
> +
> + /*
> + * CPUs seeing atomic_inc() must see prior idle sojourns,
> + * and we also must force ordering with the next RCU read-side
> + * critical section.
> + */
> + smp_mb__before_atomic(); /* See above. */
> + atomic_inc(&rdtp->dynticks);
> + smp_mb__after_atomic(); /* See above. */
Ditto here: rcu_dynticks_eqs_exit() could shrink the same way, just with
the inverted WARN_ON_ONCE() check.

Regards,
Boqun
> + WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
> + !(atomic_read(&rdtp->dynticks) & 0x1));
> +}
> +
> +/*
> + * Reset the current CPU's ->dynticks counter to indicate that the
> + * newly onlined CPU is no longer in an extended quiescent state.
> + * This will either leave the counter unchanged, or increment it
> + * to the next non-quiescent value.
> + *
> + * The non-atomic test/increment sequence works because the upper bits
> + * of the ->dynticks counter are manipulated only by the corresponding CPU,
> + * or when the corresponding CPU is offline.
> + */
> +static void rcu_dynticks_eqs_online(void)
> +{
> + struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
> +
> + if (atomic_read(&rdtp->dynticks) & 0x1)
> + return;
> + atomic_add(0x1, &rdtp->dynticks);
> +}
> +
> +/*
> * Snapshot the ->dynticks counter with full ordering so as to allow
> * stable comparison of this counter with past and future snapshots.
> */
> @@ -690,7 +749,7 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
> {
> struct rcu_state *rsp;
> struct rcu_data *rdp;
> - struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
> + struct rcu_dynticks __maybe_unused *rdtp = this_cpu_ptr(&rcu_dynticks);
>
> trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
> if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
> @@ -709,12 +768,7 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
> do_nocb_deferred_wakeup(rdp);
> }
> rcu_prepare_for_idle();
> - /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
> - smp_mb__before_atomic(); /* See above. */
> - atomic_inc(&rdtp->dynticks);
> - smp_mb__after_atomic(); /* Force ordering with next sojourn. */
> - WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
> - atomic_read(&rdtp->dynticks) & 0x1);
> + rcu_dynticks_eqs_enter();
> rcu_dynticks_task_enter();
>
> /*
> @@ -843,15 +897,10 @@ void rcu_irq_exit_irqson(void)
> */
> static void rcu_eqs_exit_common(long long oldval, int user)
> {
> - struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
> + struct rcu_dynticks __maybe_unused *rdtp = this_cpu_ptr(&rcu_dynticks);
>
> rcu_dynticks_task_exit();
> - smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
> - atomic_inc(&rdtp->dynticks);
> - /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
> - smp_mb__after_atomic(); /* See above. */
> - WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
> - !(atomic_read(&rdtp->dynticks) & 0x1));
> + rcu_dynticks_eqs_exit();
> rcu_cleanup_after_idle();
> trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
> if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
> @@ -998,11 +1047,7 @@ void rcu_nmi_enter(void)
> * period (observation due to Andy Lutomirski).
> */
> if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
> - smp_mb__before_atomic(); /* Force delay from prior write. */
> - atomic_inc(&rdtp->dynticks);
> - /* atomic_inc() before later RCU read-side crit sects */
> - smp_mb__after_atomic(); /* See above. */
> - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
> + rcu_dynticks_eqs_exit();
> incby = 1;
> }
> rdtp->dynticks_nmi_nesting += incby;
> @@ -1040,11 +1085,7 @@ void rcu_nmi_exit(void)
>
> /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
> rdtp->dynticks_nmi_nesting = 0;
> - /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
> - smp_mb__before_atomic(); /* See above. */
> - atomic_inc(&rdtp->dynticks);
> - smp_mb__after_atomic(); /* Force delay to next write. */
> - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
> + rcu_dynticks_eqs_enter();
> }
>
> /**
> @@ -3797,8 +3838,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
> init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
> rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
> rcu_sysidle_init_percpu_data(rdp->dynticks);
> - atomic_set(&rdp->dynticks->dynticks,
> - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
> + rcu_dynticks_eqs_online();
> raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
>
> /*
> --
> 2.5.2
>