[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20140902203743.GL5001@linux.vnet.ibm.com>
Date: Tue, 2 Sep 2014 13:37:44 -0700
From: "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
To: Christoph Lameter <cl@...ux.com>
Cc: Frederic Weisbecker <fweisbec@...il.com>,
linux-kernel@...r.kernel.org
Subject: Re: [RFC] dynticks: dynticks_idle is only modified locally use
this_cpu ops
On Tue, Sep 02, 2014 at 03:14:43PM -0500, Christoph Lameter wrote:
>
> Since dynticks_idle is only ever modified by the local CPU, we do
> not need to use an atomic there. The weak "atomicity" of this_cpu
> ops is sufficient, since there is no other CPU modifying the variable.
>
> [This is a cautious patch that leaves the barriers in place]
Actually, not so cautious. On x86:
#define smp_mb__before_atomic() barrier()
#define smp_mb__after_atomic() barrier()
But yes, in theory, something like this can work if appropriate memory
barriers are put in place. In practice, this sort of change needs
profound testing on multiple architectures.
Thanx, Paul
> Signed-off-by: Christoph Lameter <cl@...ux.com>
>
> Index: linux/kernel/rcu/tree.c
> ===================================================================
> --- linux.orig/kernel/rcu/tree.c
> +++ linux/kernel/rcu/tree.c
> @@ -213,7 +213,7 @@ static DEFINE_PER_CPU(struct rcu_dyntick
> .dynticks = ATOMIC_INIT(1),
> #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
> .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
> - .dynticks_idle = ATOMIC_INIT(1),
> + .dynticks_idle = 1,
> #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
> };
>
> Index: linux/kernel/rcu/tree.h
> ===================================================================
> --- linux.orig/kernel/rcu/tree.h
> +++ linux/kernel/rcu/tree.h
> @@ -91,7 +91,7 @@ struct rcu_dynticks {
> #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
> long long dynticks_idle_nesting;
> /* irq/process nesting level from idle. */
> - atomic_t dynticks_idle; /* Even value for idle, else odd. */
> + long dynticks_idle; /* Even value for idle, else odd. */
> /* "Idle" excludes userspace execution. */
> unsigned long dynticks_idle_jiffies;
> /* End of last non-NMI non-idle period. */
> Index: linux/kernel/rcu/tree_plugin.h
> ===================================================================
> --- linux.orig/kernel/rcu/tree_plugin.h
> +++ linux/kernel/rcu/tree_plugin.h
> @@ -2644,9 +2644,9 @@ static void rcu_sysidle_enter(int irq)
> j = jiffies;
> ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
> smp_mb__before_atomic();
> - atomic_inc(&rdtp->dynticks_idle);
> + this_cpu_inc(rcu_dynticks.dynticks_idle);
> smp_mb__after_atomic();
> - WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
> + WARN_ON_ONCE(__this_cpu_read(rcu_dynticks.dynticks_idle) & 0x1);
> }
>
> /*
> @@ -2712,9 +2712,9 @@ static void rcu_sysidle_exit(int irq)
>
> /* Record end of idle period. */
> smp_mb__before_atomic();
> - atomic_inc(&rdtp->dynticks_idle);
> + this_cpu_inc(rcu_dynticks.dynticks_idle);
> smp_mb__after_atomic();
> - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
> + WARN_ON_ONCE(!(__this_cpu_read(rcu_dynticks.dynticks_idle) & 0x1));
>
> /*
> * If we are the timekeeping CPU, we are permitted to be non-idle
> @@ -2755,7 +2755,7 @@ static void rcu_sysidle_check_cpu(struct
> WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
>
> /* Pick up current idle and NMI-nesting counter and check. */
> - cur = atomic_read(&rdtp->dynticks_idle);
> + cur = rdtp->dynticks_idle;
> if (cur & 0x1) {
> *isidle = false; /* We are not idle! */
> return;
>
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists