Message-ID: <20120615212527.GQ31184@leaf>
Date: Fri, 15 Jun 2012 14:25:27 -0700
From: Josh Triplett <josh@...htriplett.org>
To: "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
Cc: linux-kernel@...r.kernel.org, mingo@...e.hu, laijs@...fujitsu.com,
dipankar@...ibm.com, akpm@...ux-foundation.org,
mathieu.desnoyers@...ymtl.ca, niv@...ibm.com, tglx@...utronix.de,
peterz@...radead.org, rostedt@...dmis.org, Valdis.Kletnieks@...edu,
dhowells@...hat.com, eric.dumazet@...il.com, darren@...art.com,
fweisbec@...il.com, patches@...aro.org
Subject: Re: [PATCH tip/core/rcu 13/14] rcu: Split RCU core processing out of
__call_rcu()
On Fri, Jun 15, 2012 at 01:13:14PM -0700, Paul E. McKenney wrote:
> From: "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
>
> The __call_rcu() function is a bit overweight, so this commit splits
> it into actual enqueuing of and accounting for the callback (__call_rcu())
> and associated RCU-core processing (__call_rcu_core()).
>
> Signed-off-by: Paul E. McKenney <paulmck@...ux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@...htriplett.org>
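
For readers skimming the patch: after the split, __call_rcu() does only
the enqueue-and-account step, then hands the "does the RCU core need
poking?" decisions to __call_rcu_core().  The caller-side view is
unchanged; ordinary usage still looks like the sketch below (standard
call_rcu() usage, not specific to this patch; struct foo and the foo_*
functions are made-up names):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int data;
		struct rcu_head rcu;	/* storage for the RCU callback */
	};

	static void foo_reclaim(struct rcu_head *head)
	{
		/* Runs after a grace period; no reader can still hold a reference. */
		kfree(container_of(head, struct foo, rcu));
	}

	static void foo_retire(struct foo *fp)
	{
		/* Enqueues foo_reclaim via __call_rcu(..., lazy=0). */
		call_rcu(&fp->rcu, foo_reclaim);
	}
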
>  kernel/rcutree.c |   55 ++++++++++++++++++++++++++++++++++++++++++++++++-----
>  1 files changed, 49 insertions(+), 6 deletions(-)
>
> diff --git a/kernel/rcutree.c b/kernel/rcutree.c
> index 9419ebf..6940a81 100644
> --- a/kernel/rcutree.c
> +++ b/kernel/rcutree.c
> @@ -1858,9 +1858,11 @@ static void invoke_rcu_core(void)
>  	raise_softirq(RCU_SOFTIRQ);
>  }
>  
> -static void
> -__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
> -	   struct rcu_state *rsp, bool lazy)
> +/*
> + * Handle any core-RCU processing required by a call_rcu() invocation.
> + */
> +static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
> +			    struct rcu_head *head, unsigned long flags)
>  {
>  	unsigned long flags;
>  	struct rcu_data *rdp;
> @@ -1905,10 +1907,8 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
>  		invoke_rcu_core();
>  
>  	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
> -	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id())) {
> -		local_irq_restore(flags);
> +	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
>  		return;
> -	}
>  
>  	/*
>  	 * Force the grace period if too many callbacks or too long waiting.
> @@ -1941,6 +1941,49 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
>  		}
>  	} else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
>  		force_quiescent_state(rsp, 1);
> +}
> +
> +static void
> +__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
> +	   struct rcu_state *rsp, bool lazy)
> +{
> +	unsigned long flags;
> +	struct rcu_data *rdp;
> +
> +	WARN_ON_ONCE((unsigned long)head & 0x3);	/* Misaligned rcu_head! */
> +	debug_rcu_head_queue(head);
> +	head->func = func;
> +	head->next = NULL;
> +
> +	smp_mb(); /* Ensure RCU update seen before callback registry. */
> +
> +	/*
> +	 * Opportunistically note grace-period endings and beginnings.
> +	 * Note that we might see a beginning right after we see an
> +	 * end, but never vice versa, since this CPU has to pass through
> +	 * a quiescent state betweentimes.
> +	 */
> +	local_irq_save(flags);
> +	rdp = this_cpu_ptr(rsp->rda);
> +
> +	/* Add the callback to our list. */
> +	ACCESS_ONCE(rdp->qlen)++;
> +	if (lazy)
> +		rdp->qlen_lazy++;
> +	else
> +		rcu_idle_count_callbacks_posted();
> +	smp_mb(); /* Count before adding callback for rcu_barrier(). */
> +	*rdp->nxttail[RCU_NEXT_TAIL] = head;
> +	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
> +
> +	if (__is_kfree_rcu_offset((unsigned long)func))
> +		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
> +					 rdp->qlen_lazy, rdp->qlen);
> +	else
> +		trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
> +
> +	/* Go handle any RCU core processing required. */
> +	__call_rcu_core(rsp, rdp, head, flags);
>  	local_irq_restore(flags);
>  }
>
> --
> 1.7.8
>
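
Two details above that may be worth spelling out for other readers.
First, the smp_mb() noted "Count before adding callback for
rcu_barrier()" orders the ->qlen increment before the callback is
linked into the list, so that rcu_barrier()'s queue-length snapshot
cannot miss a just-posted callback.  Second, the qlen_lazy accounting
and the __is_kfree_rcu_offset() test handle kfree_rcu(), which passes
the offset of the rcu_head within the enclosing structure in place of
a real function pointer, marking a callback that does nothing but free
memory.  A sketch, reusing the hypothetical struct foo from above:

	/*
	 * kfree_rcu() encodes offsetof(struct foo, rcu) where a callback
	 * pointer would normally go; __call_rcu() detects this via
	 * __is_kfree_rcu_offset() and counts the callback in ->qlen_lazy.
	 */
	static void foo_retire_lazy(struct foo *fp)
	{
		kfree_rcu(fp, rcu);	/* lazy: only frees memory */
	}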