Message-ID: <2001761952.217.1525727873194.JavaMail.zimbra@efficios.com>
Date: Mon, 7 May 2018 17:17:53 -0400 (EDT)
From: Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
To: "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
Cc: Joel Fernandes <joelaf@...gle.com>,
linux-kernel <linux-kernel@...r.kernel.org>,
"Joel Fernandes, Google" <joel@...lfernandes.org>,
rostedt <rostedt@...dmis.org>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Tom Zanussi <tom.zanussi@...ux.intel.com>,
Namhyung Kim <namhyung@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Boqun Feng <boqun.feng@...il.com>,
fweisbec <fweisbec@...il.com>,
Randy Dunlap <rdunlap@...radead.org>,
Masami Hiramatsu <mhiramat@...nel.org>,
kbuild test robot <fengguang.wu@...el.com>,
baohong liu <baohong.liu@...el.com>,
vedang patel <vedang.patel@...el.com>,
kernel-team <kernel-team@...roid.com>
Subject: Re: [PATCH RFC v6 4/5] tracepoint: Make rcuidle tracepoint callers
use SRCU
----- On May 7, 2018, at 5:08 PM, Paul E. McKenney paulmck@...ux.vnet.ibm.com wrote:
> On Mon, May 07, 2018 at 01:41:42PM -0700, Joel Fernandes wrote:
>> From: "Joel Fernandes (Google)" <joel@...lfernandes.org>
>>
>> In recent tests with IRQ on/off tracepoints, a large performance
>> overhead of ~10% was noticed when running hackbench. This was root
>> caused to the calls to rcu_irq_enter_irqson and rcu_irq_exit_irqson
>> from the tracepoint code. Following a long discussion on the list [1]
>> about this, we concluded that SRCU is a better alternative for use
>> during RCU idle. Although it does involve extra barriers, it is
>> lighter than the sched-RCU version, which has to make additional RCU
>> calls to tell RCU that the CPU is momentarily non-idle before entering
>> RCU read-side sections.
>>
>> In this patch, we change the underlying implementation of the
>> trace_*_rcuidle API to use SRCU. This has been shown to improve
>> performance a lot for the high-frequency irq enable/disable
>> tracepoints.
>>
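Side note for readers following along: here is a rough sketch (mine,
not part of the patch, with made-up function names) contrasting the two
read-side shapes the commit message refers to. tracepoint_srcu is the
SRCU domain this patch defines in kernel/tracepoint.c, and the _notrace
SRCU read-side helpers are the ones the series uses:

	/* Old rcuidle path: force RCU to treat the CPU as non-idle. */
	static inline void old_rcuidle_read_side(void)
	{
		rcu_irq_enter_irqson();      /* heavyweight: updates RCU dyntick-idle state */
		rcu_read_lock_sched_notrace();
		/* ... dereference (tp)->funcs and call the probes ... */
		rcu_read_unlock_sched_notrace();
		rcu_irq_exit_irqson();
	}

	/* New rcuidle path: SRCU readers are legal even in RCU-idle context. */
	static inline void new_rcuidle_read_side(void)
	{
		int idx;

		idx = srcu_read_lock_notrace(&tracepoint_srcu);
		preempt_disable_notrace();   /* keep semantics consistent with the !rcuidle path */
		/* ... srcu_dereference_notrace((tp)->funcs, &tracepoint_srcu) and call the probes ... */
		preempt_enable_notrace();
		srcu_read_unlock_notrace(&tracepoint_srcu, idx);
	}
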
>> Test: Tested idle and preempt/irq tracepoints.
>>
>> Here are some performance numbers:
>>
>> With the following run 30 times on a single-core x86 QEMU instance
>> with 1GB of memory:
>> hackbench -g 4 -f 2 -l 3000
>>
>> Completion times in seconds. CONFIG_PROVE_LOCKING=y.
>>
>> No patches (without this series)
>> Mean: 3.048
>> Median: 3.025
>> Std Dev: 0.064
>>
>> With Lockdep using irq tracepoints with RCU implementation:
>> Mean: 3.451 (-11.66%)
>> Median: 3.447 (-12.22%)
>> Std Dev: 0.049
>>
>> With Lockdep using irq tracepoints with SRCU implementation (this series):
>> Mean: 3.020 (I would consider the improvement against the "without
>> this series" case as just noise).
>> Median: 3.013
>> Std Dev: 0.033
>>
>> [1] https://patchwork.kernel.org/patch/10344297/
>>
>> Cc: Steven Rostedt <rostedt@...dmis.org>
>> Cc: Peter Zijlstra <peterz@...radead.org>
>> Cc: Ingo Molnar <mingo@...hat.com>
>> Cc: Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
>> Cc: Tom Zanussi <tom.zanussi@...ux.intel.com>
>> Cc: Namhyung Kim <namhyung@...nel.org>
>> Cc: Thomas Gleixner <tglx@...utronix.de>
>> Cc: Boqun Feng <boqun.feng@...il.com>
>> Cc: Paul McKenney <paulmck@...ux.vnet.ibm.com>
>> Cc: Frederic Weisbecker <fweisbec@...il.com>
>> Cc: Randy Dunlap <rdunlap@...radead.org>
>> Cc: Masami Hiramatsu <mhiramat@...nel.org>
>> Cc: Fengguang Wu <fengguang.wu@...el.com>
>> Cc: Baohong Liu <baohong.liu@...el.com>
>> Cc: Vedang Patel <vedang.patel@...el.com>
>> Cc: kernel-team@...roid.com
>> Signed-off-by: Joel Fernandes (Google) <joel@...lfernandes.org>
>> ---
>> include/linux/tracepoint.h | 46 +++++++++++++++++++++++++++++++-------
>> kernel/tracepoint.c | 15 ++++++++++++-
>> 2 files changed, 52 insertions(+), 9 deletions(-)
>>
>> diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
>> index c94f466d57ef..f56f290cf8eb 100644
>> --- a/include/linux/tracepoint.h
>> +++ b/include/linux/tracepoint.h
>> @@ -15,6 +15,7 @@
>> */
>>
>> #include <linux/smp.h>
>> +#include <linux/srcu.h>
>> #include <linux/errno.h>
>> #include <linux/types.h>
>> #include <linux/cpumask.h>
>> @@ -33,6 +34,8 @@ struct trace_eval_map {
>>
>> #define TRACEPOINT_DEFAULT_PRIO 10
>>
>> +extern struct srcu_struct tracepoint_srcu;
>> +
>> extern int
>> tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
>> extern int
>> @@ -77,6 +80,9 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb)
>> */
>> static inline void tracepoint_synchronize_unregister(void)
>> {
>> +#ifdef CONFIG_TRACEPOINTS
>> + synchronize_srcu(&tracepoint_srcu);
>> +#endif
>> synchronize_sched();
>> }
>>
>> @@ -129,18 +135,38 @@ extern void syscall_unregfunc(void);
>> * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
>> * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
>> */
>> -#define __DO_TRACE(tp, proto, args, cond, rcucheck) \
>> +#define __DO_TRACE(tp, proto, args, cond, rcuidle) \
>> do { \
>> struct tracepoint_func *it_func_ptr; \
>> void *it_func; \
>> void *__data; \
>> + int __maybe_unused idx = 0; \
>> \
>> if (!(cond)) \
>> return; \
>> - if (rcucheck) \
>> - rcu_irq_enter_irqson(); \
>> - rcu_read_lock_sched_notrace(); \
>> - it_func_ptr = rcu_dereference_sched((tp)->funcs); \
>> + \
>> + /* \
>> + * For rcuidle callers, use srcu since sched-rcu \
>> + * doesn't work from the idle path. \
>> + */ \
>> + if (rcuidle) { \
>> + if (in_nmi()) { \
>> + WARN_ON_ONCE(1); \
>> + return; /* no srcu from nmi */ \
>> + } \
>> + \
>> + idx = srcu_read_lock_notrace(&tracepoint_srcu); \
>> + it_func_ptr = \
>> + srcu_dereference_notrace((tp)->funcs, \
>> + &tracepoint_srcu); \
>> + /* To keep it consistent with !rcuidle path */ \
>> + preempt_disable_notrace(); \
>> + } else { \
>> + rcu_read_lock_sched_notrace(); \
>> + it_func_ptr = \
>> + rcu_dereference_sched((tp)->funcs); \
>> + } \
>> + \
>> if (it_func_ptr) { \
>> do { \
>> it_func = (it_func_ptr)->func; \
>> @@ -148,9 +174,13 @@ extern void syscall_unregfunc(void);
>> ((void(*)(proto))(it_func))(args); \
>> } while ((++it_func_ptr)->func); \
>> } \
>> - rcu_read_unlock_sched_notrace(); \
>> - if (rcucheck) \
>> - rcu_irq_exit_irqson(); \
>> + \
>> + if (rcuidle) { \
>
> Don't we also need an in_nmi() check here in order to avoid unbalanced
> srcu_read_unlock_notrace() calls?
AFAIU the "return;" in the if (in_nmi()) branch above ensures the code
that follows is never executed, so the unlock cannot become unbalanced.
The diff output is a bit confusing because of the preprocessor macros,
but in reality this is all part of the same static inline function.
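
If it helps, here is roughly what the rcuidle case expands to for a
hypothetical tracepoint "foo" (hand-simplified, not the actual
preprocessor output):

	static inline void trace_foo_rcuidle(proto)
	{
		int idx = 0;

		if (!(cond))
			return;

		if (in_nmi()) {
			WARN_ON_ONCE(1);
			return;		/* the srcu_read_unlock below is never reached */
		}

		idx = srcu_read_lock_notrace(&tracepoint_srcu);
		/* srcu_dereference_notrace((tp)->funcs, &tracepoint_srcu) */
		preempt_disable_notrace();

		/* ... iterate it_func_ptr and call the probes ... */

		preempt_enable_notrace();
		srcu_read_unlock_notrace(&tracepoint_srcu, idx);
	}

So the early return happens before the srcu_read_lock, and every path
that reaches the unlock has gone through the matching lock.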
Thanks,
Mathieu
>
> Thanx, Paul
>
>> + preempt_enable_notrace(); \
>> + srcu_read_unlock_notrace(&tracepoint_srcu, idx);\
>> + } else { \
>> + rcu_read_unlock_sched_notrace(); \
>> + } \
>> } while (0)
>>
>> #ifndef MODULE
>> diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
>> index 671b13457387..2089f579f790 100644
>> --- a/kernel/tracepoint.c
>> +++ b/kernel/tracepoint.c
>> @@ -31,6 +31,9 @@
>> extern struct tracepoint * const __start___tracepoints_ptrs[];
>> extern struct tracepoint * const __stop___tracepoints_ptrs[];
>>
>> +DEFINE_SRCU(tracepoint_srcu);
>> +EXPORT_SYMBOL_GPL(tracepoint_srcu);
>> +
>> /* Set to 1 to enable tracepoint debug output */
>> static const int tracepoint_debug;
>>
>> @@ -67,16 +70,26 @@ static inline void *allocate_probes(int count)
>> return p == NULL ? NULL : p->probes;
>> }
>>
>> -static void rcu_free_old_probes(struct rcu_head *head)
>> +static void srcu_free_old_probes(struct rcu_head *head)
>> {
>> kfree(container_of(head, struct tp_probes, rcu));
>> }
>>
>> +static void rcu_free_old_probes(struct rcu_head *head)
>> +{
>> + call_srcu(&tracepoint_srcu, head, srcu_free_old_probes);
>> +}
>> +
>> static inline void release_probes(struct tracepoint_func *old)
>> {
>> if (old) {
>> struct tp_probes *tp_probes = container_of(old,
>> struct tp_probes, probes[0]);
>> + /*
>> + * Tracepoint probes are protected by both sched RCU and SRCU,
>> + * by calling the SRCU callback in the sched RCU callback we
>> + * cover both cases. So lets chain the SRCU and RCU callbacks.
>> + */
>> call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes);
>> }
>> }
>> --
>> 2.17.0.441.gb46fe60e1d-goog
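
On the release_probes() chaining above, for completeness: the freed
probe array only goes back to the allocator after both a sched-RCU and
an SRCU grace period have elapsed, i.e. roughly:

	call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes);
		/* ...after a sched-RCU grace period... */
	rcu_free_old_probes(): call_srcu(&tracepoint_srcu, head, srcu_free_old_probes);
		/* ...after an SRCU grace period... */
	srcu_free_old_probes(): kfree(tp_probes);

which covers readers on either flavor of the read side.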
--
Mathieu Desnoyers
EfficiOS Inc.
http://www.efficios.com