Message-ID: <20160902091403.09d196dc@gandalf.local.home>
Date:   Fri, 2 Sep 2016 09:14:03 -0400
From:   Steven Rostedt <rostedt@...dmis.org>
To:     Binoy Jayan <binoy.jayan@...aro.org>
Cc:     Ingo Molnar <mingo@...hat.com>,
        Daniel Wagner <daniel.wagner@...-carit.de>,
        Arnd Bergmann <arnd@...db.de>, linux-kernel@...r.kernel.org,
        Masami <masami.hiramatsu@...aro.org>
Subject: Re: [PATCH v5 3/4] tracing: Add trace_irqsoff tracepoints

On Fri,  2 Sep 2016 18:07:30 +0530
Binoy Jayan <binoy.jayan@...aro.org> wrote:

> This work is based on work by Daniel Wagner. A few tracepoints are added
> at the end of the critical sections. With the hist trigger in place,
> histogram plots can be generated with a per-cpu breakdown of the events
> captured. This is based on the Linux kernel's event infrastructure.
> 
> The following filter(s) may be used
> 
> 'hist:key=latency.log2:val=hitcount:sort=latency'
> 'hist:key=ltype,latency:val=hitcount:sort=latency if cpu==1'
> 'hist:key=ltype:val=latency:sort=ltype if ltype==0 && cpu==2'
> 
> Where ltype is:
> 0: IRQSOFF latency
> 1: PREEMPTOFF latency
> 2: Critical timings
> 
> This captures only the latencies introduced by disabled irqs and
> preemption. Additional per-process data has to be captured to calculate
> the effective latencies introduced for individual processes.
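
For context, a hist trigger like the ones above is applied by writing it to
the event's trigger file in tracefs and reading the result back from the
hist file. A minimal sketch, assuming the usual debugfs mount point and the
latency_preempt event this patch adds:

  # cd /sys/kernel/debug/tracing
  # echo 'hist:key=latency.log2:val=hitcount:sort=latency' > \
        events/latency/latency_preempt/trigger
  # cat events/latency/latency_preempt/hist

(The same files live under /sys/kernel/tracing where tracefs is mounted
there; the paths are only illustrative.)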
> 
> Initial work - latency.patch
> 
> [1] https://git.kernel.org/cgit/linux/kernel/git/rt/linux-stable-rt.git/commit/?h=v3.14-rt-rebase&id=56d50cc34943bbba12b8c5942ee1ae3b29f73acb
> 
> Signed-off-by: Binoy Jayan <binoy.jayan@...aro.org>
> ---
>  include/trace/events/latency.h | 50 ++++++++++++++++++++++++++++++++++++++++++
>  kernel/trace/trace_irqsoff.c   | 35 +++++++++++++++++++++++++++++
>  2 files changed, 85 insertions(+)
>  create mode 100644 include/trace/events/latency.h
> 
> diff --git a/include/trace/events/latency.h b/include/trace/events/latency.h
> new file mode 100644
> index 0000000..ca57f06
> --- /dev/null
> +++ b/include/trace/events/latency.h
> @@ -0,0 +1,50 @@
> +#undef TRACE_SYSTEM
> +#define TRACE_SYSTEM latency
> +
> +#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ)
> +#define _TRACE_HIST_H
> +
> +#include <linux/tracepoint.h>
> +
> +#ifndef __TRACE_LATENCY_TYPE
> +#define __TRACE_LATENCY_TYPE
> +enum latency_type {
> +	LT_IRQ,
> +	LT_PREEMPT,
> +	LT_CRITTIME,
> +	LT_MAX
> +};
> +#define show_ltype(type)			\
> +	__print_symbolic(type,			\
> +		{ LT_IRQ,	"IRQ" },	\
> +		{ LT_PREEMPT,	"PREEMPT" },	\
> +		{ LT_PREEMPT,	"CRIT_TIME" })

Hmm, you have LT_PREEMPT twice above.

Also please add:

TRACE_DEFINE_ENUM(LT_IRQ);
TRACE_DEFINE_ENUM(LT_PREEMPT);
TRACE_DEFINE_ENUM(LT_CRITTIME);
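
That is, roughly this fragment in latency.h, shown here only as a sketch
with the duplicate entry fixed as noted above:

TRACE_DEFINE_ENUM(LT_IRQ);
TRACE_DEFINE_ENUM(LT_PREEMPT);
TRACE_DEFINE_ENUM(LT_CRITTIME);

#define show_ltype(type)			\
	__print_symbolic(type,			\
		{ LT_IRQ,	"IRQ" },	\
		{ LT_PREEMPT,	"PREEMPT" },	\
		{ LT_CRITTIME,	"CRIT_TIME" })

TRACE_DEFINE_ENUM() is what converts the enum names used by
__print_symbolic() into their numeric values in the event's format file, so
user space (and filters on ltype) can interpret them correctly.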

-- Steve

> +#endif
> +
> +DECLARE_EVENT_CLASS(latency_template,
> +	TP_PROTO(int ltype, cycles_t latency),
> +
> +	TP_ARGS(ltype, latency),
> +
> +	TP_STRUCT__entry(
> +		__field(int,		ltype)
> +		__field(cycles_t,	latency)
> +	),
> +
> +	TP_fast_assign(
> +		__entry->ltype		= ltype;
> +		__entry->latency	= latency;
> +	),
> +
> +	TP_printk("ltype=%s(%d), latency=%lu", show_ltype(__entry->ltype),
> +		  __entry->ltype, (unsigned long) __entry->latency)
> +);
> +
> +DEFINE_EVENT(latency_template, latency_preempt,
> +	    TP_PROTO(int ltype, cycles_t latency),
> +	    TP_ARGS(ltype, latency));
> +
> +#endif /* _TRACE_HIST_H */
> +
> +/* This part must be outside protection */
> +#include <trace/define_trace.h>
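
As a side note, with the TP_printk() format above a single latency_preempt
event would render in the trace output along these lines (a made-up sample,
not actual output):

  <idle>-0     [001] d..1   105.123456: latency_preempt: ltype=IRQ(0), latency=12345
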
> diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
> index 03cdff8..60ee660 100644
> --- a/kernel/trace/trace_irqsoff.c
> +++ b/kernel/trace/trace_irqsoff.c
> @@ -13,13 +13,19 @@
>  #include <linux/uaccess.h>
>  #include <linux/module.h>
>  #include <linux/ftrace.h>
> +#include <linux/percpu-defs.h>
> +#include <trace/events/sched.h>
>  
>  #include "trace.h"
>  
> +#define CREATE_TRACE_POINTS
> +#include <trace/events/latency.h>
> +
>  static struct trace_array		*irqsoff_trace __read_mostly;
>  static int				tracer_enabled __read_mostly;
>  
>  static DEFINE_PER_CPU(int, tracing_cpu);
> +static DEFINE_PER_CPU(cycle_t, lat_ts[LT_MAX]);
>  
>  static DEFINE_RAW_SPINLOCK(max_trace_lock);
>  
> @@ -419,9 +425,23 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
>  	atomic_dec(&data->disabled);
>  }
>  
> +static inline void latency_preempt_timing_start(enum latency_type ltype)
> +{
> +	this_cpu_write(lat_ts[ltype], (cycle_t) trace_clock_local());
> +}
> +
> +static inline void latency_preempt_timing_stop(enum latency_type type)
> +{
> +	trace_latency_preempt(type,
> +		(cycle_t) trace_clock_local() - this_cpu_read(lat_ts[type]));
> +}
> +
>  /* start and stop critical timings used to for stoppage (in idle) */
>  void start_critical_timings(void)
>  {
> +	if (unlikely(trace_latency_preempt_enabled()))
> +		latency_preempt_timing_start(LT_CRITTIME);
> +
>  	if (preempt_trace() || irq_trace())
>  		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
>  }
> @@ -431,6 +451,9 @@ void stop_critical_timings(void)
>  {
>  	if (preempt_trace() || irq_trace())
>  		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
> +
> +	if (unlikely(trace_latency_preempt_enabled()))
> +		latency_preempt_timing_stop(LT_CRITTIME);
>  }
>  EXPORT_SYMBOL_GPL(stop_critical_timings);
>  
> @@ -438,6 +461,9 @@ EXPORT_SYMBOL_GPL(stop_critical_timings);
>  #ifdef CONFIG_PROVE_LOCKING
>  void time_hardirqs_on(unsigned long a0, unsigned long a1)
>  {
> +	if (unlikely(trace_latency_preempt_enabled()))
> +		latency_preempt_timing_stop(LT_IRQ);
> +
>  	if (!preempt_trace() && irq_trace())
>  		stop_critical_timing(a0, a1);
>  }
> @@ -446,6 +472,9 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
>  {
>  	if (!preempt_trace() && irq_trace())
>  		start_critical_timing(a0, a1);
> +
> +	if (unlikely(trace_latency_preempt_enabled()))
> +		latency_preempt_timing_start(LT_IRQ);
>  }
>  
>  #else /* !CONFIG_PROVE_LOCKING */
> @@ -503,6 +532,9 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
>  #ifdef CONFIG_PREEMPT_TRACER
>  void trace_preempt_on(unsigned long a0, unsigned long a1)
>  {
> +	if (unlikely(trace_latency_preempt_enabled()))
> +		latency_preempt_timing_stop(LT_PREEMPT);
> +
>  	if (preempt_trace() && !irq_trace())
>  		stop_critical_timing(a0, a1);
>  }
> @@ -511,6 +543,9 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
>  {
>  	if (preempt_trace() && !irq_trace())
>  		start_critical_timing(a0, a1);
> +
> +	if (unlikely(trace_latency_preempt_enabled()))
> +		latency_preempt_timing_start(LT_PREEMPT);
>  }
>  #endif /* CONFIG_PREEMPT_TRACER */
>  
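One more note on the guards above: trace_latency_preempt_enabled() is the
static inline generated for every tracepoint, backed by the tracepoint's
static key, so with jump labels these extra branches are patched out while
the event is disabled. To see the raw events before attaching a hist
trigger, something like this should work once the patch is applied (paths
again assume the usual tracefs location):

  # echo 1 > /sys/kernel/debug/tracing/events/latency/latency_preempt/enable
  # cat /sys/kernel/debug/tracing/trace_pipe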
