Message-ID: <20160818102135.549efeea@gandalf.local.home>
Date:	Thu, 18 Aug 2016 10:21:35 -0400
From:	Steven Rostedt <rostedt@...dmis.org>
To:	Binoy Jayan <binoy.jayan@...aro.org>
Cc:	Arnd Bergmann <arnd@...db.de>, linaro-kernel@...ts.linaro.org,
	Daniel Wagner <daniel.wagner@...-carit.de>,
	Carsten Emde <C.Emde@...dl.org>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2/3] tracing: Add trace_irqsoff tracepoints

On Thu, 18 Aug 2016 12:39:25 +0530
Binoy Jayan <binoy.jayan@...aro.org> wrote:

>  
> +static inline void mark_timestamp(cycle_t __percpu *ts)
> +{
> +	int cpu = raw_smp_processor_id();
> +	per_cpu(*ts, cpu) = ftrace_now(cpu);

Just an FYI: ftrace_now() uses whatever clock source is specified by
/sys/kernel/tracing/trace_clock, which could be a slower "global"
clock, or even raw cycles. If that's what you want then this is the
right approach. But if you want something more accurate per cpu
(though it may not be comparable across CPUs) then you may want to use
trace_clock_local() (which is the "[local]" clock in the trace_clock
file).
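
Something like this, if the local clock is what you want (untested;
mark_timestamp_local() is just an illustrative name, and
trace_clock_local() is declared in include/linux/trace_clock.h):

static inline void mark_timestamp_local(cycle_t __percpu *ts)
{
	int cpu = raw_smp_processor_id();

	/* fast and monotonic on this CPU, but not comparable across CPUs */
	per_cpu(*ts, cpu) = trace_clock_local();
}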

Also, you should convert the above to:

  this_cpu_write(*ts, ftrace_now(raw_smp_processor_id()));

as this_cpu_write() is optimized compared to the per_cpu() access.
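
For reference, the whole helper would then be just (untested):

static inline void mark_timestamp(cycle_t __percpu *ts)
{
	/* write this CPU's copy without an explicit per_cpu() lookup */
	this_cpu_write(*ts, ftrace_now(raw_smp_processor_id()));
}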


> +}
> +
> +static inline cycle_t get_delta(int cpu, cycle_t __percpu *ts)
> +{
> +       return ftrace_now(cpu) - per_cpu(*ts, cpu);

Use this_cpu_read(*ts) here as well.
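
I.e. the helper becomes something like (untested):

static inline cycle_t get_delta(int cpu, cycle_t __percpu *ts)
{
	/* read this CPU's stamp; cpu is then only needed for ftrace_now() */
	return ftrace_now(cpu) - this_cpu_read(*ts);
}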

> +}
>  /* start and stop critical timings used to for stoppage (in idle) */
>  void start_critical_timings(void)
>  {
> +	if (trace_latency_critical_timings_enabled())
> +		mark_timestamp(&ts_critical_timings);
> +
>  	if (preempt_trace() || irq_trace())
>  		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
>  }
> @@ -431,6 +451,13 @@ void stop_critical_timings(void)
>  {
>  	if (preempt_trace() || irq_trace())
>  		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
> +
> +	if (trace_latency_critical_timings_enabled()) {
> +		int cpu = raw_smp_processor_id();
> +		trace_latency_critical_timings(cpu,
> +			get_delta(cpu, &ts_critical_timings));
> +	}
> +
>  }
>  EXPORT_SYMBOL_GPL(stop_critical_timings);
>  
> @@ -438,6 +465,10 @@ EXPORT_SYMBOL_GPL(stop_critical_timings);
>  #ifdef CONFIG_PROVE_LOCKING
>  void time_hardirqs_on(unsigned long a0, unsigned long a1)
>  {
> +	if (trace_latency_irqs_enabled()) {
> +		int cpu = raw_smp_processor_id();
> +		trace_latency_irqs(cpu, get_delta(cpu, &ts_irqs));
> +	}
>  	if (!preempt_trace() && irq_trace())
>  		stop_critical_timing(a0, a1);
>  }
> @@ -446,6 +477,10 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
>  {
>  	if (!preempt_trace() && irq_trace())
>  		start_critical_timing(a0, a1);
> +
> +	if (trace_latency_irqs_enabled()) {
> +                mark_timestamp(&ts_irqs);
> +	}
>  }
>  
>  #else /* !CONFIG_PROVE_LOCKING */
> @@ -500,9 +535,16 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
>  #endif /* CONFIG_PROVE_LOCKING */
>  #endif /*  CONFIG_IRQSOFF_TRACER */
>  
> +int count = 0;
> +
>  #ifdef CONFIG_PREEMPT_TRACER
>  void trace_preempt_on(unsigned long a0, unsigned long a1)
>  {
> +	if (trace_latency_preempt_enabled()) {
> +		int cpu = raw_smp_processor_id();
> +		trace_latency_preempt(cpu, get_delta(cpu, &ts_preempt));

If the cpu passed to the tracepoint will always be smp_processor_id(),
you don't need to pass it at all; it's already saved by the tracer for
every event.
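
The call site here could then just be (untested, and assuming the
TP_PROTO of the tracepoint definition in patch 1/3 is reduced to take
only the delta):

	if (trace_latency_preempt_enabled())
		trace_latency_preempt(get_delta(raw_smp_processor_id(),
						&ts_preempt));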

-- Steve

> +	}
> +
>  	if (preempt_trace() && !irq_trace())
>  		stop_critical_timing(a0, a1);
>  }
> @@ -511,6 +553,10 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
>  {
>  	if (preempt_trace() && !irq_trace())
>  		start_critical_timing(a0, a1);
> +
> +	if (trace_latency_preempt_enabled()) {
> +                mark_timestamp(&ts_preempt);
> +	}
>  }
>  #endif /* CONFIG_PREEMPT_TRACER */
>  
