Message-Id: <20170407144334.GT1600@linux.vnet.ibm.com>
Date:   Fri, 7 Apr 2017 07:43:35 -0700
From:   "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
To:     Steven Rostedt <rostedt@...dmis.org>
Cc:     linux-kernel@...r.kernel.org, Ingo Molnar <mingo@...nel.org>,
        Andrew Morton <akpm@...ux-foundation.org>
Subject: Re: [PATCH 0/5 v2] tracing: Add usecase of synchronize_rcu_tasks()
 and stack_tracer_disable()

On Fri, Apr 07, 2017 at 10:01:06AM -0400, Steven Rostedt wrote:
> Paul,
> 
> Here's my latest. You OK with it?

Given your update to 3/5, I suspect that we could live with it.  I am
expecting some complaints about increases in idle-entry latency, but it
might be best to wait for actual complaints rather than complexifying too
proactively.

That said, there isn't supposed to be any tracing during the now very
small interval where RCU's idle-entry is incomplete.  Mightn't it be
better to (under CONFIG_PROVE_RCU or some such) give splats if tracing
showed up in that interval?
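
As an illustrative sketch only (not part of this series; the helper name
below is invented), such a splat could be produced by a CONFIG_PROVE_RCU-gated
assertion in the tracer entry path, built on the existing rcu_is_watching()
and WARN_ON_ONCE() primitives:

/*
 * Hypothetical sketch, not from the patch series: a debug-only check,
 * compiled in under CONFIG_PROVE_RCU, that splats once if a tracer runs
 * while RCU does not consider this CPU to be watching (i.e. inside the
 * idle-entry window).  The helper name and its call site are assumptions;
 * rcu_is_watching() and WARN_ON_ONCE() are existing kernel primitives.
 */
#include <linux/rcupdate.h>
#include <linux/bug.h>

#ifdef CONFIG_PROVE_RCU
static inline void trace_assert_rcu_watching(void)
{
	WARN_ON_ONCE(!rcu_is_watching());
}
#else
static inline void trace_assert_rcu_watching(void) { }
#endif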

							Thanx, Paul

> -- Steve
> 
> 
> Paul E. McKenney (1):
>       rcu: Fix dyntick-idle tracing
> 
> Steven Rostedt (VMware) (4):
>       ftrace: Add use of synchronize_rcu_tasks() with dynamic trampolines
>       tracing: Replace the per_cpu() with this_cpu() in trace_stack.c
>       tracing: Add stack_tracer_disable/enable() functions
>       tracing: Rename trace_active to disable_stack_tracer and inline its modification
> 
> ----
> Changes since v1:
> 
>   . Make stack_tracer_{dis,en}able() into inlines
> 
>   . Rename trace_active to disable_stack_tracer and make global
> 
>   . Do not call preempt_disable() from stack_tracer_disable()
>     and likewise from stack_tracer_enable()
> 
>   . Add check that preemption is disabled when CONFIG_DEBUG_PREEMPT is enabled
> 
>  include/linux/ftrace.h     | 38 ++++++++++++++++++++++++++++++++++++
>  kernel/rcu/tree.c          | 48 ++++++++++++++++++++++------------------------
>  kernel/trace/Kconfig       |  3 ++-
>  kernel/trace/ftrace.c      | 42 +++++++++++++++++-----------------------
>  kernel/trace/trace_stack.c | 27 +++++++++-----------------
>  5 files changed, 90 insertions(+), 68 deletions(-)
> 
> Diff from v1:
> 
> diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> index 40afee3..4bde7ff 100644
> --- a/include/linux/ftrace.h
> +++ b/include/linux/ftrace.h
> @@ -287,8 +287,40 @@ stack_trace_sysctl(struct ctl_table *table, int write,
>  		   void __user *buffer, size_t *lenp,
>  		   loff_t *ppos);
> 
> -void stack_tracer_disable(void);
> -void stack_tracer_enable(void);
> +/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
> +DECLARE_PER_CPU(int, disable_stack_tracer);
> +
> +/**
> + * stack_tracer_disable - temporarily disable the stack tracer
> + *
> + * There are a few locations (namely in RCU) where stack tracing
> + * cannot be executed. This function is used to disable stack
> + * tracing during those critical sections.
> + *
> + * This function must be called with preemption or interrupts
> + * disabled, and stack_tracer_enable() must be called shortly
> + * afterward, while preemption or interrupts are still disabled.
> + */
> +static inline void stack_tracer_disable(void)
> +{
> +	/* Preemption or interrupts must be disabled */
> +	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
> +		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
> +	this_cpu_inc(disable_stack_tracer);
> +}
> +
> +/**
> + * stack_tracer_enable - re-enable the stack tracer
> + *
> + * After stack_tracer_disable() is called, stack_tracer_enable()
> + * must be called shortly afterward.
> + */
> +static inline void stack_tracer_enable(void)
> +{
> +	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
> +		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
> +	this_cpu_dec(disable_stack_tracer);
> +}
>  #else
>  static inline void stack_tracer_disable(void) { }
>  static inline void stack_tracer_enable(void) { }
> diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
> index 5adbb73..84fafb6 100644
> --- a/kernel/trace/trace_stack.c
> +++ b/kernel/trace/trace_stack.c
> @@ -35,40 +35,12 @@ unsigned long stack_trace_max_size;
>  arch_spinlock_t stack_trace_max_lock =
>  	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
> 
> -static DEFINE_PER_CPU(int, trace_active);
> +DEFINE_PER_CPU(int, disable_stack_tracer);
>  static DEFINE_MUTEX(stack_sysctl_mutex);
> 
>  int stack_tracer_enabled;
>  static int last_stack_tracer_enabled;
> 
> -/**
> - * stack_tracer_disable - temporarily disable the stack tracer
> - *
> - * There's a few locations (namely in RCU) where stack tracing
> - * can not be executed. This function is used to disable stack
> - * tracing during those critical sections.
> - *
> - * This function will disable preemption. stack_tracer_enable()
> - * must be called shortly after this is called.
> - */
> -void stack_tracer_disable(void)
> -{
> -	preempt_disable_notrace();
> -	this_cpu_inc(trace_active);
> -}
> -
> -/**
> - * stack_tracer_enable - re-enable the stack tracer
> - *
> - * After stack_tracer_disable() is called, stack_tracer_enable()
> - * must shortly be called afterward.
> - */
> -void stack_tracer_enable(void)
> -{
> -	this_cpu_dec(trace_active);
> -	preempt_enable_notrace();
> -}
> -
>  void stack_trace_print(void)
>  {
>  	long i;
> @@ -239,8 +211,8 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
>  	preempt_disable_notrace();
> 
>  	/* no atomic needed, we only modify this variable by this cpu */
> -	this_cpu_inc(trace_active);
> -	if (this_cpu_read(trace_active) != 1)
> +	this_cpu_inc(disable_stack_tracer);
> +	if (this_cpu_read(disable_stack_tracer) != 1)
>  		goto out;
> 
>  	ip += MCOUNT_INSN_SIZE;
> @@ -248,7 +220,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
>  	check_stack(ip, &stack);
> 
>   out:
> -	this_cpu_dec(trace_active);
> +	this_cpu_dec(disable_stack_tracer);
>  	/* prevent recursion in schedule */
>  	preempt_enable_notrace();
>  }
> @@ -290,15 +262,15 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
>  	/*
>  	 * In case we trace inside arch_spin_lock() or after (NMI),
>  	 * we will cause circular lock, so we also need to increase
> -	 * the percpu trace_active here.
> +	 * the percpu disable_stack_tracer here.
>  	 */
> -	this_cpu_inc(trace_active);
> +	this_cpu_inc(disable_stack_tracer);
> 
>  	arch_spin_lock(&stack_trace_max_lock);
>  	*ptr = val;
>  	arch_spin_unlock(&stack_trace_max_lock);
> 
> -	this_cpu_dec(trace_active);
> +	this_cpu_dec(disable_stack_tracer);
>  	local_irq_restore(flags);
> 
>  	return count;
> @@ -334,7 +306,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
>  {
>  	local_irq_disable();
> 
> -	this_cpu_inc(trace_active);
> +	this_cpu_inc(disable_stack_tracer);
> 
>  	arch_spin_lock(&stack_trace_max_lock);
> 
> @@ -348,7 +320,7 @@ static void t_stop(struct seq_file *m, void *p)
>  {
>  	arch_spin_unlock(&stack_trace_max_lock);
> 
> -	this_cpu_dec(trace_active);
> +	this_cpu_dec(disable_stack_tracer);
> 
>  	local_irq_enable();
>  }
> 
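
As the v2 changelog above notes, stack_tracer_disable() and
stack_tracer_enable() no longer disable preemption themselves, so the caller
must already be running with preemption or interrupts off.  The sketch below
is hypothetical and only illustrates that calling convention; the
stack_tracer_*() inlines and local_irq_save()/local_irq_restore() are the
real interfaces, while the surrounding function is invented:

/*
 * Hypothetical caller, for illustration only.  The new inlines merely
 * increment/decrement the per-CPU disable_stack_tracer counter (plus an
 * optional debug warning), so the caller is responsible for keeping
 * preemption or interrupts disabled across the whole
 * stack_tracer_disable()/stack_tracer_enable() pair.
 */
#include <linux/ftrace.h>
#include <linux/irqflags.h>

static void example_no_stack_trace_region(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* satisfies the precondition */
	stack_tracer_disable();		/* per-CPU counter increment only */

	/* ... work that must not be stack-traced, e.g. RCU idle entry ... */

	stack_tracer_enable();		/* per-CPU counter decrement */
	local_irq_restore(flags);
}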
