Date:   Thu, 13 Apr 2017 09:58:17 +0200
From:   Valentin Rothberg <valentinrothberg@...il.com>
To:     Steven Rostedt <rostedt@...dmis.org>
Cc:     linux-kernel@...r.kernel.org, Ingo Molnar <mingo@...nel.org>,
        Andrew Morton <akpm@...ux-foundation.org>,
        "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
        valentinrothberg@...il.com
Subject: Re: [for-next][PATCH 4/7] tracing: Rename trace_active to
 disable_stack_tracer and inline its modification

Hi Steven,

I just found this patch as linux-next commit 8aaf1ee70e19, with
scripts/checkkconfigsymbols.py complaining about a reference to the
undefined Kconfig symbol CONFIG_PREEMPT_DEBUG (see below). I guess it's
just a typo, since the symbol that actually exists is named
DEBUG_PREEMPT.
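
By the way, this also explains why the typo is completely silent at
build time: IS_ENABLED() is built out of token pasting, so an unknown
symbol simply evaluates to 0 and the WARN_ON_ONCE() gets compiled away.
Here is a minimal stand-alone re-creation of the trick (simplified from
include/linux/kconfig.h; the real IS_ENABLED() additionally checks
option##_MODULE to cover the =m case):

#include <stdio.h>

/* Pretend Kconfig generated autoconf.h with DEBUG_PREEMPT=y ... */
#define CONFIG_DEBUG_PREEMPT 1
/* ... while CONFIG_PREEMPT_DEBUG is defined nowhere. */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define IS_ENABLED(option) ___is_defined(option)

int main(void)
{
	/* Defined symbol: expands to 1, pastes to __ARG_PLACEHOLDER_1,
	 * i.e. "0,", so the arguments are (0, 1, 0) and the second
	 * one is 1. */
	printf("CONFIG_DEBUG_PREEMPT: %d\n", IS_ENABLED(CONFIG_DEBUG_PREEMPT));

	/* Undefined symbol: pastes to one junk token, the arguments
	 * are (junk 1, 0) and the second one is 0. No error, no
	 * warning, the branch is just dead. */
	printf("CONFIG_PREEMPT_DEBUG: %d\n", IS_ENABLED(CONFIG_PREEMPT_DEBUG));
	return 0;
}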

If you want, I can send a trivial patch renaming both references.
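
Something like this (hand-written hunks with the line numbers elided,
untested):

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ ... @@ static inline void stack_tracer_disable(void)
-	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
 		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
@@ ... @@ static inline void stack_tracer_enable(void)
-	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
 		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());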

Best regards,
 Valentin

On Apr 10 '17 15:52, Steven Rostedt wrote:
> From: "Steven Rostedt (VMware)" <rostedt@...dmis.org>
> 
> In order to eliminate a function call, make "trace_active" into
> "disable_stack_tracer" and convert stack_tracer_disable() and friends into
> static inline functions.
> 
> Acked-by: Paul E. McKenney <paulmck@...ux.vnet.ibm.com>
> Signed-off-by: Steven Rostedt (VMware) <rostedt@...dmis.org>
> ---
>  include/linux/ftrace.h     | 36 +++++++++++++++++++++++++++++++--
>  kernel/trace/trace_stack.c | 50 +++++++++-------------------------------------
>  2 files changed, 43 insertions(+), 43 deletions(-)
> 
> diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> index 7b4e6572ab21..06b2990a35e4 100644
> --- a/include/linux/ftrace.h
> +++ b/include/linux/ftrace.h
> @@ -287,8 +287,40 @@ stack_trace_sysctl(struct ctl_table *table, int write,
>  		   void __user *buffer, size_t *lenp,
>  		   loff_t *ppos);
>  
> -void stack_tracer_disable(void);
> -void stack_tracer_enable(void);
> +/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
> +DECLARE_PER_CPU(int, disable_stack_tracer);
> +
> +/**
> + * stack_tracer_disable - temporarily disable the stack tracer
> + *
> + * There are a few locations (namely in RCU) where stack tracing
> + * cannot be executed. This function is used to disable stack
> + * tracing during those critical sections.
> + *
> + * This function must be called with preemption or interrupts
> + * disabled and stack_tracer_enable() must be called shortly after
> + * while preemption or interrupts are still disabled.
> + */
> +static inline void stack_tracer_disable(void)
> +{
> +	/* Preemption or interrupts must be disabled */
> +	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
                   ^ undefined
> +		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
> +	this_cpu_inc(disable_stack_tracer);
> +}
> +
> +/**
> + * stack_tracer_enable - re-enable the stack tracer
> + *
> + * After stack_tracer_disable() is called, stack_tracer_enable()
> + * must be called shortly afterward.
> + */
> +static inline void stack_tracer_enable(void)
> +{
> +	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
                   ^ undefined
> +		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
> +	this_cpu_dec(disable_stack_tracer);
> +}
>  #else
>  static inline void stack_tracer_disable(void) { }
>  static inline void stack_tracer_enable(void) { }
> diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
> index 21e536cf66e4..f2f02ff350d4 100644
> --- a/kernel/trace/trace_stack.c
> +++ b/kernel/trace/trace_stack.c
> @@ -35,44 +35,12 @@ unsigned long stack_trace_max_size;
>  arch_spinlock_t stack_trace_max_lock =
>  	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
>  
> -static DEFINE_PER_CPU(int, trace_active);
> +DEFINE_PER_CPU(int, disable_stack_tracer);
>  static DEFINE_MUTEX(stack_sysctl_mutex);
>  
>  int stack_tracer_enabled;
>  static int last_stack_tracer_enabled;
>  
> -/**
> - * stack_tracer_disable - temporarily disable the stack tracer
> - *
> - * There's a few locations (namely in RCU) where stack tracing
> - * cannot be executed. This function is used to disable stack
> - * tracing during those critical sections.
> - *
> - * This function must be called with preemption or interrupts
> - * disabled and stack_tracer_enable() must be called shortly after
> - * while preemption or interrupts are still disabled.
> - */
> -void stack_tracer_disable(void)
> -{
> -	/* Preemption or interupts must be disabled */
> -	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
> -		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
> -	this_cpu_inc(trace_active);
> -}
> -
> -/**
> - * stack_tracer_enable - re-enable the stack tracer
> - *
> - * After stack_tracer_disable() is called, stack_tracer_enable()
> - * must be called shortly afterward.
> - */
> -void stack_tracer_enable(void)
> -{
> -	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
> -		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
> -	this_cpu_dec(trace_active);
> -}
> -
>  void stack_trace_print(void)
>  {
>  	long i;
> @@ -243,8 +211,8 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
>  	preempt_disable_notrace();
>  
>  	/* no atomic needed, we only modify this variable by this cpu */
> -	__this_cpu_inc(trace_active);
> -	if (__this_cpu_read(trace_active) != 1)
> +	__this_cpu_inc(disable_stack_tracer);
> +	if (__this_cpu_read(disable_stack_tracer) != 1)
>  		goto out;
>  
>  	ip += MCOUNT_INSN_SIZE;
> @@ -252,7 +220,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
>  	check_stack(ip, &stack);
>  
>   out:
> -	__this_cpu_dec(trace_active);
> +	__this_cpu_dec(disable_stack_tracer);
>  	/* prevent recursion in schedule */
>  	preempt_enable_notrace();
>  }
> @@ -294,15 +262,15 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
>  	/*
>  	 * In case we trace inside arch_spin_lock() or after (NMI),
>  	 * we will cause circular lock, so we also need to increase
> -	 * the percpu trace_active here.
> +	 * the percpu disable_stack_tracer here.
>  	 */
> -	__this_cpu_inc(trace_active);
> +	__this_cpu_inc(disable_stack_tracer);
>  
>  	arch_spin_lock(&stack_trace_max_lock);
>  	*ptr = val;
>  	arch_spin_unlock(&stack_trace_max_lock);
>  
> -	__this_cpu_dec(trace_active);
> +	__this_cpu_dec(disable_stack_tracer);
>  	local_irq_restore(flags);
>  
>  	return count;
> @@ -338,7 +306,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
>  {
>  	local_irq_disable();
>  
> -	__this_cpu_inc(trace_active);
> +	__this_cpu_inc(disable_stack_tracer);
>  
>  	arch_spin_lock(&stack_trace_max_lock);
>  
> @@ -352,7 +320,7 @@ static void t_stop(struct seq_file *m, void *p)
>  {
>  	arch_spin_unlock(&stack_trace_max_lock);
>  
> -	__this_cpu_dec(trace_active);
> +	__this_cpu_dec(disable_stack_tracer);
>  
>  	local_irq_enable();
>  }
> -- 
> 2.10.2
> 
> 
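
P.S. For readers following along: the changelog's "eliminate a function
call" works because DECLARE_PER_CPU() in the header lets the new static
inline helpers touch the variable directly, while the single
DEFINE_PER_CPU() in trace_stack.c stays the only definition. Below is a
stand-alone userspace analogue of that split (hypothetical names; a
plain global stands in for the per-CPU variable, and no real tracing is
involved):

#include <stdio.h>

/*
 * "Header" part: declare the counter and the inline helpers, as
 * DECLARE_PER_CPU(int, disable_stack_tracer) plus the static inline
 * stack_tracer_{disable,enable}() do in linux/ftrace.h.
 */
extern int disable_counter;	/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */

static inline void tracer_disable(void)
{
	disable_counter++;	/* inlined at the call site: no function call */
}

static inline void tracer_enable(void)
{
	disable_counter--;
}

/*
 * "One .c file" part: the single definition, as
 * DEFINE_PER_CPU(int, disable_stack_tracer) is in trace_stack.c.
 */
int disable_counter;

int main(void)
{
	tracer_disable();
	printf("counter after disable: %d\n", disable_counter);	/* 1 */
	tracer_enable();
	printf("counter after enable:  %d\n", disable_counter);	/* 0 */
	return 0;
}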
