Message-ID: <9990f8c6-1d50-8be1-f2e3-eb2ad477527d@linux.alibaba.com>
Date:   Wed, 27 Oct 2021 10:24:52 +0800
From:   王贇 <yun.wang@...ux.alibaba.com>
To:     Guo Ren <guoren@...nel.org>, Steven Rostedt <rostedt@...dmis.org>,
        Ingo Molnar <mingo@...hat.com>,
        "James E.J. Bottomley" <James.Bottomley@...senPartnership.com>,
        Helge Deller <deller@....de>,
        Michael Ellerman <mpe@...erman.id.au>,
        Benjamin Herrenschmidt <benh@...nel.crashing.org>,
        Paul Mackerras <paulus@...ba.org>,
        Paul Walmsley <paul.walmsley@...ive.com>,
        Palmer Dabbelt <palmer@...belt.com>,
        Albert Ou <aou@...s.berkeley.edu>,
        Thomas Gleixner <tglx@...utronix.de>,
        Borislav Petkov <bp@...en8.de>, x86@...nel.org,
        "H. Peter Anvin" <hpa@...or.com>,
        Josh Poimboeuf <jpoimboe@...hat.com>,
        Jiri Kosina <jikos@...nel.org>,
        Miroslav Benes <mbenes@...e.cz>,
        Petr Mladek <pmladek@...e.com>,
        Joe Lawrence <joe.lawrence@...hat.com>,
        Masami Hiramatsu <mhiramat@...nel.org>,
        "Peter Zijlstra (Intel)" <peterz@...radead.org>,
        Nicholas Piggin <npiggin@...il.com>,
        Jisheng Zhang <jszhang@...nel.org>, linux-csky@...r.kernel.org,
        linux-kernel@...r.kernel.org, linux-parisc@...r.kernel.org,
        linuxppc-dev@...ts.ozlabs.org, linux-riscv@...ts.infradead.org,
        live-patching@...r.kernel.org
Subject: Re: [PATCH v6] ftrace: disable preemption when recursion locked

Hi Steven, Miroslav,

This version should fix the comments about the bit value; besides
that, it adds a WARN_ON_ONCE() in trace_clear_recursion() to make
sure the bit < 0 abuse case gets noticed.

Please let me know if there are any other issues :-)

Regards,
Michael Wang

On 2021/10/27 10:11 AM, 王贇 wrote:
> As the documentation explains, ftrace_test_recursion_trylock()
> and ftrace_test_recursion_unlock() are supposed to disable and
> enable preemption properly; however, currently this work is done
> outside of those functions, so callers could miss it by mistake.
> 
> And since the internal uses of trace_test_and_set_recursion()
> and trace_clear_recursion() also require preemption to be
> disabled, we can just merge the logic.
> 
> This patch makes sure preemption is disabled when
> trace_test_and_set_recursion() returns bit >= 0, and that
> trace_clear_recursion() re-enables preemption if it was
> previously enabled.
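> 
> For illustration, a typical caller such as kprobe_ftrace_handler()
> changes roughly as follows (a sketch of the calling pattern only,
> not an actual hunk from the diff below):
> 
> 	/* Before: the caller managed preemption itself. */
> 	bit = ftrace_test_recursion_trylock(ip, parent_ip);
> 	if (bit < 0)
> 		return;
> 	preempt_disable_notrace();
> 	/* ... handler body ... */
> 	preempt_enable_notrace();
> 	ftrace_test_recursion_unlock(bit);
> 
> 	/* After: trylock/unlock manage preemption internally. */
> 	bit = ftrace_test_recursion_trylock(ip, parent_ip);
> 	if (bit < 0)
> 		return;
> 	/* ... handler body runs with preemption disabled ... */
> 	ftrace_test_recursion_unlock(bit);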
> 
> CC: Petr Mladek <pmladek@...e.com>
> CC: Steven Rostedt <rostedt@...dmis.org>
> CC: Miroslav Benes <mbenes@...e.cz>
> Reported-by: Abaci <abaci@...ux.alibaba.com>
> Suggested-by: Peter Zijlstra <peterz@...radead.org>
> Signed-off-by: Michael Wang <yun.wang@...ux.alibaba.com>
> ---
>  arch/csky/kernel/probes/ftrace.c     |  2 --
>  arch/parisc/kernel/ftrace.c          |  2 --
>  arch/powerpc/kernel/kprobes-ftrace.c |  2 --
>  arch/riscv/kernel/probes/ftrace.c    |  2 --
>  arch/x86/kernel/kprobes/ftrace.c     |  2 --
>  include/linux/trace_recursion.h      | 13 ++++++++++++-
>  kernel/livepatch/patch.c             | 12 ++++++------
>  kernel/trace/ftrace.c                | 15 +++++----------
>  kernel/trace/trace_functions.c       |  5 -----
>  9 files changed, 23 insertions(+), 32 deletions(-)
> 
> diff --git a/arch/csky/kernel/probes/ftrace.c b/arch/csky/kernel/probes/ftrace.c
> index b388228..834cffc 100644
> --- a/arch/csky/kernel/probes/ftrace.c
> +++ b/arch/csky/kernel/probes/ftrace.c
> @@ -17,7 +17,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
>  		return;
> 
>  	regs = ftrace_get_regs(fregs);
> -	preempt_disable_notrace();
>  	p = get_kprobe((kprobe_opcode_t *)ip);
>  	if (!p) {
>  		p = get_kprobe((kprobe_opcode_t *)(ip - MCOUNT_INSN_SIZE));
> @@ -57,7 +56,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
>  		__this_cpu_write(current_kprobe, NULL);
>  	}
>  out:
> -	preempt_enable_notrace();
>  	ftrace_test_recursion_unlock(bit);
>  }
>  NOKPROBE_SYMBOL(kprobe_ftrace_handler);
> diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
> index 7d14242..90c4345 100644
> --- a/arch/parisc/kernel/ftrace.c
> +++ b/arch/parisc/kernel/ftrace.c
> @@ -210,7 +210,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
>  		return;
> 
>  	regs = ftrace_get_regs(fregs);
> -	preempt_disable_notrace();
>  	p = get_kprobe((kprobe_opcode_t *)ip);
>  	if (unlikely(!p) || kprobe_disabled(p))
>  		goto out;
> @@ -239,7 +238,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
>  	}
>  	__this_cpu_write(current_kprobe, NULL);
>  out:
> -	preempt_enable_notrace();
>  	ftrace_test_recursion_unlock(bit);
>  }
>  NOKPROBE_SYMBOL(kprobe_ftrace_handler);
> diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
> index 7154d58..072ebe7 100644
> --- a/arch/powerpc/kernel/kprobes-ftrace.c
> +++ b/arch/powerpc/kernel/kprobes-ftrace.c
> @@ -26,7 +26,6 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
>  		return;
> 
>  	regs = ftrace_get_regs(fregs);
> -	preempt_disable_notrace();
>  	p = get_kprobe((kprobe_opcode_t *)nip);
>  	if (unlikely(!p) || kprobe_disabled(p))
>  		goto out;
> @@ -61,7 +60,6 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
>  		__this_cpu_write(current_kprobe, NULL);
>  	}
>  out:
> -	preempt_enable_notrace();
>  	ftrace_test_recursion_unlock(bit);
>  }
>  NOKPROBE_SYMBOL(kprobe_ftrace_handler);
> diff --git a/arch/riscv/kernel/probes/ftrace.c b/arch/riscv/kernel/probes/ftrace.c
> index aab85a8..7142ec4 100644
> --- a/arch/riscv/kernel/probes/ftrace.c
> +++ b/arch/riscv/kernel/probes/ftrace.c
> @@ -15,7 +15,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
>  	if (bit < 0)
>  		return;
> 
> -	preempt_disable_notrace();
>  	p = get_kprobe((kprobe_opcode_t *)ip);
>  	if (unlikely(!p) || kprobe_disabled(p))
>  		goto out;
> @@ -52,7 +51,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
>  		__this_cpu_write(current_kprobe, NULL);
>  	}
>  out:
> -	preempt_enable_notrace();
>  	ftrace_test_recursion_unlock(bit);
>  }
>  NOKPROBE_SYMBOL(kprobe_ftrace_handler);
> diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
> index 596de2f..dd2ec14 100644
> --- a/arch/x86/kernel/kprobes/ftrace.c
> +++ b/arch/x86/kernel/kprobes/ftrace.c
> @@ -25,7 +25,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
>  	if (bit < 0)
>  		return;
> 
> -	preempt_disable_notrace();
>  	p = get_kprobe((kprobe_opcode_t *)ip);
>  	if (unlikely(!p) || kprobe_disabled(p))
>  		goto out;
> @@ -59,7 +58,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
>  		__this_cpu_write(current_kprobe, NULL);
>  	}
>  out:
> -	preempt_enable_notrace();
>  	ftrace_test_recursion_unlock(bit);
>  }
>  NOKPROBE_SYMBOL(kprobe_ftrace_handler);
> diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
> index abe1a50..64c03ee 100644
> --- a/include/linux/trace_recursion.h
> +++ b/include/linux/trace_recursion.h
> @@ -135,6 +135,9 @@ static __always_inline int trace_get_context_bit(void)
>  # define do_ftrace_record_recursion(ip, pip)	do { } while (0)
>  #endif
> 
> +/*
> + * Preemption is promised to be disabled when the returned bit >= 0.
> + */
>  static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
>  							int start)
>  {
> @@ -162,11 +165,19 @@ static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsign
>  	current->trace_recursion = val;
>  	barrier();
> 
> +	preempt_disable_notrace();
> +
>  	return bit;
>  }
> 
> +/*
> + * Preemption will be enabled (if it was previously enabled).
> + */
>  static __always_inline void trace_clear_recursion(int bit)
>  {
> +	WARN_ON_ONCE(bit < 0);
> +
> +	preempt_enable_notrace();
>  	barrier();
>  	trace_recursion_clear(bit);
>  }
> @@ -178,7 +189,7 @@ static __always_inline void trace_clear_recursion(int bit)
>   * tracing recursed in the same context (normal vs interrupt),
>   *
>   * Returns: -1 if a recursion happened.
> - *           >= 0 if no recursion
> + *           >= 0 if no recursion.
>   */
>  static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
>  							 unsigned long parent_ip)
> diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
> index e8029ae..b8d75fb 100644
> --- a/kernel/livepatch/patch.c
> +++ b/kernel/livepatch/patch.c
> @@ -49,14 +49,15 @@ static void notrace klp_ftrace_handler(unsigned long ip,
> 
>  	ops = container_of(fops, struct klp_ops, fops);
> 
> +	/*
> +	 * ftrace_test_recursion_trylock() will disable preemption,
> +	 * which is required for the variant of synchronize_rcu() that is
> +	 * used to allow patching functions where RCU is not watching.
> +	 * See klp_synchronize_transition() for more details.
> +	 */
>  	bit = ftrace_test_recursion_trylock(ip, parent_ip);
>  	if (WARN_ON_ONCE(bit < 0))
>  		return;
> -	/*
> -	 * A variant of synchronize_rcu() is used to allow patching functions
> -	 * where RCU is not watching, see klp_synchronize_transition().
> -	 */
> -	preempt_disable_notrace();
> 
>  	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
>  				      stack_node);
> @@ -120,7 +121,6 @@ static void notrace klp_ftrace_handler(unsigned long ip,
>  	klp_arch_set_pc(fregs, (unsigned long)func->new_func);
> 
>  unlock:
> -	preempt_enable_notrace();
>  	ftrace_test_recursion_unlock(bit);
>  }
> 
> diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
> index b7be1df..7392bc7 100644
> --- a/kernel/trace/ftrace.c
> +++ b/kernel/trace/ftrace.c
> @@ -7198,16 +7198,15 @@ void ftrace_reset_array_ops(struct trace_array *tr)
>  	struct ftrace_ops *op;
>  	int bit;
> 
> +	/*
> +	 * trace_test_and_set_recursion() will disable preemption,
> +	 * which is required since some of the ops may be dynamically
> +	 * allocated; they must be freed after a synchronize_rcu().
> +	 */
>  	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
>  	if (bit < 0)
>  		return;
> 
> -	/*
> -	 * Some of the ops may be dynamically allocated,
> -	 * they must be freed after a synchronize_rcu().
> -	 */
> -	preempt_disable_notrace();
> -
>  	do_for_each_ftrace_op(op, ftrace_ops_list) {
>  		/* Stub functions don't need to be called nor tested */
>  		if (op->flags & FTRACE_OPS_FL_STUB)
> @@ -7231,7 +7230,6 @@ void ftrace_reset_array_ops(struct trace_array *tr)
>  		}
>  	} while_for_each_ftrace_op(op);
>  out:
> -	preempt_enable_notrace();
>  	trace_clear_recursion(bit);
>  }
> 
> @@ -7279,12 +7277,9 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
>  	if (bit < 0)
>  		return;
> 
> -	preempt_disable_notrace();
> -
>  	if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
>  		op->func(ip, parent_ip, op, fregs);
> 
> -	preempt_enable_notrace();
>  	trace_clear_recursion(bit);
>  }
>  NOKPROBE_SYMBOL(ftrace_ops_assist_func);
> diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
> index 1f0e63f..9f1bfbe 100644
> --- a/kernel/trace/trace_functions.c
> +++ b/kernel/trace/trace_functions.c
> @@ -186,7 +186,6 @@ static void function_trace_start(struct trace_array *tr)
>  		return;
> 
>  	trace_ctx = tracing_gen_ctx();
> -	preempt_disable_notrace();
> 
>  	cpu = smp_processor_id();
>  	data = per_cpu_ptr(tr->array_buffer.data, cpu);
> @@ -194,7 +193,6 @@ static void function_trace_start(struct trace_array *tr)
>  		trace_function(tr, ip, parent_ip, trace_ctx);
> 
>  	ftrace_test_recursion_unlock(bit);
> -	preempt_enable_notrace();
>  }
> 
>  #ifdef CONFIG_UNWINDER_ORC
> @@ -298,8 +296,6 @@ static inline void process_repeats(struct trace_array *tr,
>  	if (bit < 0)
>  		return;
> 
> -	preempt_disable_notrace();
> -
>  	cpu = smp_processor_id();
>  	data = per_cpu_ptr(tr->array_buffer.data, cpu);
>  	if (atomic_read(&data->disabled))
> @@ -324,7 +320,6 @@ static inline void process_repeats(struct trace_array *tr,
> 
>  out:
>  	ftrace_test_recursion_unlock(bit);
> -	preempt_enable_notrace();
>  }
> 
>  static void
> 
