lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Fri, 19 Jun 2009 06:11:59 +0200
From:	Frederic Weisbecker <fweisbec@...il.com>
To:	Steven Rostedt <rostedt@...dmis.org>
Cc:	linux-kernel@...r.kernel.org, Ingo Molnar <mingo@...e.hu>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Benjamin Herrenschmidt <benh@...nel.crashing.org>,
	Paul Mackerras <paulus@...ba.org>,
	Heiko Carstens <heiko.carstens@...ibm.com>,
	Martin Schwidefsky <schwidefsky@...ibm.com>,
	Helge Deller <deller@....de>, Kyle McMartin <kyle@...artin.ca>
Subject: Re: [PATCH 2/2] function-graph: add stack frame test

On Thu, Jun 18, 2009 at 06:44:11PM -0400, Steven Rostedt wrote:
> From: Steven Rostedt <srostedt@...hat.com>
> 
> In case gcc does something funny with the stack frames, or the return
> from function code, we would like to detect that.
> 
> An arch may implement passing of a variable that is unique to the
> function and can be saved on entering a function and can be tested
> when exiting the function. Usually the frame pointer can be used for
> this purpose.
> 
> This patch also implements this for x86. Where it passes in the stack
> frame of the parent function, and will test that frame on exit.
> 
> There was a case in x86_32 with optimize for size (-Os) where, for a
> few functions, gcc would align the stack frame and place a copy of the
> return address into it. The function graph tracer modified the copy and
> not the actual return address. On return from the function, it did not go
> to the tracer hook, but returned to the parent. This broke the function
> graph tracer, because the return of the parent (where gcc did not do
> this funky manipulation) returned to the location that the child function
> was supposed to. This caused strange kernel crashes.
> 
> This test detected the problem and pointed out where the issue was.
> 
> This modifies the parameters of one of the functions that the arch
> specific code calls, so it includes changes to arch code to accommodate
> the new prototype.
> 
> Note, I notice that the parisc arch implements its own push_return_trace.
> This is now a generic function and the ftrace_push_return_trace should be
> used instead. This patch does not touch that code.
> 
> Cc: Benjamin Herrenschmidt <benh@...nel.crashing.org>
> Cc: Paul Mackerras <paulus@...ba.org>
> Cc: Heiko Carstens <heiko.carstens@...ibm.com>
> Cc: Martin Schwidefsky <schwidefsky@...ibm.com>
> Cc: Frederic Weisbecker <fweisbec@...il.com>
> Cc: Helge Deller <deller@....de>
> Cc: Kyle McMartin <kyle@...artin.ca>
> Signed-off-by: Steven Rostedt <rostedt@...dmis.org>
> ---
>  arch/powerpc/kernel/ftrace.c         |    2 +-
>  arch/s390/kernel/ftrace.c            |    2 +-
>  arch/x86/Kconfig                     |    1 +
>  arch/x86/kernel/entry_32.S           |    2 +
>  arch/x86/kernel/entry_64.S           |    2 +
>  arch/x86/kernel/ftrace.c             |    6 +++-
>  include/linux/ftrace.h               |    4 ++-
>  kernel/trace/Kconfig                 |    7 ++++++
>  kernel/trace/trace_functions_graph.c |   36 ++++++++++++++++++++++++++++++---
>  9 files changed, 53 insertions(+), 9 deletions(-)
> 
> diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
> index 2d182f1..58d6a61 100644
> --- a/arch/powerpc/kernel/ftrace.c
> +++ b/arch/powerpc/kernel/ftrace.c
> @@ -605,7 +605,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
>  		return;
>  	}
>  
> -	if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
> +	if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY) {
>  		*parent = old;
>  		return;
>  	}
> diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
> index 82ddfd3..3e298e6 100644
> --- a/arch/s390/kernel/ftrace.c
> +++ b/arch/s390/kernel/ftrace.c
> @@ -190,7 +190,7 @@ unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
>  		goto out;
>  	if (unlikely(atomic_read(&current->tracing_graph_pause)))
>  		goto out;
> -	if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY)
> +	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
>  		goto out;
>  	trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
>  	/* Only trace if the calling function expects to. */
> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> index 356d2ec..fcf12af 100644
> --- a/arch/x86/Kconfig
> +++ b/arch/x86/Kconfig
> @@ -33,6 +33,7 @@ config X86
>  	select HAVE_DYNAMIC_FTRACE
>  	select HAVE_FUNCTION_TRACER
>  	select HAVE_FUNCTION_GRAPH_TRACER
> +	select HAVE_FUNCTION_GRAPH_FP_TEST
>  	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
>  	select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
>  	select HAVE_FTRACE_SYSCALLS
> diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
> index c929add..0d4b285 100644
> --- a/arch/x86/kernel/entry_32.S
> +++ b/arch/x86/kernel/entry_32.S
> @@ -1154,6 +1154,7 @@ ENTRY(ftrace_graph_caller)
>  	pushl %edx
>  	movl 0xc(%esp), %edx
>  	lea 0x4(%ebp), %eax
> +	movl (%ebp), %ecx
>  	subl $MCOUNT_INSN_SIZE, %edx
>  	call prepare_ftrace_return
>  	popl %edx
> @@ -1168,6 +1169,7 @@ return_to_handler:
>  	pushl %eax
>  	pushl %ecx
>  	pushl %edx
> +	movl %ebp, %eax


Ah, I see...



>  	call ftrace_return_to_handler
>  	movl %eax, 0xc(%esp)
>  	popl %edx
> diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
> index de74f0a..c251be7 100644
> --- a/arch/x86/kernel/entry_64.S
> +++ b/arch/x86/kernel/entry_64.S
> @@ -135,6 +135,7 @@ ENTRY(ftrace_graph_caller)
>  
>  	leaq 8(%rbp), %rdi
>  	movq 0x38(%rsp), %rsi
> +	movq (%rbp), %rdx
>  	subq $MCOUNT_INSN_SIZE, %rsi
>  
>  	call	prepare_ftrace_return
> @@ -150,6 +151,7 @@ GLOBAL(return_to_handler)
>  	/* Save the return values */
>  	movq %rax, (%rsp)
>  	movq %rdx, 8(%rsp)
> +	movq %rbp, %rdi
>  
>  	call ftrace_return_to_handler
>  
> diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
> index b79c553..d94e1ea 100644
> --- a/arch/x86/kernel/ftrace.c
> +++ b/arch/x86/kernel/ftrace.c
> @@ -408,7 +408,8 @@ int ftrace_disable_ftrace_graph_caller(void)
>   * Hook the return address and push it in the stack of return addrs
>   * in current thread info.
>   */
> -void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
> +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
> +			   unsigned long frame_pointer)
>  {
>  	unsigned long old;
>  	int faulted;
> @@ -453,7 +454,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
>  		return;
>  	}
>  
> -	if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
> +	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
> +		    frame_pointer) == -EBUSY) {
>  		*parent = old;
>  		return;
>  	}
> diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> index 39b95c5..dc3b132 100644
> --- a/include/linux/ftrace.h
> +++ b/include/linux/ftrace.h
> @@ -362,6 +362,7 @@ struct ftrace_ret_stack {
>  	unsigned long func;
>  	unsigned long long calltime;
>  	unsigned long long subtime;
> +	unsigned long fp;
>  };
>  
>  /*
> @@ -372,7 +373,8 @@ struct ftrace_ret_stack {
>  extern void return_to_handler(void);
>  
>  extern int
> -ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth);
> +ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
> +			 unsigned long frame_pointer);
>  
>  /*
>   * Sometimes we don't want to trace a function with the function
> diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
> index 1eac852..b17ed87 100644
> --- a/kernel/trace/Kconfig
> +++ b/kernel/trace/Kconfig
> @@ -18,6 +18,13 @@ config HAVE_FUNCTION_TRACER
>  config HAVE_FUNCTION_GRAPH_TRACER
>  	bool
>  
> +config HAVE_FUNCTION_GRAPH_FP_TEST
> +	bool
> +	help
> +	 An arch may pass in a unique value (frame pointer) to both the
> +	 entering and exiting of a function. On exit, the value is compared
> +	 and if it does not match, then it will panic the kernel.
> +
>  config HAVE_FUNCTION_TRACE_MCOUNT_TEST
>  	bool
>  	help
> diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
> index 8b59241..d2249ab 100644
> --- a/kernel/trace/trace_functions_graph.c
> +++ b/kernel/trace/trace_functions_graph.c
> @@ -57,7 +57,8 @@ static struct tracer_flags tracer_flags = {
>  
>  /* Add a function return address to the trace stack on thread info.*/
>  int
> -ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
> +ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
> +			 unsigned long frame_pointer)
>  {
>  	unsigned long long calltime;
>  	int index;
> @@ -85,6 +86,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
>  	current->ret_stack[index].func = func;
>  	current->ret_stack[index].calltime = calltime;
>  	current->ret_stack[index].subtime = 0;
> +	current->ret_stack[index].fp = frame_pointer;
>  	*depth = index;
>  
>  	return 0;
> @@ -92,7 +94,8 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
>  
>  /* Retrieve a function return address to the trace stack on thread info.*/
>  static void
> -ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
> +ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
> +			unsigned long frame_pointer)
>  {
>  	int index;
>  
> @@ -106,6 +109,31 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
>  		return;
>  	}
>  
> +#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
> +	/*
> +	 * The arch may choose to record the frame pointer used
> +	 * and check it here to make sure that it is what we expect it
> +	 * to be. If gcc does not set the place holder of the return
> +	 * address in the frame pointer, and does a copy instead, then
> +	 * the function graph trace will fail. This test detects this
> +	 * case.
> +	 *
> +	 * Currently, x86_32 with optimize for size (-Os) makes the latest
> +	 * gcc do the above.
> +	 */
> +	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
> +		ftrace_graph_stop();
> +		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
> +		     "  from func %pF return to %lx\n",
> +		     current->ret_stack[index].fp,
> +		     frame_pointer,
> +		     (void *)current->ret_stack[index].func,
> +		     current->ret_stack[index].ret);
> +		*ret = (unsigned long)panic;
> +		return;
> +	}
> +#endif


Nice thing! I don't see another solution to detect this problem for
now.

BTW, does this weird copy of the return address in the stack only occur
in a few functions, or in most of them?

Acked-by: Frederic Weisbecker <fweisbec@...il.com>



> +
>  	*ret = current->ret_stack[index].ret;
>  	trace->func = current->ret_stack[index].func;
>  	trace->calltime = current->ret_stack[index].calltime;
> @@ -117,12 +145,12 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
>   * Send the trace to the ring-buffer.
>   * @return the original return address.
>   */
> -unsigned long ftrace_return_to_handler(void)
> +unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
>  {
>  	struct ftrace_graph_ret trace;
>  	unsigned long ret;
>  
> -	ftrace_pop_return_trace(&trace, &ret);
> +	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
>  	trace.rettime = trace_clock_local();
>  	ftrace_graph_return(&trace);
>  	barrier();
> -- 
> 1.6.3.1
> 
> -- 

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ