Date: Fri, 24 May 2019 23:16:36 -0400
From: Steven Rostedt <rostedt@...dmis.org>
To: linux-kernel@...r.kernel.org
Cc: Ingo Molnar <mingo@...nel.org>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Thomas Gleixner <tglx@...utronix.de>,
	Peter Zijlstra <peterz@...radead.org>,
	Masami Hiramatsu <mhiramat@...nel.org>,
	Josh Poimboeuf <jpoimboe@...hat.com>,
	Frederic Weisbecker <frederic@...nel.org>,
	Joel Fernandes <joel@...lfernandes.org>,
	Andy Lutomirski <luto@...nel.org>,
	Mark Rutland <mark.rutland@....com>,
	Namhyung Kim <namhyung@...nel.org>,
	"Frank Ch. Eigler" <fche@...hat.com>
Subject: [PATCH 03/16 v3] fgraph: Have the current->ret_stack go down not up

From: "Steven Rostedt (VMware)" <rostedt@...dmis.org>

Change the direction of the current->ret_stack shadow stack so that it
grows down, the same as most normal arch stacks do.

Suggested-by: Peter Zijlstra <peterz@...radead.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@...dmis.org>
---
 kernel/trace/fgraph.c | 39 ++++++++++++++++++++-------------------
 1 file changed, 20 insertions(+), 19 deletions(-)

diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 63e701771c20..b0f8ae269351 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -27,8 +27,9 @@
 #define FGRAPH_RET_INDEX (FGRAPH_RET_SIZE / sizeof(long))
 #define SHADOW_STACK_SIZE (PAGE_SIZE)
 #define SHADOW_STACK_INDEX (SHADOW_STACK_SIZE / sizeof(long))
-/* Leave on a buffer at the end */
-#define SHADOW_STACK_MAX_INDEX (SHADOW_STACK_INDEX - FGRAPH_RET_INDEX)
+#define SHADOW_STACK_MAX_INDEX SHADOW_STACK_INDEX
+/* Leave on a little buffer at the bottom */
+#define SHADOW_STACK_MIN_INDEX FGRAPH_RET_INDEX

 #define RET_STACK(t, index) ((struct ftrace_ret_stack *)(&(t)->ret_stack[index]))
 #define RET_STACK_INC(c) ({ c += FGRAPH_RET_INDEX; })
@@ -89,16 +90,16 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func,
 	smp_rmb();

 	/* The return trace stack is full */
-	if (current->curr_ret_stack >= SHADOW_STACK_MAX_INDEX) {
+	if (current->curr_ret_stack <= SHADOW_STACK_MIN_INDEX) {
 		atomic_inc(&current->trace_overrun);
 		return -EBUSY;
 	}

 	calltime = trace_clock_local();

-	index = current->curr_ret_stack;
-	RET_STACK_INC(current->curr_ret_stack);
-	ret_stack = RET_STACK(current, index);
+	RET_STACK_DEC(current->curr_ret_stack);
+	ret_stack = RET_STACK(current, current->curr_ret_stack);
+	/* Make sure interrupts see the current value of curr_ret_stack */
 	barrier();
 	ret_stack->ret = ret;
 	ret_stack->func = func;
@@ -129,7 +130,7 @@ int function_graph_enter(unsigned long ret, unsigned long func,

 	return 0;
  out_ret:
-	RET_STACK_DEC(current->curr_ret_stack);
+	RET_STACK_INC(current->curr_ret_stack);
  out:
 	current->curr_ret_depth--;
 	return -EBUSY;
@@ -144,9 +145,8 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
 	int index;

 	index = current->curr_ret_stack;
-	RET_STACK_DEC(index);

-	if (unlikely(index < 0 || index > SHADOW_STACK_MAX_INDEX)) {
+	if (unlikely(index < 0 || index >= SHADOW_STACK_MAX_INDEX)) {
 		ftrace_graph_stop();
 		WARN_ON(1);
 		/* Might as well panic, otherwise we have no where to go */
@@ -239,7 +239,7 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 	 * curr_ret_stack is after that.
 	 */
 	barrier();
-	RET_STACK_DEC(current->curr_ret_stack);
+	RET_STACK_INC(current->curr_ret_stack);

 	if (unlikely(!ret)) {
 		ftrace_graph_stop();
@@ -302,9 +302,9 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
 	if (ret != (unsigned long)return_to_handler)
 		return ret;

-	RET_STACK_DEC(index);
+	RET_STACK_INC(index);

-	for (i = index; i >= 0; RET_STACK_DEC(i)) {
+	for (i = index; i < SHADOW_STACK_MAX_INDEX; RET_STACK_INC(i)) {
 		ret_stack = RET_STACK(task, i);
 		if (ret_stack->retp == retp)
 			return ret_stack->ret;
@@ -322,13 +322,13 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
 		return ret;

 	task_idx = task->curr_ret_stack;
-	RET_STACK_DEC(task_idx);
+	RET_STACK_INC(task_idx);

-	if (!task->ret_stack || task_idx < *idx)
+	if (!task->ret_stack || task_idx > *idx)
 		return ret;

 	task_idx -= *idx;
-	RET_STACK_INC(*idx);
+	RET_STACK_DEC(*idx);

 	return RET_STACK(task, task_idx);
 }
@@ -391,7 +391,7 @@ static int alloc_retstack_tasklist(unsigned long **ret_stack_list)
 		if (t->ret_stack == NULL) {
 			atomic_set(&t->tracing_graph_pause, 0);
 			atomic_set(&t->trace_overrun, 0);
-			t->curr_ret_stack = 0;
+			t->curr_ret_stack = SHADOW_STACK_MAX_INDEX;
 			t->curr_ret_depth = -1;
 			/* Make sure the tasks see the 0 first: */
 			smp_wmb();
@@ -436,10 +436,11 @@ ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
 	 */
 	timestamp -= next->ftrace_timestamp;

-	for (index = next->curr_ret_stack - FGRAPH_RET_INDEX; index >= 0; ) {
+	for (index = next->curr_ret_stack + FGRAPH_RET_INDEX;
+	     index < SHADOW_STACK_MAX_INDEX; ) {
 		ret_stack = RET_STACK(next, index);
 		ret_stack->calltime += timestamp;
-		index -= FGRAPH_RET_INDEX;
+		index += FGRAPH_RET_INDEX;
 	}
 }
@@ -530,7 +531,7 @@ void ftrace_graph_init_task(struct task_struct *t)
 {
 	/* Make sure we do not use the parent ret_stack */
 	t->ret_stack = NULL;
-	t->curr_ret_stack = 0;
+	t->curr_ret_stack = SHADOW_STACK_MAX_INDEX;
 	t->curr_ret_depth = -1;

 	if (ftrace_graph_active) {
-- 
2.20.1
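
For readers who want to see the indexing discipline the patch switches to in
isolation, here is a minimal userspace sketch of a downward-growing shadow
stack: the index starts at the top (MAX), a push decrements it before storing,
a pop reads and then increments, and a small reserved buffer at the bottom
guards against overflow. The names (shadow_push, shadow_pop, struct frame) and
the single-entry frames are illustrative simplifications, not the kernel's
API; the real code stores multi-word struct ftrace_ret_stack entries through
the RET_STACK()/RET_STACK_INC()/RET_STACK_DEC() macros shown in the diff.

/*
 * Minimal sketch of a downward-growing shadow stack (illustrative
 * names; one small frame per entry instead of the kernel's
 * multi-word struct ftrace_ret_stack).
 */
#include <stdio.h>

#define STACK_ENTRIES	16		/* stand-in for SHADOW_STACK_INDEX     */
#define MIN_INDEX	1		/* stand-in for SHADOW_STACK_MIN_INDEX */
#define MAX_INDEX	STACK_ENTRIES	/* stand-in for SHADOW_STACK_MAX_INDEX */

struct frame {
	unsigned long ret;		/* saved return address */
	unsigned long func;		/* traced function      */
};

static struct frame stack[STACK_ENTRIES];
static int curr = MAX_INDEX;		/* empty stack: index sits at the top */

/* Push grows toward index 0; refuse to enter the bottom guard buffer. */
static int shadow_push(unsigned long ret, unsigned long func)
{
	if (curr <= MIN_INDEX)
		return -1;		/* stack full (the kernel returns -EBUSY) */
	curr--;				/* analogous to RET_STACK_DEC() */
	stack[curr].ret = ret;
	stack[curr].func = func;
	return 0;
}

/* Pop moves back up toward MAX_INDEX; reject an empty or corrupt index. */
static int shadow_pop(struct frame *out)
{
	if (curr < 0 || curr >= MAX_INDEX)
		return -1;		/* nothing to pop */
	*out = stack[curr];
	curr++;				/* analogous to RET_STACK_INC() */
	return 0;
}

int main(void)
{
	struct frame f;

	shadow_push(0x1000, 0x2000);
	shadow_push(0x1010, 0x3000);

	while (!shadow_pop(&f))		/* unwinds the newest frame first */
		printf("ret=%#lx func=%#lx\n", f.ret, f.func);
	return 0;
}

Running the sketch pops the frames newest-first, the same order in which
ftrace_return_to_handler() unwinds them, and a push that would cross into the
guard buffer fails the way the kernel's full-stack check does.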