Message-Id: <6d4ec74fae6d4f65ab44c701495d48dc60548f7e.1469136008.git.jpoimboe@redhat.com>
Date: Thu, 21 Jul 2016 16:21:45 -0500
From: Josh Poimboeuf <jpoimboe@...hat.com>
To: Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...nel.org>,
"H . Peter Anvin" <hpa@...or.com>
Cc: x86@...nel.org, linux-kernel@...r.kernel.org,
Andy Lutomirski <luto@...capital.net>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Steven Rostedt <rostedt@...dmis.org>,
Brian Gerst <brgerst@...il.com>,
Kees Cook <keescook@...omium.org>,
Peter Zijlstra <peterz@...radead.org>,
Frederic Weisbecker <fweisbec@...il.com>,
Byungchul Park <byungchul.park@....com>
Subject: [PATCH 08/19] x86/dumpstack: don't disable preemption in show_stack_log_lvl() and dump_trace()
show_stack_log_lvl() and dump_trace() are already preemption-safe:
- If they're running in irq or exception context, preemption is already
disabled, and the percpu irq stack pointers can be trusted.
- If they're running with preemption enabled, they must be running on
the task stack anyway, so it doesn't matter if they're comparing the
stack pointer against the percpu irq stack pointer from this CPU or
another one: either way it won't match (see the sketch below the '---').
Signed-off-by: Josh Poimboeuf <jpoimboe@...hat.com>
---
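
A minimal sketch of the reasoning above, for illustration only: it
condenses the is_hardirq_stack()/is_irq_stack() pair touched by the
32-bit hunk below. The helper name and the THREAD_SIZE bound are
assumptions of the sketch; hardirq_stack and this_cpu_read() are taken
from the patch, and the snippet assumes the dumpstack_32.c context,
which already has the needed percpu declarations.

	/*
	 * Hypothetical helper: is 'stack' inside this CPU's hardirq stack?
	 *
	 * - In irq/exception context, preemption is already disabled, so
	 *   the pointer returned by this_cpu_read() is stable and the
	 *   range check is exact.
	 *
	 * - With preemption enabled, we may migrate right after the read,
	 *   so 'irq' can belong to another CPU.  But then we must be
	 *   walking a task stack, which lies inside no CPU's irq stack,
	 *   so the range check fails either way -- the correct answer.
	 */
	static bool stack_is_on_this_cpu_hardirq_stack(void *stack)
	{
		void *irq = this_cpu_read(hardirq_stack);

		return stack >= irq && stack < irq + THREAD_SIZE;
	}
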
arch/x86/kernel/dumpstack_32.c | 14 ++++++--------
arch/x86/kernel/dumpstack_64.c | 29 ++++++++++-------------------
2 files changed, 16 insertions(+), 27 deletions(-)
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index c533b8b..b07d5c9 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -24,16 +24,16 @@ static void *is_irq_stack(void *p, void *irq)
}
-static void *is_hardirq_stack(unsigned long *stack, int cpu)
+static void *is_hardirq_stack(unsigned long *stack)
{
- void *irq = per_cpu(hardirq_stack, cpu);
+ void *irq = this_cpu_read(hardirq_stack);
return is_irq_stack(stack, irq);
}
-static void *is_softirq_stack(unsigned long *stack, int cpu)
+static void *is_softirq_stack(unsigned long *stack)
{
- void *irq = per_cpu(softirq_stack, cpu);
+ void *irq = this_cpu_read(softirq_stack);
return is_irq_stack(stack, irq);
}
@@ -42,7 +42,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data)
{
- const unsigned cpu = get_cpu();
int graph = 0;
u32 *prev_esp;
@@ -53,9 +52,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
for (;;) {
void *end_stack;
- end_stack = is_hardirq_stack(stack, cpu);
+ end_stack = is_hardirq_stack(stack);
if (!end_stack)
- end_stack = is_softirq_stack(stack, cpu);
+ end_stack = is_softirq_stack(stack);
bp = ops->walk_stack(task, stack, bp, ops, data,
end_stack, &graph);
@@ -74,7 +73,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
break;
touch_nmi_watchdog();
}
- put_cpu();
}
EXPORT_SYMBOL(dump_trace);
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 6a2d14e..634ed22 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -31,8 +31,8 @@ static char x86_stack_ids[][8] = {
#endif
};
-static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
- unsigned *usedp, char **idp)
+static unsigned long *in_exception_stack(unsigned long stack, unsigned *usedp,
+ char **idp)
{
unsigned k;
@@ -41,7 +41,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
* 'stack' is in one of them:
*/
for (k = 0; k < N_EXCEPTION_STACKS; k++) {
- unsigned long end = per_cpu(orig_ist, cpu).ist[k];
+ unsigned long end = this_cpu_ptr(&orig_ist)->ist[k];
/*
* Is 'stack' above this exception frame's end?
* If yes then skip to the next frame.
@@ -111,7 +111,7 @@ enum stack_type {
};
static enum stack_type
-analyze_stack(int cpu, struct task_struct *task, unsigned long *stack,
+analyze_stack(struct task_struct *task, unsigned long *stack,
unsigned long **stack_end, unsigned long *irq_stack,
unsigned *used, char **id)
{
@@ -121,8 +121,7 @@ analyze_stack(int cpu, struct task_struct *task, unsigned long *stack,
if ((unsigned long)task_stack_page(task) == addr)
return STACK_IS_NORMAL;
- *stack_end = in_exception_stack(cpu, (unsigned long)stack,
- used, id);
+ *stack_end = in_exception_stack((unsigned long)stack, used, id);
if (*stack_end)
return STACK_IS_EXCEPTION;
@@ -149,8 +148,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data)
{
- const unsigned cpu = get_cpu();
- unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
+ unsigned long *irq_stack = (unsigned long *)this_cpu_read(irq_stack_ptr);
unsigned used = 0;
int graph = 0;
int done = 0;
@@ -169,8 +167,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
enum stack_type stype;
char *id;
- stype = analyze_stack(cpu, task, stack, &stack_end,
- irq_stack, &used, &id);
+ stype = analyze_stack(task, stack, &stack_end, irq_stack, &used,
+ &id);
/* Default finish unless specified to continue */
done = 1;
@@ -225,7 +223,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
* This handles the process stack:
*/
bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
- put_cpu();
}
EXPORT_SYMBOL(dump_trace);
@@ -236,15 +233,10 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *irq_stack_end;
unsigned long *irq_stack;
unsigned long *stack;
- int cpu;
int i;
- preempt_disable();
- cpu = smp_processor_id();
-
- irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
- irq_stack = (unsigned long *)(per_cpu(irq_stack_ptr, cpu) -
- IRQ_USABLE_STACK_SIZE);
+ irq_stack_end = (unsigned long *)this_cpu_read(irq_stack_ptr);
+ irq_stack = irq_stack_end - (IRQ_USABLE_STACK_SIZE / sizeof(long));
sp = sp ? : get_stack_pointer(task, regs);
@@ -275,7 +267,6 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
stack++;
touch_nmi_watchdog();
}
- preempt_enable();
pr_cont("\n");
show_trace_log_lvl(task, regs, sp, bp, log_lvl);
--
2.7.4