Message-ID: <YUB5VchM3a/MiZpX@hirez.programming.kicks-ass.net>
Date: Tue, 14 Sep 2021 12:28:37 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: 王贇 <yun.wang@...ux.alibaba.com>
Cc: Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...hat.com>,
Namhyung Kim <namhyung@...nel.org>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andrii@...nel.org>,
Martin KaFai Lau <kafai@...com>,
Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...nel.org>,
"open list:PERFORMANCE EVENTS SUBSYSTEM"
<linux-perf-users@...r.kernel.org>,
"open list:PERFORMANCE EVENTS SUBSYSTEM"
<linux-kernel@...r.kernel.org>,
"open list:BPF (Safe dynamic programs and tools)"
<netdev@...r.kernel.org>,
"open list:BPF (Safe dynamic programs and tools)"
<bpf@...r.kernel.org>
Subject: Re: [RFC PATCH] perf: fix panic by mark recursion inside
perf_log_throttle
On Tue, Sep 14, 2021 at 09:58:44AM +0800, 王贇 wrote:
> On 2021/9/13 6:24 PM, Peter Zijlstra wrote:
> > I'm confused tho; where does the #DF come from? Because taking a #PF
> > from NMI should be perfectly fine.
> >
> > AFAICT that callchain is something like:
> >
> > NMI
> >   perf_event_nmi_handler()
> >     (part of the chain is missing here)
> >     perf_log_throttle()
> >       perf_output_begin() /* events/ring_buffer.c */
> >         rcu_read_lock()
> >           rcu_lock_acquire()
> >             lock_acquire()
> >               trace_lock_acquire() --> perf_trace_foo
> >
> >                 ...
> >                 perf_callchain()
> >                   perf_callchain_user()
> >                     #PF (fully expected during a userspace callchain)
> >                       (some stuff, until the first __fentry)
> >                         perf_trace_function_call
> >                           perf_trace_buf_alloc()
> >                             perf_swevent_get_recursion_context()
> >                             *BOOM*
> >
> > Now, supposedly we then take another #PF from get_recursion_context() or
> > something, but that doesn't make sense. That should just work...
> >
> > Can you figure out what's going wrong there? Going by the RIP, this
> > almost looks like 'swhash->recursion' goes splat, but again that makes
> > no sense, that's a per-cpu variable.
>
> That's true. I have actually tried several approaches to avoid the issue,
> but it triggers a panic whenever we access 'swhash->recursion'. The array
> should be accessible but is somehow broken, which is why I suspect a stack
> overflow, since the NMI repeated and the trace seems very long; but that
> is just a suspicion...
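For context: perf_swevent_get_recursion_context() is about as simple as it
gets, a per-cpu pointer load plus an array index. A rough sketch of the
logic, paraphrased from kernel/events/internal.h and kernel/events/core.c
(the exact code differs per version), to show there is nothing in there
that should fault on its own:

	struct swevent_htable {
		/* ... */
		int	recursion[PERF_NR_CONTEXTS];	/* 4 slots */
	};
	static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);

	static inline int get_recursion_context(int *recursion)
	{
		unsigned int pc = preempt_count();
		unsigned char rctx = 0;

		/* Pick the slot for the current context:
		 * task, softirq, hardirq or NMI. */
		rctx += !!(pc & (NMI_MASK));
		rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK));
		rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));

		if (recursion[rctx])
			return -1;	/* already active in this context */

		recursion[rctx]++;
		barrier();

		return rctx;
	}

	int perf_swevent_get_recursion_context(void)
	{
		struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);

		return get_recursion_context(swhash->recursion);
	}

If that access blows up, the suspect is the stack it runs on, not the
per-cpu data itself.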
You can simply increase the exception stack size to test this:
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index a8d4ad856568..e9e2c3ba5923 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -15,7 +15,7 @@
 #define THREAD_SIZE_ORDER	(2 + KASAN_STACK_ORDER)
 #define THREAD_SIZE  (PAGE_SIZE << THREAD_SIZE_ORDER)
 
-#define EXCEPTION_STACK_ORDER (0 + KASAN_STACK_ORDER)
+#define EXCEPTION_STACK_ORDER (1 + KASAN_STACK_ORDER)
 #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
 
 #define IRQ_STACK_ORDER (2 + KASAN_STACK_ORDER)
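Assuming 4K pages and KASAN off (KASAN_STACK_ORDER == 0), that doubles each
exception stack:

	EXCEPTION_STKSZ = PAGE_SIZE << EXCEPTION_STACK_ORDER
	                = 4096 << 1 = 8192 bytes	(up from 4096)

If the #DF stops reproducing with the larger stack, that confirms the
overflow theory.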
Also, something like this might be useful:
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index f248eb2ac2d4..4dfdbb9395eb 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -33,6 +33,8 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task,
 
 bool in_entry_stack(unsigned long *stack, struct stack_info *info);
 
+bool in_exception_stack_guard(unsigned long *stack);
+
 int get_stack_info(unsigned long *stack, struct task_struct *task,
 		   struct stack_info *info, unsigned long *visit_mask);
 bool get_stack_info_noinstr(unsigned long *stack, struct task_struct *task,
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 5601b95944fa..056cf4f31599 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -126,6 +126,39 @@ static __always_inline bool in_exception_stack(unsigned long *stack, struct stac
 	return true;
 }
 
+noinstr bool in_exception_stack_guard(unsigned long *stack)
+{
+	unsigned long begin, end, stk = (unsigned long)stack;
+	const struct estack_pages *ep;
+	unsigned int k;
+
+	BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
+
+	begin = (unsigned long)__this_cpu_read(cea_exception_stacks);
+	/*
+	 * Handle the case where stack trace is collected _before_
+	 * cea_exception_stacks had been initialized.
+	 */
+	if (!begin)
+		return false;
+
+	end = begin + sizeof(struct cea_exception_stacks);
+	/* Bail if @stack is outside the exception stack area. */
+	if (stk < begin || stk >= end)
+		return false;
+
+	/* Calc page offset from start of exception stacks */
+	k = (stk - begin) >> PAGE_SHIFT;
+	/* Lookup the page descriptor */
+	ep = &estack_pages[k];
+	/* Guard page? */
+	if (!ep->size)
+		return true;
+
+	return false;
+}
+
+
 static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info)
 {
 	unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
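The reason the !ep->size check is enough: the per-cpu exception stack area
interleaves the IST stacks with unmapped guard pages, and estack_pages[]
has a zero-sized descriptor for every guard page. Roughly, paraphrasing
arch/x86/include/asm/cpu_entry_area.h (the macro soup and the number of
stacks differ per version):

	struct cea_exception_stacks {
		char	DF_stack_guard[PAGE_SIZE];	/* unmapped */
		char	DF_stack[EXCEPTION_STKSZ];
		char	NMI_stack_guard[PAGE_SIZE];	/* unmapped */
		char	NMI_stack[EXCEPTION_STKSZ];
		char	DB_stack_guard[PAGE_SIZE];	/* unmapped */
		char	DB_stack[EXCEPTION_STKSZ];
		char	MCE_stack_guard[PAGE_SIZE];	/* unmapped */
		char	MCE_stack[EXCEPTION_STKSZ];
		/* ... */
	};

So if the fault address lands on a page with !ep->size, something ran off
the end of its exception stack.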
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a58800973aed..8b043ed02c0d 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -459,6 +459,9 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
 		handle_stack_overflow("kernel stack overflow (double-fault)",
 				      regs, address);
 	}
+
+	if (in_exception_stack_guard((void *)address))
+		pr_emerg("PANIC: exception stack guard: 0x%lx\n", address);
 #endif
 
 	pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
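If the NMI stack really is overflowing into its guard page, the double
fault splat should then be preceded by a line of the form (the address
being the faulting CR2 value):

	PANIC: exception stack guard: 0x<fault address>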