Message-ID: <20200529213321.303027161@infradead.org>
Date: Fri, 29 May 2020 23:27:38 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: tglx@...utronix.de, luto@...capital.net, peterz@...radead.org
Cc: linux-kernel@...r.kernel.org, x86@...nel.org,
Lai Jiangshan <laijs@...ux.alibaba.com>,
sean.j.christopherson@...el.com, andrew.cooper3@...rix.com,
daniel.thompson@...aro.org, a.darwish@...utronix.de,
rostedt@...dmis.org, bigeasy@...utronix.de
Subject: [PATCH 10/14] x86/entry: Remove DBn stacks
Both #DB itself, as well as all other IST users (NMI, #MC), now clear DR7 on
entry. Combined with not allowing breakpoints on entry/noinstr/NOKPROBE text
and not single stepping (EFLAGS.TF) inside the #DB handler, this should
guarantee there are no nested #DB exceptions, so the extra DB1/DB2 recursion
stacks and the IST shifting on #DB entry can be removed.
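For reference, the DR7 clearing relied on above happens on IST entry and is
roughly of the following shape. This is only a minimal sketch modelled on the
local_db_save()/local_db_restore() helpers introduced earlier in this series,
using the usual get_debugreg()/set_debugreg() accessors from
<asm/debugreg.h>; the actual helpers there may differ in detail:

  static __always_inline unsigned long local_db_save(void)
  {
  	unsigned long dr7;

  	get_debugreg(dr7, 7);
  	/* DR7 bit 10 is architecturally set; mask it for the empty check */
  	dr7 &= ~0x400;
  	if (dr7)
  		set_debugreg(0, 7);

  	/* compiler barrier: the clear must not be reordered into the code it protects */
  	barrier();
  	return dr7;
  }

  static __always_inline void local_db_restore(unsigned long dr7)
  {
  	barrier();
  	if (dr7)
  		set_debugreg(dr7, 7);
  }

With every IST entry saving and clearing DR7 like this, a #DB cannot recurse
on itself, which is what makes the DB1/DB2 stacks below redundant.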
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
arch/x86/entry/entry_64.S | 17 -----------------
arch/x86/include/asm/cpu_entry_area.h | 12 +++---------
arch/x86/kernel/asm-offsets_64.c | 3 ---
arch/x86/kernel/dumpstack_64.c | 7 ++-----
arch/x86/mm/cpu_entry_area.c | 1 -
5 files changed, 5 insertions(+), 35 deletions(-)
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -396,11 +396,6 @@ SYM_CODE_END(\asmsym)
idtentry \vector asm_\cfunc \cfunc has_error_code=0
.endm
-/*
- * MCE and DB exceptions
- */
-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + (x) * 8)
-
/**
* idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
* @vector: Vector number
@@ -416,10 +411,6 @@ SYM_CODE_END(\asmsym)
* If hits in kernel mode then it needs to go through the paranoid
* entry as the exception can hit any random state. No preemption
* check on exit to keep the paranoid path simple.
- *
- * If the trap is #DB then the interrupt stack entry in the IST is
- * moved to the second stack, so a potential recursion will have a
- * fresh IST.
*/
.macro idtentry_mce_db vector asmsym cfunc
SYM_CODE_START(\asmsym)
@@ -445,16 +436,8 @@ SYM_CODE_START(\asmsym)
movq %rsp, %rdi /* pt_regs pointer */
- .if \vector == X86_TRAP_DB
- subq $DB_STACK_OFFSET, CPU_TSS_IST(IST_INDEX_DB)
- .endif
-
call \cfunc
- .if \vector == X86_TRAP_DB
- addq $DB_STACK_OFFSET, CPU_TSS_IST(IST_INDEX_DB)
- .endif
-
jmp paranoid_exit
/* Switch to the regular task stack and use the noist entry point */
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -11,15 +11,11 @@
#ifdef CONFIG_X86_64
/* Macro to enforce the same ordering and stack sizes */
-#define ESTACKS_MEMBERS(guardsize, db2_holesize)\
+#define ESTACKS_MEMBERS(guardsize) \
char DF_stack_guard[guardsize]; \
char DF_stack[EXCEPTION_STKSZ]; \
char NMI_stack_guard[guardsize]; \
char NMI_stack[EXCEPTION_STKSZ]; \
- char DB2_stack_guard[guardsize]; \
- char DB2_stack[db2_holesize]; \
- char DB1_stack_guard[guardsize]; \
- char DB1_stack[EXCEPTION_STKSZ]; \
char DB_stack_guard[guardsize]; \
char DB_stack[EXCEPTION_STKSZ]; \
char MCE_stack_guard[guardsize]; \
@@ -28,12 +24,12 @@
/* The exception stacks' physical storage. No guard pages required */
struct exception_stacks {
- ESTACKS_MEMBERS(0, 0)
+ ESTACKS_MEMBERS(0)
};
/* The effective cpu entry area mapping with guard pages. */
struct cea_exception_stacks {
- ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
+ ESTACKS_MEMBERS(PAGE_SIZE)
};
/*
@@ -42,8 +38,6 @@ struct cea_exception_stacks {
enum exception_stack_ordering {
ESTACK_DF,
ESTACK_NMI,
- ESTACK_DB2,
- ESTACK_DB1,
ESTACK_DB,
ESTACK_MCE,
N_EXCEPTION_STACKS
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -57,9 +57,6 @@ int main(void)
BLANK();
#undef ENTRY
- OFFSET(TSS_ist, tss_struct, x86_tss.ist);
- DEFINE(DB_STACK_OFFSET, offsetof(struct cea_exception_stacks, DB_stack) -
- offsetof(struct cea_exception_stacks, DB1_stack));
BLANK();
#ifdef CONFIG_STACKPROTECTOR
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -22,15 +22,13 @@
static const char * const exception_stack_names[] = {
[ ESTACK_DF ] = "#DF",
[ ESTACK_NMI ] = "NMI",
- [ ESTACK_DB2 ] = "#DB2",
- [ ESTACK_DB1 ] = "#DB1",
[ ESTACK_DB ] = "#DB",
[ ESTACK_MCE ] = "#MC",
};
const char *stack_type_name(enum stack_type type)
{
- BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
+ BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
if (type == STACK_TYPE_IRQ)
return "IRQ";
@@ -79,7 +77,6 @@ static const
struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
EPAGERANGE(DF),
EPAGERANGE(NMI),
- EPAGERANGE(DB1),
EPAGERANGE(DB),
EPAGERANGE(MCE),
};
@@ -91,7 +88,7 @@ static bool in_exception_stack(unsigned
struct pt_regs *regs;
unsigned int k;
- BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
+ BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
begin = (unsigned long)__this_cpu_read(cea_exception_stacks);
/*
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -107,7 +107,6 @@ static void __init percpu_setup_exceptio
*/
cea_map_stack(DF);
cea_map_stack(NMI);
- cea_map_stack(DB1);
cea_map_stack(DB);
cea_map_stack(MCE);
}