Message-ID: <20251026201911.505204-6-xin@zytor.com>
Date: Sun, 26 Oct 2025 13:18:53 -0700
From: "Xin Li (Intel)" <xin@...or.com>
To: linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
linux-doc@...r.kernel.org
Cc: pbonzini@...hat.com, seanjc@...gle.com, corbet@....net, tglx@...utronix.de,
mingo@...hat.com, bp@...en8.de, dave.hansen@...ux.intel.com,
x86@...nel.org, hpa@...or.com, xin@...or.com, luto@...nel.org,
peterz@...radead.org, andrew.cooper3@...rix.com, chao.gao@...el.com,
hch@...radead.org, sohil.mehta@...el.com
Subject: [PATCH v9 05/22] x86/cea: Use array indexing to simplify exception stack access
Refactor struct cea_exception_stacks to use array indexing for
exception stack access, which improves code clarity and removes its
dependence on the ESTACKS_MEMBERS() macro.
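With the array layout, an individual exception stack is selected by its
enum exception_stack_ordering index instead of a dedicated named member.
A minimal sketch of the resulting access pattern (illustrative only, not
lifted verbatim from the patch):

	struct cea_exception_stacks *s = __this_cpu_read(cea_exception_stacks);

	/* Base of this CPU's NMI IST stack, selected by enum index (sketch) */
	unsigned long nmi_bottom = (unsigned long)&s->event_stacks[ESTACK_NMI].stack;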
Convert __this_cpu_ist_{bottom,top}_va() from macros to functions,
allowing removal of the now-obsolete CEA_ESTACK_BOT and CEA_ESTACK_TOP
macros.
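Callers keep passing the same ESTACK_* tokens, but the argument is now an
enum constant used as an array index rather than a token pasted into a
member name, e.g. (illustrative sketch, not an in-tree caller):

	/* Top of this CPU's #DB IST stack, one byte past its last usable byte */
	unsigned long db_top = __this_cpu_ist_top_va(ESTACK_DB);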
Also drop CEA_ESTACK_SIZE, which just duplicated EXCEPTION_STKSZ.
Signed-off-by: Xin Li (Intel) <xin@...or.com>
---
Change in v9:
* Refactor first and then export in a separate patch (Dave Hansen).
Changes in v7:
* Access cea_exception_stacks using array indexing (Dave Hansen).
* Use BUILD_BUG_ON(ESTACK_DF != 0) to ensure the starting index is 0
(Dave Hansen).
* Remove Suggested-bys (Dave Hansen).
* Move rename code to a separate patch (Dave Hansen).
Changes in v5:
* Export accessor instead of data (Christoph Hellwig).
* Add Tested-by from Xuelian Guo.
Change in v4:
* Rewrite the change log and add comments to the export (Dave Hansen).
---
arch/x86/include/asm/cpu_entry_area.h | 52 ++++++++++++---------------
arch/x86/kernel/dumpstack_64.c | 4 +--
arch/x86/mm/cpu_entry_area.c | 21 ++++++++++-
3 files changed, 44 insertions(+), 33 deletions(-)
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index d0f884c28178..509e52fc3a0f 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -16,6 +16,19 @@
#define VC_EXCEPTION_STKSZ 0
#endif
+/*
+ * The exception stack ordering in [cea_]exception_stacks
+ */
+enum exception_stack_ordering {
+ ESTACK_DF,
+ ESTACK_NMI,
+ ESTACK_DB,
+ ESTACK_MCE,
+ ESTACK_VC,
+ ESTACK_VC2,
+ N_EXCEPTION_STACKS
+};
+
/* Macro to enforce the same ordering and stack sizes */
#define ESTACKS_MEMBERS(guardsize, optional_stack_size) \
char ESTACK_DF_stack_guard[guardsize]; \
@@ -39,37 +52,22 @@ struct exception_stacks {
/* The effective cpu entry area mapping with guard pages. */
struct cea_exception_stacks {
- ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
-};
-
-/*
- * The exception stack ordering in [cea_]exception_stacks
- */
-enum exception_stack_ordering {
- ESTACK_DF,
- ESTACK_NMI,
- ESTACK_DB,
- ESTACK_MCE,
- ESTACK_VC,
- ESTACK_VC2,
- N_EXCEPTION_STACKS
+ struct {
+ char stack_guard[PAGE_SIZE];
+ char stack[EXCEPTION_STKSZ];
+ } event_stacks[N_EXCEPTION_STACKS];
+ char IST_top_guard[PAGE_SIZE];
};
-#define CEA_ESTACK_SIZE(st) \
- sizeof(((struct cea_exception_stacks *)0)->st## _stack)
-
-#define CEA_ESTACK_BOT(ceastp, st) \
- ((unsigned long)&(ceastp)->st## _stack)
-
-#define CEA_ESTACK_TOP(ceastp, st) \
- (CEA_ESTACK_BOT(ceastp, st) + CEA_ESTACK_SIZE(st))
-
#define CEA_ESTACK_OFFS(st) \
- offsetof(struct cea_exception_stacks, st## _stack)
+ offsetof(struct cea_exception_stacks, event_stacks[st].stack)
#define CEA_ESTACK_PAGES \
(sizeof(struct cea_exception_stacks) / PAGE_SIZE)
+extern unsigned long __this_cpu_ist_top_va(enum exception_stack_ordering stack);
+extern unsigned long __this_cpu_ist_bottom_va(enum exception_stack_ordering stack);
+
#endif
#ifdef CONFIG_X86_32
@@ -144,10 +142,4 @@ static __always_inline struct entry_stack *cpu_entry_stack(int cpu)
return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
}
-#define __this_cpu_ist_top_va(name) \
- CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)
-
-#define __this_cpu_ist_bottom_va(name) \
- CEA_ESTACK_BOT(__this_cpu_read(cea_exception_stacks), name)
-
#endif
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 40f51e278171..93b10b264e53 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -70,9 +70,9 @@ struct estack_pages {
#define EPAGERANGE(st) \
[PFN_DOWN(CEA_ESTACK_OFFS(st)) ... \
- PFN_DOWN(CEA_ESTACK_OFFS(st) + CEA_ESTACK_SIZE(st) - 1)] = { \
+ PFN_DOWN(CEA_ESTACK_OFFS(st) + EXCEPTION_STKSZ - 1)] = { \
.offs = CEA_ESTACK_OFFS(st), \
- .size = CEA_ESTACK_SIZE(st), \
+ .size = EXCEPTION_STKSZ, \
.type = STACK_TYPE_EXCEPTION + st, }
/*
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 9fa371af8abc..b3d90f9cfbb1 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -18,6 +18,25 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage)
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
+/*
+ * Typically invoked by entry code, so must be noinstr.
+ */
+noinstr unsigned long __this_cpu_ist_bottom_va(enum exception_stack_ordering stack)
+{
+ struct cea_exception_stacks *s;
+
+ BUILD_BUG_ON(ESTACK_DF != 0);
+
+ s = __this_cpu_read(cea_exception_stacks);
+
+ return (unsigned long)&s->event_stacks[stack].stack;
+}
+
+noinstr unsigned long __this_cpu_ist_top_va(enum exception_stack_ordering stack)
+{
+ return __this_cpu_ist_bottom_va(stack) + EXCEPTION_STKSZ;
+}
+
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);
static __always_inline unsigned int cea_offset(unsigned int cpu)
@@ -132,7 +151,7 @@ static void __init percpu_setup_debug_store(unsigned int cpu)
#define cea_map_stack(name) do { \
npages = sizeof(estacks->name## _stack) / PAGE_SIZE; \
- cea_map_percpu_pages(cea->estacks.name## _stack, \
+ cea_map_percpu_pages(cea->estacks.event_stacks[name].stack, \
estacks->name## _stack, npages, PAGE_KERNEL); \
} while (0)
--
2.51.0