Message-Id: <453981eae7e8158307f971d1632d5023adbe03c3.1769074722.git.houwenlong.hwl@antgroup.com>
Date: Thu, 22 Jan 2026 18:06:14 +0800
From: Hou Wenlong <houwenlong.hwl@...group.com>
To: linux-kernel@...r.kernel.org
Cc: Hou Wenlong <houwenlong.hwl@...group.com>,
Thomas Gleixner <tglx@...nel.org>,
Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>,
Juergen Gross <jgross@...e.com>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
Ard Biesheuvel <ardb@...nel.org>,
Thomas Huth <thuth@...hat.com>,
Kiryl Shutsemau <kas@...nel.org>,
Uros Bizjak <ubizjak@...il.com>,
Brian Gerst <brgerst@...il.com>,
xen-devel@...ts.xenproject.org
Subject: [PATCH] x86/xen: Build identity mapping page tables dynamically for XENPV

After commit 47ffe0578aee ("x86/pvh: Add 64bit relocation page tables"),
the PVH entry path uses its own set of page tables instead of the
preconstructed page tables in head_64.S. Those preconstructed page
tables are now used only by XENPV, and XENPV does not actually need the
preconstructed identity page tables directly: the identity mapping can
be set up in xen_setup_kernel_pagetable() instead. Therefore, build the
identity mapping page tables dynamically, remove the preconstructed
page tables, and make the code cleaner.

Signed-off-by: Hou Wenlong <houwenlong.hwl@...group.com>
---
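A note for reviewers, not part of the commit message: the
PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) line being
removed from head_64.S expanded at assemble time into 512 2MiB entries
identity-mapping the first 1GiB, i.e. roughly the following C (a sketch
for illustration only; the protection-bits handling is schematic):

	unsigned long i;

	/*
	 * Identity-map the first 1GiB with 2MiB large pages: entry i
	 * maps virtual address i << PMD_SHIFT to the same physical
	 * address, mirroring ".quad (i << PMD_SHIFT) + PERM" from the
	 * PMDS() macro expansion.
	 */
	for (i = 0; i < PTRS_PER_PMD; i++)
		level2_ident_pgt[i] = __pmd((i << PMD_SHIFT) +
					    __PAGE_KERNEL_IDENT_LARGE_EXEC);

If I read the existing code correctly, xen_setup_kernel_pagetable()
overwrites level2_ident_pgt with Xen's copy of the kernel L2 via
copy_page() anyway, so these assemble-time entries were never consumed
on XENPV; only the L4/L3 links that this patch now sets up dynamically
are needed. For reference, pgd_index(__PAGE_OFFSET_BASE_L4) is
(0xffff888000000000 >> 39) & 511 = 273, which matches the existing
"L4[273] -> level3_ident_pgt" comment kept in context below.
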
arch/x86/include/asm/pgtable_64.h | 2 --
arch/x86/kernel/head_64.S | 28 ----------------------------
arch/x86/xen/mmu_pv.c | 9 +++++++++
3 files changed, 9 insertions(+), 30 deletions(-)

diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index f06e5d6a2747..ce45882ccd07 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -19,10 +19,8 @@
extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
extern pud_t level3_kernel_pgt[512];
-extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
-extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];

extern pgd_t init_top_pgt[];
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 21816b48537c..85d4a5094f6b 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -616,38 +616,10 @@ SYM_DATA(early_recursion_flag, .long 0)

.data

-#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
-SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
- .org init_top_pgt + L4_PAGE_OFFSET*8, 0
- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
- .org init_top_pgt + L4_START_KERNEL*8, 0
- /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
- .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
- .fill PTI_USER_PGD_FILL,8,0
-SYM_DATA_END(init_top_pgt)
-
-SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
- .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
- .fill 511, 8, 0
-SYM_DATA_END(level3_ident_pgt)
-SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
- /*
- * Since I easily can, map the first 1G.
- * Don't set NX because code runs from these pages.
- *
- * Note: This sets _PAGE_GLOBAL despite whether
- * the CPU supports it or it is enabled. But,
- * the CPU should ignore the bit.
- */
- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
-SYM_DATA_END(level2_ident_pgt)
-#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
.fill 512,8,0
.fill PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
-#endif

SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
.fill 511,8,0
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 9fa00c4a8858..7d77c233002b 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -105,6 +105,9 @@ pte_t xen_make_pte_init(pteval_t pte);
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif

+static pud_t level3_ident_pgt[PTRS_PER_PUD] __page_aligned_bss;
+static pmd_t level2_ident_pgt[PTRS_PER_PMD] __page_aligned_bss;
+
/*
* Protects atomic reservation decrease/increase against concurrent increases.
* Also protects non-atomic updates of current_pages and balloon lists.
@@ -1773,6 +1776,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
/* Zap identity mapping */
init_top_pgt[0] = __pgd(0);

+ init_top_pgt[pgd_index(__PAGE_OFFSET_BASE_L4)].pgd =
+ __pa_symbol(level3_ident_pgt) + _KERNPG_TABLE_NOENC;
+ init_top_pgt[pgd_index(__START_KERNEL_map)].pgd =
+ __pa_symbol(level3_kernel_pgt) + _PAGE_TABLE_NOENC;
+ level3_ident_pgt[0].pud = __pa_symbol(level2_ident_pgt) + _KERNPG_TABLE_NOENC;
+
/* Pre-constructed entries are in pfn, so convert to mfn */
/* L4[273] -> level3_ident_pgt */
/* L4[511] -> level3_kernel_pgt */
--
2.31.1