Message-ID: <20241014105912.3207374-47-ryan.roberts@arm.com>
Date: Mon, 14 Oct 2024 11:58:54 +0100
From: Ryan Roberts <ryan.roberts@....com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Anshuman Khandual <anshuman.khandual@....com>,
Ard Biesheuvel <ardb@...nel.org>,
Catalin Marinas <catalin.marinas@....com>,
David Hildenbrand <david@...hat.com>,
Greg Marsden <greg.marsden@...cle.com>,
Ivan Ivanov <ivan.ivanov@...e.com>,
Kalesh Singh <kaleshsingh@...gle.com>,
Marc Zyngier <maz@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Matthias Brugger <mbrugger@...e.com>,
Miroslav Benes <mbenes@...e.cz>,
Will Deacon <will@...nel.org>
Cc: Ryan Roberts <ryan.roberts@....com>,
linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Subject: [RFC PATCH v1 47/57] arm64: Statically allocate and align for worst-case page size

Increase the size and alignment of the zero page and of the various
static buffers used for page tables to PAGE_SIZE_MAX. For compile-time
page size builds, PAGE_SIZE_MAX resolves to PAGE_SIZE, so this is no
functional change.

For boot-time page size builds, we may in future consider freeing the
unused tail pages at runtime, once the selected page size is known to
be smaller than PAGE_SIZE_MAX.

Signed-off-by: Ryan Roberts <ryan.roberts@....com>
---
***NOTE***
Any confused maintainers may want to read the cover note here for context:
https://lore.kernel.org/all/20241014105514.3206191-1-ryan.roberts@arm.com/
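
For anyone reading this patch without the rest of the series to hand,
the pattern being applied is sketched below. The concrete values are
illustrative assumptions (the real definitions are introduced earlier
in the series); arm64's largest supported granule is 64K:

  /*
   * Illustrative sketch only -- not part of this patch. Assume the
   * series defines PAGE_SIZE_MAX as the largest page size the kernel
   * can select at boot:
   */
  #define PAGE_SHIFT_MAX	16	/* 64K granule */
  #define PAGE_SIZE_MAX		(1UL << PAGE_SHIFT_MAX)

  /*
   * Worst-case static allocation: size and align the buffer for the
   * largest possible page size so that it is usable whichever page
   * size is selected at boot. For compile-time page size builds,
   * PAGE_SIZE_MAX == PAGE_SIZE and this is identical to the old code.
   */
  static u8 example_ptes[PAGE_SIZE_MAX] __aligned(PAGE_SIZE_MAX);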
arch/arm64/include/asm/pgtable.h | 2 +-
arch/arm64/kernel/pi/map_kernel.c | 2 +-
arch/arm64/mm/mmu.c | 6 +++---
3 files changed, 5 insertions(+), 5 deletions(-)
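
As noted in the commit message, boot-time page size builds could later
free the unused tail of these worst-case buffers. Purely as a
hypothetical sketch (nothing below is part of this series;
free_reserved_area() and lm_alias() are existing helpers from
linux/mm.h, but the hook point and bounds are my assumptions):

  /*
   * Hypothetical future optimisation, not implemented by this patch:
   * once the boot-selected page size is known, return the unused tail
   * of the zero page to the buddy allocator.
   */
  static void __init free_zero_page_tail(void)
  {
  	if (PAGE_SIZE < PAGE_SIZE_MAX)
  		free_reserved_area(lm_alias((void *)empty_zero_page + PAGE_SIZE),
  				   lm_alias((void *)empty_zero_page + PAGE_SIZE_MAX),
  				   0, "unused zero page tail");
  }
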
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7a4f5604be3f7..fd47f70a42396 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -61,7 +61,7 @@
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+extern unsigned long empty_zero_page[PAGE_SIZE_MAX / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e) \
diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c
index 7a62d4238449d..deb8cd50b0b0c 100644
--- a/arch/arm64/kernel/pi/map_kernel.c
+++ b/arch/arm64/kernel/pi/map_kernel.c
@@ -199,7 +199,7 @@ static void __init remap_idmap(bool use_lpa2, int page_shift)

static void __init map_fdt(u64 fdt, int page_shift)
{
- static u8 ptes[INIT_IDMAP_FDT_SIZE_MAX] __initdata __aligned(PAGE_SIZE);
+ static u8 ptes[INIT_IDMAP_FDT_SIZE_MAX] __initdata __aligned(PAGE_SIZE_MAX);
static bool first_time __initdata = true;
u64 limit = (u64)&ptes[INIT_IDMAP_FDT_SIZE_MAX];
u64 efdt = fdt + MAX_FDT_SIZE;
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 84df9f278d24d..b4cd3b6a73c22 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -62,7 +62,7 @@ long __section(".mmuoff.data.write") __early_cpu_boot_status;
* Empty_zero_page is a special page that is used for zero-initialized data
* and COW.
*/
-unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
+unsigned long empty_zero_page[PAGE_SIZE_MAX / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static DEFINE_SPINLOCK(swapper_pgdir_lock);
@@ -783,8 +783,8 @@ void __pi_map_range(u64 *pgd, u64 limit, u64 start, u64 end, u64 pa,
pgprot_t prot, int level, pte_t *tbl, bool may_use_cont,
u64 va_offset);

-static u8 idmap_ptes[IDMAP_LEVELS_MAX - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init,
- kpti_ptes[IDMAP_LEVELS_MAX - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init;
+static u8 idmap_ptes[IDMAP_LEVELS_MAX - 1][PAGE_SIZE_MAX] __aligned(PAGE_SIZE_MAX) __ro_after_init,
+ kpti_ptes[IDMAP_LEVELS_MAX - 1][PAGE_SIZE_MAX] __aligned(PAGE_SIZE_MAX) __ro_after_init;

static void __init create_idmap(void)
{
--
2.43.0