Message-ID: <20241014105912.3207374-46-ryan.roberts@arm.com>
Date: Mon, 14 Oct 2024 11:58:53 +0100
From: Ryan Roberts <ryan.roberts@....com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Anshuman Khandual <anshuman.khandual@....com>,
Ard Biesheuvel <ardb@...nel.org>,
Catalin Marinas <catalin.marinas@....com>,
David Hildenbrand <david@...hat.com>,
Greg Marsden <greg.marsden@...cle.com>,
Ivan Ivanov <ivan.ivanov@...e.com>,
Kalesh Singh <kaleshsingh@...gle.com>,
Marc Zyngier <maz@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Matthias Brugger <mbrugger@...e.com>,
Miroslav Benes <mbenes@...e.cz>,
Will Deacon <will@...nel.org>
Cc: Ryan Roberts <ryan.roberts@....com>,
linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Subject: [RFC PATCH v1 46/57] arm64: Generalize fixmap for boot-time page size

Some fixmap fixed address slots previously depended on PAGE_SIZE (i.e.
to determine how many slots were required to cover a given size). Since
we require the fixed address slots to be compile-time constant, let's
work out the worst-case number of required slots when the page size is
PAGE_SIZE_MIN instead.

Additionally, let's determine the worst-case number of PTE tables we
require and statically allocate enough memory.

For compile-time page size builds, the end result is the same as it was
previously.
Signed-off-by: Ryan Roberts <ryan.roberts@....com>
---
***NOTE***
Any confused maintainers may want to read the cover note here for context:
https://lore.kernel.org/all/20241014105514.3206191-1-ryan.roberts@arm.com/
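
[ Not part of the patch: a standalone sketch of the worst-case arithmetic
  described in the commit message. PAGE_SIZE_MIN == 4K and
  MAX_FDT_SIZE == 2M are assumed values for illustration only, not
  quoted from the kernel headers touched here. ]

/*
 * Illustrative only: recompute the worst-case fixmap slot counts when
 * sizing for the smallest page size the image may boot with.
 */
#include <stdio.h>

#define SZ_4K		(4UL * 1024)
#define SZ_256K		(256UL * 1024)
#define SZ_2M		(2UL * 1024 * 1024)

#define PAGE_SIZE_MIN	SZ_4K	/* assumed smallest supported page size */
#define MAX_FDT_SIZE	SZ_2M	/* assumed worst-case FDT size */
#define FIX_BTMAPS_SLOTS 7

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* FDT slots: one per smallest page covering MAX_FDT_SIZE, plus a
	 * spare in case the FDT is not page aligned. */
	unsigned long fdt_slots = DIV_ROUND_UP(MAX_FDT_SIZE, PAGE_SIZE_MIN) + 1;

	/* Early ioremap slots: each 256K slot is carved into smallest pages. */
	unsigned long nr_fix_btmaps = SZ_256K / PAGE_SIZE_MIN;
	unsigned long total_fix_btmaps = nr_fix_btmaps * FIX_BTMAPS_SLOTS;

	printf("FIX_FDT span:     %lu slots\n", fdt_slots);
	printf("NR_FIX_BTMAPS:    %lu\n", nr_fix_btmaps);
	printf("TOTAL_FIX_BTMAPS: %lu\n", total_fix_btmaps);
	return 0;
}
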
arch/arm64/include/asm/fixmap.h | 12 ++++++++----
arch/arm64/mm/fixmap.c | 34 ++++++++++++++++++++++-----------
2 files changed, 31 insertions(+), 15 deletions(-)
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 9a496d54dfe6e..c73fd3c1334ff 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -43,7 +43,7 @@ enum fixed_addresses {
* whether it crosses any page boundary.
*/
FIX_FDT_END,
- FIX_FDT = FIX_FDT_END + DIV_ROUND_UP(MAX_FDT_SIZE, PAGE_SIZE) + 1,
+ FIX_FDT = FIX_FDT_END + DIV_ROUND_UP(MAX_FDT_SIZE, PAGE_SIZE_MIN) + 1,
FIX_EARLYCON_MEM_BASE,
FIX_TEXT_POKE0,
@@ -79,7 +79,7 @@ enum fixed_addresses {
* Temporary boot-time mappings, used by early_ioremap(),
* before ioremap() is functional.
*/
-#define NR_FIX_BTMAPS (SZ_256K / PAGE_SIZE)
+#define NR_FIX_BTMAPS (SZ_256K / PAGE_SIZE_MIN)
#define FIX_BTMAPS_SLOTS 7
#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
@@ -101,8 +101,12 @@ enum fixed_addresses {
#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
-#define FIXADDR_TOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_TOT_START (FIXADDR_TOP - FIXADDR_TOT_SIZE)
+#define __FIXADDR_TOT_SIZE(page_shift) \
+ (__end_of_fixed_addresses << (page_shift))
+#define __FIXADDR_TOT_START(page_shift) \
+ (FIXADDR_TOP - __FIXADDR_TOT_SIZE(page_shift))
+#define FIXADDR_TOT_SIZE __FIXADDR_TOT_SIZE(PAGE_SHIFT)
+#define FIXADDR_TOT_START __FIXADDR_TOT_START(PAGE_SHIFT)
#define FIXMAP_PAGE_IO __pgprot(PROT_DEVICE_nGnRE)
diff --git a/arch/arm64/mm/fixmap.c b/arch/arm64/mm/fixmap.c
index 15ce3253ad359..a0dcf2375ccb4 100644
--- a/arch/arm64/mm/fixmap.c
+++ b/arch/arm64/mm/fixmap.c
@@ -17,27 +17,39 @@
#include <asm/tlbflush.h>
/* ensure that the fixmap region does not grow down into the PCI I/O region */
-static_assert(FIXADDR_TOT_START > PCI_IO_END);
+static_assert(__FIXADDR_TOT_START(PAGE_SHIFT_MAX) > PCI_IO_END);
-#define NR_BM_PTE_TABLES \
- SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PMD_SHIFT)
-#define NR_BM_PMD_TABLES \
- SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PUD_SHIFT)
+#define FIXMAP_LEVEL(page_shift, lvl, vstart, vend) \
+ SPAN_NR_ENTRIES(vstart, vend, PGTABLE_LEVEL_SHIFT(page_shift, lvl))
-static_assert(NR_BM_PMD_TABLES == 1);
+#define FIXMAP_PAGES(page_shift, level) \
+ FIXMAP_LEVEL(page_shift, level, \
+ __FIXADDR_TOT_START(page_shift), FIXADDR_TOP)
+
+#define FIXMAP_SIZE(page_shift, level) \
+ (FIXMAP_PAGES(page_shift, level) * (UL(1) << (page_shift)))
+
+#define FIXMAP_PTE_SIZE_MAX \
+ MAX_IF_HAVE_PGSZ(FIXMAP_SIZE(ARM64_PAGE_SHIFT_4K, 2), \
+ FIXMAP_SIZE(ARM64_PAGE_SHIFT_16K, 2), \
+ FIXMAP_SIZE(ARM64_PAGE_SHIFT_64K, 2))
+
+static_assert(FIXMAP_PAGES(ARM64_PAGE_SHIFT_4K, 1) == 1);
+static_assert(FIXMAP_PAGES(ARM64_PAGE_SHIFT_16K, 1) == 1);
+static_assert(FIXMAP_PAGES(ARM64_PAGE_SHIFT_64K, 1) == 1);
#define __BM_TABLE_IDX(addr, shift) \
(((addr) >> (shift)) - (FIXADDR_TOT_START >> (shift)))
#define BM_PTE_TABLE_IDX(addr) __BM_TABLE_IDX(addr, PMD_SHIFT)
-static pte_t bm_pte[NR_BM_PTE_TABLES][PTRS_PER_PTE] __page_aligned_bss;
-static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
-static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
+static pte_t bm_pte[FIXMAP_PTE_SIZE_MAX / sizeof(pte_t)] __page_aligned_bss;
+static pmd_t bm_pmd[MAX_PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
+static pud_t bm_pud[MAX_PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
static inline pte_t *fixmap_pte(unsigned long addr)
{
- return &bm_pte[BM_PTE_TABLE_IDX(addr)][pte_index(addr)];
+ return &bm_pte[BM_PTE_TABLE_IDX(addr) * PTRS_PER_PTE + pte_index(addr)];
}
static void __init early_fixmap_init_pte(pmd_t *pmdp, unsigned long addr)
@@ -46,7 +58,7 @@ static void __init early_fixmap_init_pte(pmd_t *pmdp, unsigned long addr)
pte_t *ptep;
if (pmd_none(pmd)) {
- ptep = bm_pte[BM_PTE_TABLE_IDX(addr)];
+ ptep = &bm_pte[BM_PTE_TABLE_IDX(addr) * PTRS_PER_PTE];
__pmd_populate(pmdp, __pa_symbol(ptep), PMD_TYPE_TABLE);
}
}
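
[ Not part of the patch: the last hunks flatten bm_pte from a 2D array to
  a 1D array because PTRS_PER_PTE stops being a compile-time constant
  once the page size is selected at boot, so the array is sized for the
  worst case and indexed by hand. A minimal userspace sketch of the
  equivalent indexing, with arbitrary stand-in sizes: ]

/*
 * Illustrative only: show that table * entries_per_table + entry into a
 * flat array addresses the same element as the old bm_pte[table][entry]
 * form. The sizes below are stand-ins, not the real arm64 values.
 */
#include <assert.h>
#include <stdio.h>

#define NR_TABLES		3	/* stand-in for the old NR_BM_PTE_TABLES */
#define ENTRIES_PER_TABLE	8	/* stand-in for a runtime PTRS_PER_PTE */

static unsigned long pte_2d[NR_TABLES][ENTRIES_PER_TABLE];
static unsigned long pte_flat[NR_TABLES * ENTRIES_PER_TABLE];

int main(void)
{
	for (int t = 0; t < NR_TABLES; t++) {
		for (int e = 0; e < ENTRIES_PER_TABLE; e++) {
			/* Old style: bm_pte[table][entry] */
			unsigned long *old = &pte_2d[t][e];
			/* New style: bm_pte[table * PTRS_PER_PTE + entry] */
			unsigned long *flat = &pte_flat[t * ENTRIES_PER_TABLE + e];

			/* Both sit at the same byte offset from their base. */
			assert((char *)old - (char *)pte_2d ==
			       (char *)flat - (char *)pte_flat);
		}
	}
	printf("flat indexing matches the old 2D indexing\n");
	return 0;
}
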
--
2.43.0