Message-Id: <4672c1a9720c4b4d6bd382602478ce22ed9d001a.1584360344.git.christophe.leroy@c-s.fr>
Date: Mon, 16 Mar 2020 12:36:27 +0000 (UTC)
From: Christophe Leroy <christophe.leroy@....fr>
To: Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
Michael Ellerman <mpe@...erman.id.au>
Cc: linux-kernel@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org
Subject: [PATCH v1 41/46] powerpc/8xx: Map linear memory with huge pages
Map linear memory space with 512k and 8M pages whenever
possible.

Three mappings are performed:
- One for kernel text
- One for RO data
- One for the rest

The mappings are kept separate so that their protection can be
updated later when STRICT_KERNEL_RWX is used.

The ITLB miss handler now also needs to handle huge TLBs,
unless kernel text is pinned.
Signed-off-by: Christophe Leroy <christophe.leroy@....fr>
---
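The way mmu_mapin_ram_chunk() splits a range into 512k and 8M pages can be
seen in isolation with the small standalone sketch below. It is only an
illustration, not part of the patch: map_chunk() is a hypothetical stand-in
for __early_map_kernel_hugepage(), the virtual-address bookkeeping is left
out, and the offset/top values are arbitrary, chosen so that all three loops
run:

#include <stdio.h>

#define SZ_512K			0x00080000UL
#define SZ_8M			0x00800000UL
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

/* Stand-in for __early_map_kernel_hugepage(): just prints the mapping. */
static void map_chunk(unsigned long p, unsigned long size)
{
	printf("map 0x%08lx..0x%08lx with a 0x%08lx page\n", p, p + size, size);
}

int main(void)
{
	unsigned long p = 0x00180000;	/* 512k aligned, not 8M aligned */
	unsigned long top = 0x01480000;	/* 512k aligned */

	/* 512k pages from the start up to the first 8M boundary */
	for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K)
		map_chunk(p, SZ_512K);
	/* 8M pages for the bulk of the range */
	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M)
		map_chunk(p, SZ_8M);
	/* 512k pages for whatever remains below top */
	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K)
		map_chunk(p, SZ_512K);

	return 0;
}

With these values the first loop maps 0x00180000-0x00800000 with 512k pages,
the second maps a single 8M page covering 0x00800000-0x01000000, and the
third maps 0x01000000-0x01480000 with 512k pages again.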
arch/powerpc/kernel/head_8xx.S | 4 +--
arch/powerpc/mm/nohash/8xx.c | 50 +++++++++++++++++++++++++++++++++-
2 files changed, 51 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 410cb48ab36f..49b9eee7dd57 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -223,7 +223,7 @@ InstructionTLBMiss:
3:
mtcr r11
#endif
-#ifdef CONFIG_HUGETLBFS
+#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT)
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */
mtspr SPRN_MD_TWC, r11
#else
@@ -233,7 +233,7 @@ InstructionTLBMiss:
#endif
mfspr r10, SPRN_MD_TWC
lwz r10, 0(r10) /* Get the pte */
-#ifdef CONFIG_HUGETLBFS
+#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT)
rlwimi r11, r10, 32 - 9, _PMD_PAGE_512K
mtspr SPRN_MI_TWC, r11
#endif
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 57e0c7496a6a..d670f3f82091 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -126,20 +126,68 @@ void __init mmu_mapin_immr(void)
PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
}
+static void __init mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
+ pgprot_t prot, bool new)
+{
+ unsigned long v = PAGE_OFFSET + offset;
+ unsigned long p = offset;
+
+ WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));
+
+ for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
+ __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+ for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
+ __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
+ for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
+ __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+
+ if (!new)
+ flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top);
+}
+
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
+ unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
+ unsigned long sinittext = __pa(_sinittext);
+ unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
+ unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+
+ WARN_ON(top < einittext8);
+
mmu_mapin_immr();
- return 0;
+ if (__map_without_ltlbs)
+ return 0;
+
+ mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
+ mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
+ mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
+
+ if (top > SZ_32M)
+ memblock_set_current_limit(top);
+
+ block_mapped_ram = top;
+
+ return top;
}
void mmu_mark_initmem_nx(void)
{
+ unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
+ unsigned long sinittext = __pa(_sinittext);
+ unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
+ unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+
+ mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false);
+ mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
void mmu_mark_rodata_ro(void)
{
+ unsigned long sinittext = __pa(_sinittext);
+
+ mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
}
#endif
--
2.25.0