Message-ID: <20251120103854.1630306-2-daniel@thingy.jp>
Date: Thu, 20 Nov 2025 19:38:53 +0900
From: Daniel Palmer <daniel@...ngy.jp>
To: geert@...ux-m68k.org,
schmitzmic@...il.com,
gerg@...nel.org,
linux-m68k@...ts.linux-m68k.org
Cc: linux-kernel@...r.kernel.org,
Daniel Palmer <daniel@...ngy.jp>
Subject: [PATCH v2 1/2] m68k: mm: motorola: Split the early term containing the end of the RO region

On the 020/030 the end of the RO region (from the start of the kernel
text to the end of the RO data) can end up in an "early termination"
descriptor that represents 64 pages and is modelled as a pmd in the
kernel. Flags cannot be set on individual pages in such a pmd because
the pte level does not exist in the tables.

This means it is not possible to set the write protect bit for a range
of pages within the pmd: either the RW data has to be pushed onto the
next pmd, which wastes memory, or the early termination has to be
converted into a normal pmd with ptes first.
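
For illustration only (not part of the patch): a minimal userspace
sketch of the arithmetic, assuming the 020/030 values of a 4 KiB page
and 64 ptes per page table; the end_rodata value is made up:

  #include <stdio.h>

  /* Illustrative 020/030 values, not taken from kernel headers. */
  #define PAGE_SIZE	4096UL
  #define PTRS_PER_PTE	64UL
  #define PMD_SIZE	(PTRS_PER_PTE * PAGE_SIZE)	/* 256 KiB */
  #define PMD_MASK	(~(PMD_SIZE - 1))

  int main(void)
  {
  	/* Hypothetical end of the kernel RO data. */
  	unsigned long end_rodata = 0x00234567UL;
  	/* Start of the pmd-sized region the RO tail lands in. */
  	unsigned long ro_tail_pmd = end_rodata & PMD_MASK;

  	printf("RO data ends at %#lx\n", end_rodata);
  	printf("containing pmd covers %#lx-%#lx (%lu pages)\n",
  	       ro_tail_pmd, ro_tail_pmd + PMD_SIZE - 1, PTRS_PER_PTE);
  	/*
  	 * An early termination covers that whole range with a single
  	 * descriptor, so a per-page bit such as write protect can only
  	 * apply to all 64 pages at once.
  	 */
  	return 0;
  }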

There is already some logic for splitting the pmd at the start of
memory into ptes so that the first page can be unmapped. Refactor that
logic out into a small helper and use it both for the existing use case
and for splitting the pmd that the tail of the RO region lands on, so
that it's possible to set the write protect bit on the RO pages within
it.
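
Again purely as illustration (a userspace model with a made-up WP bit,
not the kernel code below): after the split, the single descriptor
becomes a table of per-page entries, and a subrange can carry its own
flags:

  #include <stdio.h>

  #define PAGE_SIZE	4096UL
  #define PTRS_PER_PTE	64UL
  #define WP_BIT	0x4UL	/* stand-in for the real pte WP bit */

  /* Model of splitting one early termination into per-page entries. */
  static void split_early_term(unsigned long physaddr, unsigned long *pte)
  {
  	for (unsigned long i = 0; i < PTRS_PER_PTE; i++, physaddr += PAGE_SIZE)
  		pte[i] = physaddr;
  }

  int main(void)
  {
  	unsigned long pte[PTRS_PER_PTE];
  	unsigned long i, ro_pages = 40;	/* hypothetical RO tail length */

  	split_early_term(0x00200000UL, pte);

  	/* Individual pages can now carry flags such as write protect. */
  	for (i = 0; i < ro_pages; i++)
  		pte[i] |= WP_BIT;

  	printf("pte[%lu]=%#lx (WP), pte[%lu]=%#lx (RW)\n",
  	       ro_pages - 1, pte[ro_pages - 1], ro_pages, pte[ro_pages]);
  	return 0;
  }
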
Signed-off-by: Daniel Palmer <daniel@...ngy.jp>
---
 arch/m68k/mm/motorola.c | 46 +++++++++++++++++++++++++++++++---------
 1 file changed, 36 insertions(+), 10 deletions(-)

diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 62283bc2ed79..9b5b9a52f819 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -298,6 +298,24 @@ static pmd_t * __init kernel_ptr_table(void)
 	return last_pmd_table;
 }
 
+/*
+ * Split an early termination descriptor created by head.S into ptes so
+ * that individual pages within the range can be changed, e.g. removed
+ * or marked write protected.
+ */
+static inline pte_t * __init __split_early_term(unsigned long physaddr)
+{
+	pte_t *pte_dir, *pte_dir_tmp;
+
+	pte_dir = kernel_page_table();
+	pte_dir_tmp = pte_dir;
+
+	for (int i = 0; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
+		pte_val(*pte_dir_tmp++) = physaddr;
+
+	return pte_dir;
+}
+
 static void __init map_node(int node)
 {
 	unsigned long physaddr, virtaddr, size;
@@ -348,25 +366,33 @@ static void __init map_node(int node)
 		if (CPU_IS_020_OR_030) {
 			if (virtaddr) {
+				const unsigned long ro_tail_pmd =
+					((unsigned long) __end_rodata) & PMD_MASK;
+
+				if (virtaddr == ro_tail_pmd) {
+#ifdef DEBUG
+					printk("[wp split]\n");
+#endif
+					pte_dir = __split_early_term(physaddr);
+					pmd_set(pmd_dir, pte_dir);
+				} else {
 #ifdef DEBUG
-				printk ("[early term]");
+					printk("[early term]");
 #endif
-				pmd_val(*pmd_dir) = physaddr;
-				physaddr += PMD_SIZE;
+					pmd_val(*pmd_dir) = physaddr;
+				}
 			} else {
-				int i;
 #ifdef DEBUG
 				printk ("[zero map]");
 #endif
-				pte_dir = kernel_page_table();
+				pte_dir = __split_early_term(physaddr);
+				/* Remove the zero page */
+				pte_val(*pte_dir) = 0;
 				pmd_set(pmd_dir, pte_dir);
-
-				pte_val(*pte_dir++) = 0;
-				physaddr += PAGE_SIZE;
-				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
-					pte_val(*pte_dir++) = physaddr;
 			}
+
 			size -= PMD_SIZE;
+			physaddr += PMD_SIZE;
 			virtaddr += PMD_SIZE;
 		} else {
 			if (!pmd_present(*pmd_dir)) {
--
2.51.0