Message-ID: <20260126092630.1800589-22-ardb+git@google.com>
Date: Mon, 26 Jan 2026 10:26:41 +0100
From: Ard Biesheuvel <ardb+git@...gle.com>
To: linux-kernel@...r.kernel.org
Cc: linux-arm-kernel@...ts.infradead.org, will@...nel.org,
catalin.marinas@....com, mark.rutland@....com,
Ard Biesheuvel <ardb@...nel.org>, Ryan Roberts <ryan.roberts@....com>,
Anshuman Khandual <anshuman.khandual@....com>, Liz Prucka <lizprucka@...gle.com>,
Seth Jenkins <sethjenkins@...gle.com>, Kees Cook <kees@...nel.org>,
linux-hardening@...r.kernel.org
Subject: [PATCH v2 10/10] arm64: mm: Unmap kernel data/bss entirely from the
linear map
From: Ard Biesheuvel <ardb@...nel.org>

The linear aliases of the kernel text and rodata are mapped read-only,
just like the primary mappings in the kernel image. Given that the
contents of these regions are mostly identical to what is visible in
the loadable image anyway, mapping them read-only while leaving their
contents visible is a reasonable hardening measure.
Data and bss, however, while now also mapped read-only, are much more
likely to contain data that we'd rather not leak. So let's unmap their
linear aliases entirely while the kernel is running normally.
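
To make the effect concrete, here is a minimal illustration (not part
of the patch; "secret" and alias_demo() are hypothetical). lm_alias()
translates an address in the kernel image into its linear map alias;
with this patch applied, that alias no longer exists for .data/.bss,
so reading through it would fault:

  /* needs <linux/mm.h> for lm_alias(), plus the usual printk/init headers */
  static int secret = 0xbeef;            /* ends up in .data */

  static void __init alias_demo(void)
  {
          /* the same physical page used to be visible at two VAs */
          pr_info("image address: %px\n", &secret);
          pr_info("linear alias:  %px\n", lm_alias(&secret));

          /* with this patch applied, this dereference would fault: */
          /* pr_info("%x\n", *(int *)lm_alias(&secret)); */
  }
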
When going into hibernation or waking up from it, these regions do
need to be mapped, as the hibernation core accesses memory through the
linear map when creating and restoring the snapshot. So map the region
initially, and toggle the valid attribute of its PTEs to map/unmap it
as needed. Since the valid attribute can only be toggled at page
granularity, the region is now mapped with NO_BLOCK_MAPPINGS set as
well.

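For reference, the toggle is built on the existing set_memory_valid()
helper, whose arm64 implementation lives in arch/arm64/mm/pageattr.c.
Roughly speaking, and glossing over the page table walk, it performs
the following per-page update (a sketch only; set_page_valid() is not
a real function and the accessor names are approximate):

  /* sketch: flip the valid attribute of one linear map PTE */
  static void set_page_valid(pte_t *ptep, bool valid)
  {
          pte_t pte = READ_ONCE(*ptep);

          if (valid)
                  pte = __pte(pte_val(pte) | PTE_VALID);
          else
                  pte = __pte(pte_val(pte) & ~PTE_VALID);

          WRITE_ONCE(*ptep, pte);
  }

Since only the valid attribute changes, the rest of the descriptor is
preserved, and the region can be mapped again without rebuilding any
page tables.
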
Signed-off-by: Ard Biesheuvel <ardb@...nel.org>
---
arch/arm64/mm/mmu.c | 40 ++++++++++++++++++--
1 file changed, 37 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index fdbbb018adc5..06b2d11b4561 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -24,6 +24,7 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
+#include <linux/suspend.h>
#include <linux/kfence.h>
#include <linux/pkeys.h>
#include <linux/mm_inline.h>
@@ -1027,6 +1028,31 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end,
end - start, prot, early_pgtable_alloc, flags);
}
+static void remap_linear_data_alias(bool unmap)
+{
+ set_memory_valid((unsigned long)lm_alias(__init_end),
+ (unsigned long)(__pgdir_start - __init_end) / PAGE_SIZE,
+ !unmap);
+}
+
+static int arm64_hibernate_pm_notify(struct notifier_block *nb,
+ unsigned long mode, void *unused)
+{
+ switch (mode) {
+ default:
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ remap_linear_data_alias(true);
+ break;
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ remap_linear_data_alias(false);
+ break;
+ }
+ return 0;
+}
+
void __init mark_linear_text_alias_ro(void)
{
/*
@@ -1035,6 +1061,16 @@ void __init mark_linear_text_alias_ro(void)
update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
(unsigned long)__init_begin - (unsigned long)_text,
PAGE_KERNEL_RO);
+
+ remap_linear_data_alias(true);
+
+ if (IS_ENABLED(CONFIG_HIBERNATION)) {
+ static struct notifier_block nb = {
+ .notifier_call = arm64_hibernate_pm_notify
+ };
+
+ register_pm_notifier(&nb);
+ }
}
#ifdef CONFIG_KFENCE
@@ -1163,7 +1199,7 @@ static void __init map_mem(void)
__map_memblock(kernel_start, init_begin, PAGE_KERNEL,
flags | NO_CONT_MAPPINGS);
__map_memblock(init_end, kernel_end, PAGE_KERNEL,
- flags | NO_CONT_MAPPINGS);
+ flags | NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
/* map all the memory banks */
for_each_mem_range(i, &start, &end) {
@@ -1176,8 +1212,6 @@ static void __init map_mem(void)
flags);
}
- __map_memblock(init_end, kernel_end, PAGE_KERNEL_RO,
- flags | NO_CONT_MAPPINGS);
arm64_kfence_map_pool(early_kfence_pool);
}
--
2.52.0.457.g6b5491de43-goog