[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260119164747.1402434-10-ardb+git@google.com>
Date: Mon, 19 Jan 2026 17:47:52 +0100
From: Ard Biesheuvel <ardb+git@...gle.com>
To: linux-kernel@...r.kernel.org
Cc: linux-arm-kernel@...ts.infradead.org, will@...nel.org,
catalin.marinas@....com, mark.rutland@....com,
Ard Biesheuvel <ardb@...nel.org>, Ryan Roberts <ryan.roberts@....com>,
Liz Prucka <lizprucka@...gle.com>, Seth Jenkins <sethjenkins@...gle.com>,
Kees Cook <kees@...nel.org>, linux-hardening@...r.kernel.org
Subject: [PATCH 4/4] arm64: Unmap kernel data/bss entirely from the linear map
From: Ard Biesheuvel <ardb@...nel.org>
The linear aliases of the kernel text and rodata are mapped read-only as
well. Given that the contents of these regions are mostly identical to
the version in the loadable image, mapping them read-only is a
reasonable hardening measure.
Data and bss, however, are now also mapped read-only, but the contents of
these regions are more likely to contain data that we'd rather not leak.
So let's unmap these regions entirely from the linear map when the kernel
is running normally.
Only when going into hibernation or waking up from it do these regions
need to be mapped, so take care of this using a PM notifier.
Signed-off-by: Ard Biesheuvel <ardb@...nel.org>
---
arch/arm64/mm/mmu.c | 35 ++++++++++++++++++--
1 file changed, 32 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index d978b07ab7b8..7b3ce9cafe64 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -24,6 +24,7 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
+#include <linux/suspend.h>
#include <linux/kfence.h>
#include <linux/pkeys.h>
#include <linux/mm_inline.h>
@@ -1024,13 +1025,13 @@ static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
prot, early_pgtable_alloc, flags);
}
-static void remap_linear_data_alias(void)
+static void remap_linear_data_alias(bool unmap)
{
extern const u8 __pgdir_start[];
update_mapping_prot(__pa_symbol(__init_end), (unsigned long)lm_alias(__init_end),
(unsigned long)__pgdir_start - (unsigned long)__init_end,
- PAGE_KERNEL_RO);
+ unmap ? __pgprot(0) : PAGE_KERNEL_RO);
}
void __init remap_linear_kernel_alias(void)
@@ -1041,7 +1042,7 @@ void __init remap_linear_kernel_alias(void)
update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
(unsigned long)__init_begin - (unsigned long)_text,
PAGE_KERNEL_RO);
- remap_linear_data_alias();
+ remap_linear_data_alias(true);
}
#ifdef CONFIG_KFENCE
@@ -2257,3 +2258,31 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long i
return 0;
}
#endif
+
+#ifdef CONFIG_HIBERNATION
+static int arm64_hibernate_pm_notify(struct notifier_block *nb,
+ unsigned long mode, void *unused)
+{
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ remap_linear_data_alias(false);
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ remap_linear_data_alias(true);
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block arm64_hibernate_pm_notifier = {
+ .notifier_call = arm64_hibernate_pm_notify,
+};
+
+static int arm64_hibernate_register_pm_notifier(void)
+{
+ return register_pm_notifier(&arm64_hibernate_pm_notifier);
+}
+late_initcall(arm64_hibernate_register_pm_notifier);
+#endif
--
2.52.0.457.g6b5491de43-goog
Powered by blists - more mailing lists