Message-ID: <47fdacc6-1928-4d77-8024-28133daac0f4@arm.com>
Date: Fri, 23 Jan 2026 12:22:39 +0530
From: Anshuman Khandual <anshuman.khandual@....com>
To: Ard Biesheuvel <ardb+git@...gle.com>, linux-kernel@...r.kernel.org
Cc: linux-arm-kernel@...ts.infradead.org, will@...nel.org,
catalin.marinas@....com, mark.rutland@....com,
Ard Biesheuvel <ardb@...nel.org>, Ryan Roberts <ryan.roberts@....com>,
Liz Prucka <lizprucka@...gle.com>, Seth Jenkins <sethjenkins@...gle.com>,
Kees Cook <kees@...nel.org>, linux-hardening@...r.kernel.org
Subject: Re: [PATCH 4/4] arm64: Unmap kernel data/bss entirely from the linear
map
On 19/01/26 10:17 PM, Ard Biesheuvel wrote:
> From: Ard Biesheuvel <ardb@...nel.org>
>
> The linear aliases of the kernel text and rodata are mapped read-only as
> well. Given that the contents of these regions are mostly identical to
> the version in the loadable image, mapping them read-only is a
> reasonable hardening measure.
>
> Data and bss, however, are now also mapped read-only but the contents of
> these regions are more likely to contain data that we'd rather not leak.
> So let's unmap these entirely in the linear map when the kernel is
> running normally.
>
> Only when going into hibernation or waking up from it do these regions
> need to be mapped, so take care of this using a PM notifier.
Just curious - why do these regions need to be mapped while going into or
coming back from hibernation?
>
> Signed-off-by: Ard Biesheuvel <ardb@...nel.org>
> ---
> arch/arm64/mm/mmu.c | 35 ++++++++++++++++++--
> 1 file changed, 32 insertions(+), 3 deletions(-)
>
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index d978b07ab7b8..7b3ce9cafe64 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -24,6 +24,7 @@
>  #include <linux/mm.h>
>  #include <linux/vmalloc.h>
>  #include <linux/set_memory.h>
> +#include <linux/suspend.h>
>  #include <linux/kfence.h>
>  #include <linux/pkeys.h>
>  #include <linux/mm_inline.h>
> @@ -1024,13 +1025,13 @@ static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
>  			     prot, early_pgtable_alloc, flags);
>  }
> 
> -static void remap_linear_data_alias(void)
> +static void remap_linear_data_alias(bool unmap)
>  {
>  	extern const u8 __pgdir_start[];
> 
>  	update_mapping_prot(__pa_symbol(__init_end), (unsigned long)lm_alias(__init_end),
>  			    (unsigned long)__pgdir_start - (unsigned long)__init_end,
> -			    PAGE_KERNEL_RO);
> +			    unmap ? __pgprot(0) : PAGE_KERNEL_RO);
>  }
> 
>  void __init remap_linear_kernel_alias(void)
> @@ -1041,7 +1042,7 @@ void __init remap_linear_kernel_alias(void)
>  	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
>  			    (unsigned long)__init_begin - (unsigned long)_text,
>  			    PAGE_KERNEL_RO);
> -	remap_linear_data_alias();
> +	remap_linear_data_alias(true);
>  }
> 
>  #ifdef CONFIG_KFENCE
> @@ -2257,3 +2258,31 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long i
>  	return 0;
>  }
>  #endif
> +
> +#ifdef CONFIG_HIBERNATION
> +static int arm64_hibernate_pm_notify(struct notifier_block *nb,
> +				     unsigned long mode, void *unused)
> +{
> +	switch (mode) {
> +	case PM_HIBERNATION_PREPARE:
> +	case PM_RESTORE_PREPARE:
> +		remap_linear_data_alias(false);
> +		break;
> +	case PM_POST_HIBERNATION:
> +	case PM_POST_RESTORE:
> +		remap_linear_data_alias(true);
> +		break;
> +	}
> +	return 0;
> +}
> +
> +static struct notifier_block arm64_hibernate_pm_notifier = {
> +	.notifier_call = arm64_hibernate_pm_notify,
> +};
> +
> +static int arm64_hibernate_register_pm_notifier(void)
> +{
> +	return register_pm_notifier(&arm64_hibernate_pm_notifier);
> +}
> +late_initcall(arm64_hibernate_register_pm_notifier);
> +#endif