Message-ID: <20210524172606.08dac28d@xhacker.debian>
Date: Mon, 24 May 2021 17:26:06 +0800
From: Jisheng Zhang <Jisheng.Zhang@...aptics.com>
To: Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
Alexander Potapenko <glider@...gle.com>,
Marco Elver <elver@...gle.com>,
Dmitry Vyukov <dvyukov@...gle.com>,
Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
kasan-dev@...glegroups.com, linux-mm@...ck.org
Subject: [PATCH 2/2] arm64: remove page granularity limitation from KFENCE

KFENCE requires the linear map to be mapped at page granularity, so
that it is possible to protect/unprotect single pages in the KFENCE
pool. Currently, if KFENCE is enabled, arm64 maps the entire linear
map at page granularity, which is overkill: only the pages in the
KFENCE pool itself need to be mapped that way. Achieve this by
allocating the KFENCE pool before paging_init(), so that the pool
address is known early, and then taking care to map the pool at page
granularity in map_mem().
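
For reference, KFENCE protects and unprotects a pool page by toggling
the valid bit of that page's linear-map PTE; the arm64 arch hook in
arch/arm64/include/asm/kfence.h is, at the time of this series,
essentially:

	static inline bool kfence_protect_page(unsigned long addr, bool protect)
	{
		/* Flip the valid bit of the single page at addr. */
		set_memory_valid(addr, 1, !protect);
		return true;
	}

set_memory_valid() can only operate on regions mapped down to PTE
level, which is why the pool (and, before this patch, the whole
linear map) must be mapped at page granularity.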
Signed-off-by: Jisheng Zhang <Jisheng.Zhang@...aptics.com>
---
 arch/arm64/kernel/setup.c |  3 +++
 arch/arm64/mm/mmu.c       | 27 +++++++++++++++++++--------
 2 files changed, 22 insertions(+), 8 deletions(-)
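
Note: kfence_alloc_pool() only reserves KFENCE_POOL_SIZE bytes from
memblock (no page tables are touched), so it is safe to call between
arm64_memblock_init() and paging_init(). A simplified sketch of the
helper in mm/kfence/core.c, with the early-enable check elided:

	void __init kfence_alloc_pool(void)
	{
		/* Reserve the pool now so its address is known before map_mem(). */
		__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
		if (!__kfence_pool)
			pr_err("kfence: failed to allocate pool\n");
	}
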
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 61845c0821d9..51c0d6e8b67b 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -18,6 +18,7 @@
 #include <linux/screen_info.h>
 #include <linux/init.h>
 #include <linux/kexec.h>
+#include <linux/kfence.h>
 #include <linux/root_dev.h>
 #include <linux/cpu.h>
 #include <linux/interrupt.h>
@@ -345,6 +346,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
 
 	arm64_memblock_init();
 
+	kfence_alloc_pool();
+
 	paging_init();
 
 	acpi_table_upgrade();
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 89b66ef43a0f..12712d31a054 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/kexec.h>
+#include <linux/kfence.h>
 #include <linux/libfdt.h>
 #include <linux/mman.h>
 #include <linux/nodemask.h>
@@ -515,10 +516,16 @@ static void __init map_mem(pgd_t *pgdp)
 	 */
 	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
 
-	if (rodata_full || crash_mem_map || debug_pagealloc_enabled() ||
-	    IS_ENABLED(CONFIG_KFENCE))
+	if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
+	/*
+	 * KFENCE requires the linear map to be mapped at page granularity,
+	 * so temporarily skip mapping __kfence_pool in the following
+	 * for-loop.
+	 */
+	memblock_mark_nomap(__pa(__kfence_pool), KFENCE_POOL_SIZE);
+
 	/*
 	 * Take care not to create a writable alias for the
 	 * read-only text and rodata sections of the kernel image.
@@ -553,6 +560,15 @@ static void __init map_mem(pgd_t *pgdp)
 	__map_memblock(pgdp, kernel_start, kernel_end,
 		       PAGE_KERNEL, NO_CONT_MAPPINGS);
 	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
+
+	/*
+	 * Map the __kfence_pool at page granularity now.
+	 */
+	__map_memblock(pgdp, __pa(__kfence_pool),
+		       __pa(__kfence_pool + KFENCE_POOL_SIZE),
+		       pgprot_tagged(PAGE_KERNEL),
+		       NO_EXEC_MAPPINGS | NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
+	memblock_clear_nomap(__pa(__kfence_pool), KFENCE_POOL_SIZE);
 }
 
 void mark_rodata_ro(void)
@@ -1480,12 +1496,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
 	VM_BUG_ON(!mhp_range_allowed(start, size, true));
 
-	/*
-	 * KFENCE requires linear map to be mapped at page granularity, so that
-	 * it is possible to protect/unprotect single pages in the KFENCE pool.
-	 */
-	if (rodata_full || debug_pagealloc_enabled() ||
-	    IS_ENABLED(CONFIG_KFENCE))
+	if (rodata_full || debug_pagealloc_enabled())
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
--
2.31.0