[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <f533bb094a566242ec196afbde222796c6d6c084.1761763681.git.m.wieczorretman@pm.me>
Date: Wed, 29 Oct 2025 20:07:38 +0000
From: Maciej Wieczor-Retman <m.wieczorretman@...me>
To: xin@...or.com, peterz@...radead.org, kaleshsingh@...gle.com, kbingham@...nel.org, akpm@...ux-foundation.org, nathan@...nel.org, ryabinin.a.a@...il.com, dave.hansen@...ux.intel.com, bp@...en8.de, morbo@...gle.com, jeremy.linton@....com, smostafa@...gle.com, kees@...nel.org, baohua@...nel.org, vbabka@...e.cz, justinstitt@...gle.com, wangkefeng.wang@...wei.com, leitao@...ian.org, jan.kiszka@...mens.com, fujita.tomonori@...il.com, hpa@...or.com, urezki@...il.com, ubizjak@...il.com, ada.coupriediaz@....com, nick.desaulniers+lkml@...il.com, ojeda@...nel.org, brgerst@...il.com, elver@...gle.com, pankaj.gupta@....com, glider@...gle.com, mark.rutland@....com, trintaeoitogc@...il.com, jpoimboe@...nel.org, thuth@...hat.com, pasha.tatashin@...een.com, dvyukov@...gle.com, jhubbard@...dia.com, catalin.marinas@....com, yeoreum.yun@....com, mhocko@...e.com, lorenzo.stoakes@...cle.com, samuel.holland@...ive.com, vincenzo.frascino@....com, bigeasy@...utronix.de, surenb@...gle.com,
	ardb@...nel.org, Liam.Howlett@...cle.com, nicolas.schier@...ux.dev, ziy@...dia.com, kas@...nel.org, tglx@...utronix.de, mingo@...hat.com, broonie@...nel.org, corbet@....net, andreyknvl@...il.com, maciej.wieczor-retman@...el.com, david@...hat.com, maz@...nel.org, rppt@...nel.org, will@...nel.org, luto@...nel.org
Cc: kasan-dev@...glegroups.com, linux-kernel@...r.kernel.org, linux-arm-kernel@...ts.infradead.org, x86@...nel.org, linux-kbuild@...r.kernel.org, linux-mm@...ck.org, llvm@...ts.linux.dev, linux-doc@...r.kernel.org, m.wieczorretman@...me
Subject: [PATCH v6 11/18] x86/kasan: KASAN raw shadow memory PTE init
From: Maciej Wieczor-Retman <maciej.wieczor-retman@...el.com>
In KASAN's generic mode the default value in shadow memory is zero.
During initialization of shadow memory pages they are allocated and
zeroed.
In KASAN's tag-based mode the default tag for the arm64 architecture is
0xFE which corresponds to any memory that should not be accessed. On x86
(where tags are 4-bit wide instead of 8-bit wide) that tag is 0xE so
during initialization all the bytes in shadow memory pages should
be filled with it.
Use memblock_alloc_try_nid_raw() instead of memblock_alloc_try_nid() to
avoid zeroing out the memory so it can be set with the KASAN invalid
tag.
Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman@...el.com>
---
Changelog v2:
- Remove dense mode references, use memset() instead of kasan_poison().
 arch/x86/mm/kasan_init_64.c | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 998b6010d6d3..e69b7210aaae 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -34,6 +34,18 @@ static __init void *early_alloc(size_t size, int nid, bool should_panic)
 	return ptr;
 }
 
+static __init void *early_raw_alloc(size_t size, int nid, bool should_panic)
+{
+	void *ptr = memblock_alloc_try_nid_raw(size, size,
+			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+
+	if (!ptr && should_panic)
+		panic("%pS: Failed to allocate page, nid=%d from=%lx\n",
+		      (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS));
+
+	return ptr;
+}
+
 static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
 				      unsigned long end, int nid)
 {
@@ -63,8 +75,9 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
 		if (!pte_none(*pte))
 			continue;
 
-		p = early_alloc(PAGE_SIZE, nid, true);
-		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
+		p = early_raw_alloc(PAGE_SIZE, nid, true);
+		memset(p, KASAN_SHADOW_INIT, PAGE_SIZE);
+		entry = pfn_pte(PFN_DOWN(__pa_nodebug(p)), PAGE_KERNEL);
 		set_pte_at(&init_mm, addr, pte, entry);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
@@ -436,7 +449,7 @@ void __init kasan_init(void)
 	 * it may contain some garbage. Now we can clear and write protect it,
 	 * since after the TLB flush no one should write to it.
 	 */
-	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
 	for (i = 0; i < PTRS_PER_PTE; i++) {
 		pte_t pte;
 		pgprot_t prot;
-- 
2.51.0
Powered by blists - more mailing lists
 
