[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CA+fCnZfKQwNWbYEhk70ykT1+cnibCBnvZJrhAMvu_b0Y8xZTSg@mail.gmail.com>
Date: Thu, 6 Feb 2025 00:45:49 +0100
From: Andrey Konovalov <andreyknvl@...il.com>
To: Maciej Wieczor-Retman <maciej.wieczor-retman@...el.com>
Cc: luto@...nel.org, xin@...or.com, kirill.shutemov@...ux.intel.com,
palmer@...belt.com, tj@...nel.org, brgerst@...il.com, ardb@...nel.org,
dave.hansen@...ux.intel.com, jgross@...e.com, will@...nel.org,
akpm@...ux-foundation.org, arnd@...db.de, corbet@....net, dvyukov@...gle.com,
richard.weiyang@...il.com, ytcoode@...il.com, tglx@...utronix.de,
hpa@...or.com, seanjc@...gle.com, paul.walmsley@...ive.com,
aou@...s.berkeley.edu, justinstitt@...gle.com, jason.andryuk@....com,
glider@...gle.com, ubizjak@...il.com, jannh@...gle.com, bhe@...hat.com,
vincenzo.frascino@....com, rafael.j.wysocki@...el.com,
ndesaulniers@...gle.com, mingo@...hat.com, catalin.marinas@....com,
junichi.nomura@....com, nathan@...nel.org, ryabinin.a.a@...il.com,
dennis@...nel.org, bp@...en8.de, kevinloughlin@...gle.com, morbo@...gle.com,
dan.j.williams@...el.com, julian.stecklina@...erus-technology.de,
peterz@...radead.org, cl@...ux.com, kees@...nel.org,
kasan-dev@...glegroups.com, x86@...nel.org,
linux-arm-kernel@...ts.infradead.org, linux-riscv@...ts.infradead.org,
linux-kernel@...r.kernel.org, linux-mm@...ck.org, llvm@...ts.linux.dev,
linux-doc@...r.kernel.org
Subject: Re: [PATCH 10/15] x86: KASAN raw shadow memory PTE init
On Tue, Feb 4, 2025 at 6:36 PM Maciej Wieczor-Retman
<maciej.wieczor-retman@...el.com> wrote:
>
> In KASAN's generic mode the default value in shadow memory is zero.
> During initialization of shadow memory pages they are allocated and
> zeroed.
>
> In KASAN's tag-based mode the default tag for the arm64 architecture is
> 0xFE which corresponds to any memory that should not be accessed. On x86
> (where tags are 4 bits wide instead of 8 bits wide) that tag is 0xE, so
> during initialization all the bytes in shadow memory pages should
> be filled with 0xE, or 0xEE if two tags are packed in one shadow
> byte.
>
> Use memblock_alloc_try_nid_raw() instead of memblock_alloc_try_nid() to
> avoid zeroing out the memory so it can be set with the KASAN invalid
> tag.
>
> Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman@...el.com>
> ---
> arch/x86/mm/kasan_init_64.c | 19 ++++++++++++++++---
> include/linux/kasan.h | 25 +++++++++++++++++++++++++
> mm/kasan/kasan.h | 19 -------------------
> 3 files changed, 41 insertions(+), 22 deletions(-)
>
> diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
> index 9dddf19a5571..55d468d83682 100644
> --- a/arch/x86/mm/kasan_init_64.c
> +++ b/arch/x86/mm/kasan_init_64.c
> @@ -35,6 +35,18 @@ static __init void *early_alloc(size_t size, int nid, bool should_panic)
> return ptr;
> }
>
> +static __init void *early_raw_alloc(size_t size, int nid, bool should_panic)
> +{
> + void *ptr = memblock_alloc_try_nid_raw(size, size,
> + __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
> +
> + if (!ptr && should_panic)
> + panic("%pS: Failed to allocate page, nid=%d from=%lx\n",
> + (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS));
> +
> + return ptr;
> +}
> +
> static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
> unsigned long end, int nid)
> {
> @@ -64,8 +76,9 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
> if (!pte_none(*pte))
> continue;
>
> - p = early_alloc(PAGE_SIZE, nid, true);
> - entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
> + p = early_raw_alloc(PAGE_SIZE, nid, true);
> + memset(p, kasan_dense_tag(KASAN_SHADOW_INIT), PAGE_SIZE);
> + entry = pfn_pte(PFN_DOWN(__pa_nodebug(p)), PAGE_KERNEL);
> set_pte_at(&init_mm, addr, pte, entry);
> } while (pte++, addr += PAGE_SIZE, addr != end);
> }
> @@ -437,7 +450,7 @@ void __init kasan_init(void)
> * it may contain some garbage. Now we can clear and write protect it,
> * since after the TLB flush no one should write to it.
> */
> - memset(kasan_early_shadow_page, 0, PAGE_SIZE);
> + kasan_poison(kasan_early_shadow_page, PAGE_SIZE, KASAN_SHADOW_INIT, false);
> for (i = 0; i < PTRS_PER_PTE; i++) {
> pte_t pte;
> pgprot_t prot;
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 83146367170a..af8272c74409 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -151,6 +151,31 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
> __kasan_unpoison_range(addr, size);
> }
>
> +#ifdef CONFIG_KASAN_HW_TAGS
> +
> +static inline void kasan_poison(const void *addr, size_t size, u8 value, bool init)
> +{
> + if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
> + return;
> + if (WARN_ON(size & KASAN_GRANULE_MASK))
> + return;
> +
> + hw_set_mem_tag_range(kasan_reset_tag(addr), size, value, init);
> +}
> +
> +#else /* CONFIG_KASAN_HW_TAGS */
> +
> +/**
> + * kasan_poison - mark the memory range as inaccessible
> + * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
> + * @size - range size, must be aligned to KASAN_GRANULE_SIZE
> + * @value - value that's written to metadata for the range
> + * @init - whether to initialize the memory range (only for hardware tag-based)
> + */
> +void kasan_poison(const void *addr, size_t size, u8 value, bool init);
> +
> +#endif /* CONFIG_KASAN_HW_TAGS */
Please keep kasan_poison() and kasan_unpoison() in mm/kasan/kasan.h:
these are intended as internal-only functions (perhaps we should add
this to the comment). Instead, add a purpose-specific wrapper
similar to the ones in include/linux/kasan.h.
> +
> void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
> static __always_inline void kasan_poison_pages(struct page *page,
> unsigned int order, bool init)
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index a56aadd51485..2405477c5899 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -466,16 +466,6 @@ static inline u8 kasan_random_tag(void) { return 0; }
>
> #ifdef CONFIG_KASAN_HW_TAGS
>
> -static inline void kasan_poison(const void *addr, size_t size, u8 value, bool init)
> -{
> - if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
> - return;
> - if (WARN_ON(size & KASAN_GRANULE_MASK))
> - return;
> -
> - hw_set_mem_tag_range(kasan_reset_tag(addr), size, value, init);
> -}
> -
> static inline void kasan_unpoison(const void *addr, size_t size, bool init)
> {
> u8 tag = get_tag(addr);
> @@ -497,15 +487,6 @@ static inline bool kasan_byte_accessible(const void *addr)
>
> #else /* CONFIG_KASAN_HW_TAGS */
>
> -/**
> - * kasan_poison - mark the memory range as inaccessible
> - * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
> - * @size - range size, must be aligned to KASAN_GRANULE_SIZE
> - * @value - value that's written to metadata for the range
> - * @init - whether to initialize the memory range (only for hardware tag-based)
> - */
> -void kasan_poison(const void *addr, size_t size, u8 value, bool init);
> -
> /**
> * kasan_unpoison - mark the memory range as accessible
> * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
> --
> 2.47.1
>
Powered by blists - more mailing lists