Message-ID: <aLQ8J2vuYi2POPsE@pc636>
Date: Sun, 31 Aug 2025 14:12:23 +0200
From: Uladzislau Rezki <urezki@...il.com>
To: Andrey Ryabinin <ryabinin.a.a@...il.com>
Cc: linux-mm@...ck.org, Andrew Morton <akpm@...ux-foundation.org>,
Michal Hocko <mhocko@...nel.org>, Baoquan He <bhe@...hat.com>,
LKML <linux-kernel@...r.kernel.org>, stable@...r.kernel.org
Subject: Re: [PATCH] mm/vmalloc, mm/kasan: respect gfp mask in
 kasan_populate_vmalloc()

On Sun, Aug 31, 2025 at 02:10:58PM +0200, Uladzislau Rezki (Sony) wrote:
> kasan_populate_vmalloc() and its helpers ignore the caller's gfp_mask
> and always allocate memory using the hardcoded GFP_KERNEL flag. This
> makes them inconsistent with vmalloc(), which was recently extended to
> support GFP_NOFS and GFP_NOIO allocations.
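
(Not part of the changelog, just an illustrative sketch: a hypothetical
GFP_NOFS user of vmalloc, where "nbytes" is made up. Before this patch the
KASAN shadow backing such a mapping was still populated with plain
GFP_KERNEL, which may recurse into the filesystem.)

	/* Filesystem context that must not re-enter the FS. */
	void *buf = __vmalloc(nbytes, GFP_NOFS | __GFP_ZERO);

	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	vfree(buf);
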
>
> Page table allocations performed during shadow population also ignore
> the external gfp_mask. To preserve the intended semantics of GFP_NOFS
> and GFP_NOIO, wrap the apply_to_page_range() calls into the appropriate
> memalloc scope.
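
(Again only a sketch, roughly what the shadow.c hunk below does. The
gfp_mask is classified by its __GFP_FS/__GFP_IO bits: both set means
GFP_KERNEL-like, only __GFP_IO set means GFP_NOFS-like, neither set means
GFP_NOIO-like. The page-table-populating call is then wrapped in the
matching memalloc scope so nested GFP_KERNEL allocations inherit the
restriction.)

	unsigned int flags = 0;
	bool nofs = (gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO;
	bool noio = (gfp_mask & (__GFP_FS | __GFP_IO)) == 0;

	if (nofs)
		flags = memalloc_nofs_save();
	else if (noio)
		flags = memalloc_noio_save();

	/* Page-table allocations inside are now NOFS/NOIO constrained. */
	ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
				  kasan_populate_vmalloc_pte, &data);

	if (nofs)
		memalloc_nofs_restore(flags);
	else if (noio)
		memalloc_noio_restore(flags);
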
>
> This patch:
> - Extends kasan_populate_vmalloc() and helpers to take gfp_mask;
> - Passes gfp_mask down to alloc_pages_bulk() and __get_free_page();
> - Enforces GFP_NOFS/NOIO semantics with memalloc_*_save()/restore()
> around apply_to_page_range();
> - Updates vmalloc.c and percpu allocator call sites accordingly.
>
> To: Andrey Ryabinin <ryabinin.a.a@...il.com>
> Cc: <stable@...r.kernel.org>
> Fixes: 451769ebb7e7 ("mm/vmalloc: alloc GFP_NO{FS,IO} for vmalloc")
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@...il.com>
> ---
> include/linux/kasan.h | 6 +++---
> mm/kasan/shadow.c | 31 ++++++++++++++++++++++++-------
> mm/vmalloc.c | 8 ++++----
> 3 files changed, 31 insertions(+), 14 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 890011071f2b..fe5ce9215821 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -562,7 +562,7 @@ static inline void kasan_init_hw_tags(void) { }
> #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
>
> void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
> -int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
> +int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
> void kasan_release_vmalloc(unsigned long start, unsigned long end,
> unsigned long free_region_start,
> unsigned long free_region_end,
> @@ -574,7 +574,7 @@ static inline void kasan_populate_early_vm_area_shadow(void *start,
> unsigned long size)
> { }
> static inline int kasan_populate_vmalloc(unsigned long start,
> - unsigned long size)
> + unsigned long size, gfp_t gfp_mask)
> {
> return 0;
> }
> @@ -610,7 +610,7 @@ static __always_inline void kasan_poison_vmalloc(const void *start,
> static inline void kasan_populate_early_vm_area_shadow(void *start,
> unsigned long size) { }
> static inline int kasan_populate_vmalloc(unsigned long start,
> - unsigned long size)
> + unsigned long size, gfp_t gfp_mask)
> {
> return 0;
> }
> diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
> index d2c70cd2afb1..c7c0be119173 100644
> --- a/mm/kasan/shadow.c
> +++ b/mm/kasan/shadow.c
> @@ -335,13 +335,13 @@ static void ___free_pages_bulk(struct page **pages, int nr_pages)
> }
> }
>
> -static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
> +static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask)
> {
> unsigned long nr_populated, nr_total = nr_pages;
> struct page **page_array = pages;
>
> while (nr_pages) {
> - nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
> + nr_populated = alloc_pages_bulk(gfp_mask, nr_pages, pages);
> if (!nr_populated) {
> ___free_pages_bulk(page_array, nr_total - nr_pages);
> return -ENOMEM;
> @@ -353,25 +353,42 @@ static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
> return 0;
> }
>
> -static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
> +static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
> {
> unsigned long nr_pages, nr_total = PFN_UP(end - start);
> struct vmalloc_populate_data data;
> + unsigned int flags;
> int ret = 0;
>
> - data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
> + data.pages = (struct page **)__get_free_page(gfp_mask | __GFP_ZERO);
> if (!data.pages)
> return -ENOMEM;
>
> while (nr_total) {
> nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
> - ret = ___alloc_pages_bulk(data.pages, nr_pages);
> + ret = ___alloc_pages_bulk(data.pages, nr_pages, gfp_mask);
> if (ret)
> break;
>
> data.start = start;
> +
> + /*
> + * page table allocations ignore the external gfp mask, enforce it
> + * via the scope API
> + */
> + if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
> + flags = memalloc_nofs_save();
> + else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
> + flags = memalloc_noio_save();
> +
> ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
> kasan_populate_vmalloc_pte, &data);
> +
> + if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
> + memalloc_nofs_restore(flags);
> + else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
> + memalloc_noio_restore(flags);
> +
> ___free_pages_bulk(data.pages, nr_pages);
> if (ret)
> break;
> @@ -385,7 +402,7 @@ static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
> return ret;
> }
>
> -int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
> +int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
> {
> unsigned long shadow_start, shadow_end;
> int ret;
> @@ -414,7 +431,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
> shadow_start = PAGE_ALIGN_DOWN(shadow_start);
> shadow_end = PAGE_ALIGN(shadow_end);
>
> - ret = __kasan_populate_vmalloc(shadow_start, shadow_end);
> + ret = __kasan_populate_vmalloc(shadow_start, shadow_end, gfp_mask);
> if (ret)
> return ret;
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 6dbcdceecae1..5edd536ba9d2 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -2026,6 +2026,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
> if (unlikely(!vmap_initialized))
> return ERR_PTR(-EBUSY);
>
> + /* Only reclaim behaviour flags are relevant. */
> + gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
> might_sleep();
>
> /*
> @@ -2038,8 +2040,6 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
> */
> va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
> if (!va) {
> - gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
> -
> va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
> if (unlikely(!va))
> return ERR_PTR(-ENOMEM);
> @@ -2089,7 +2089,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
> BUG_ON(va->va_start < vstart);
> BUG_ON(va->va_end > vend);
>
> - ret = kasan_populate_vmalloc(addr, size);
> + ret = kasan_populate_vmalloc(addr, size, gfp_mask);
> if (ret) {
> free_vmap_area(va);
> return ERR_PTR(ret);
> @@ -4826,7 +4826,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
>
> /* populate the kasan shadow space */
> for (area = 0; area < nr_vms; area++) {
> - if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
> + if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area], GFP_KERNEL))
> goto err_free_shadow;
> }
>
> --
> 2.47.2
>
+ Andrey Ryabinin <ryabinin.a.a@...il.com>
--
Uladzislau Rezki