Message-ID: <0d24f6b7-0e4c-4879-87f2-e31ad988baad@gmail.com>
Date: Thu, 7 Aug 2025 18:05:21 +0200
From: Andrey Ryabinin <ryabinin.a.a@...il.com>
To: "Uladzislau Rezki (Sony)" <urezki@...il.com>, linux-mm@...ck.org,
Andrew Morton <akpm@...ux-foundation.org>
Cc: Vlastimil Babka <vbabka@...e.cz>, Michal Hocko <mhocko@...nel.org>,
Baoquan He <bhe@...hat.com>, LKML <linux-kernel@...r.kernel.org>,
Alexander Potapenko <glider@...gle.com>
Subject: Re: [PATCH 5/8] mm/kasan, mm/vmalloc: Respect GFP flags in
kasan_populate_vmalloc()
On 8/7/25 9:58 AM, Uladzislau Rezki (Sony) wrote:
> diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
> index d2c70cd2afb1..5edfc1f6b53e 100644
> --- a/mm/kasan/shadow.c
> +++ b/mm/kasan/shadow.c
> @@ -335,13 +335,13 @@ static void ___free_pages_bulk(struct page **pages, int nr_pages)
> }
> }
>
> -static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
> +static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask)
> {
> unsigned long nr_populated, nr_total = nr_pages;
> struct page **page_array = pages;
>
> while (nr_pages) {
> - nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
> + nr_populated = alloc_pages_bulk(gfp_mask, nr_pages, pages);
> if (!nr_populated) {
> ___free_pages_bulk(page_array, nr_total - nr_pages);
> return -ENOMEM;
> @@ -353,25 +353,33 @@ static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
> return 0;
> }
>
> -static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
> +static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
> {
> unsigned long nr_pages, nr_total = PFN_UP(end - start);
> + bool noblock = !gfpflags_allow_blocking(gfp_mask);
> struct vmalloc_populate_data data;
> + unsigned int flags;
> int ret = 0;
I think we also need to filter the incoming flags here, something like:

	gfp_mask = (gfp_mask & GFP_RECLAIM_MASK);

But it might be better to do this in alloc_vmap_area().
In alloc_vmap_area() we have this:

retry:
	if (IS_ERR_VALUE(addr)) {
		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);

which probably needs GFP_RECLAIM_MASK too.
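
Something along these lines perhaps (only a rough sketch, the exact
placement within alloc_vmap_area() is my assumption):

	--- a/mm/vmalloc.c
	+++ b/mm/vmalloc.c
	@@ static struct vmap_area *alloc_vmap_area(unsigned long size, ...)
	+	/* Only the reclaim modifiers matter for internal allocations. */
	+	gfp_mask &= GFP_RECLAIM_MASK;
	+
	 retry:
	 	if (IS_ERR_VALUE(addr)) {
	 		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);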
>
> - data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
> + data.pages = (struct page **)__get_free_page(gfp_mask | __GFP_ZERO);
> if (!data.pages)
> return -ENOMEM;
>
> while (nr_total) {
> nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
> - ret = ___alloc_pages_bulk(data.pages, nr_pages);
> + ret = ___alloc_pages_bulk(data.pages, nr_pages, gfp_mask);
> if (ret)
> break;
>
> data.start = start;
> + if (noblock)
> + flags = memalloc_noreclaim_save();
> +
This should be the same as in __vmalloc_area_node():

	if (noblock)
		flags = memalloc_noreclaim_save();
	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
		flags = memalloc_nofs_save();
	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
		flags = memalloc_noio_save();
It would be better to fix the noio/nofs stuff first in a separate patch,
as it's a bug and needs a Cc to stable. Support for noblock can then be
added in a follow-up.
It might be a good idea to consolidate such logic in a separate pair of
helpers, memalloc_save(gfp_mask)/memalloc_restore(gfp_mask, flags)?
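
E.g. something like this (just a rough sketch; the names are the ones
suggested above, not an existing API):

	static inline unsigned int memalloc_save(gfp_t gfp_mask)
	{
		if (!gfpflags_allow_blocking(gfp_mask))
			return memalloc_noreclaim_save();
		if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
			return memalloc_nofs_save();
		if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
			return memalloc_noio_save();
		return 0;
	}

	static inline void memalloc_restore(gfp_t gfp_mask, unsigned int flags)
	{
		if (!gfpflags_allow_blocking(gfp_mask))
			memalloc_noreclaim_restore(flags);
		else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
			memalloc_nofs_restore(flags);
		else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
			memalloc_noio_restore(flags);
	}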
> ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
> kasan_populate_vmalloc_pte, &data);
> + if (noblock)
> + memalloc_noreclaim_restore(flags);
> +
> ___free_pages_bulk(data.pages, nr_pages);
> if (ret)