[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <Zsi8Byjo4ayJORgS@pc638.lan>
Date: Fri, 23 Aug 2024 18:42:47 +0200
From: Uladzislau Rezki <urezki@...il.com>
To: Michal Hocko <mhocko@...e.com>
Cc: Hailong Liu <hailong.liu@...o.com>, Uladzislau Rezki <urezki@...il.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Barry Song <21cnbao@...il.com>,
Christoph Hellwig <hch@...radead.org>,
Vlastimil Babka <vbabka@...e.cz>,
Tangquan Zheng <zhengtangquan@...o.com>, stable@...r.kernel.org,
Baoquan He <bhe@...hat.com>, Matthew Wilcox <willy@...radead.org>,
linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [RESEND PATCH v1] mm/vmalloc: fix page mapping if
vm_area_alloc_pages() with high order fallback to order 0
Hello, Michal.
>
> Let me clarify what I would like to have clarified:
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 6b783baf12a1..fea90a39f5c5 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -3510,13 +3510,13 @@ void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
> EXPORT_SYMBOL_GPL(vmap_pfn);
> #endif /* CONFIG_VMAP_PFN */
>
> +/* GFP_NOFAIL semantic is implemented by __vmalloc_node_range_noprof */
> static inline unsigned int
> vm_area_alloc_pages(gfp_t gfp, int nid,
> unsigned int order, unsigned int nr_pages, struct page **pages)
> {
> unsigned int nr_allocated = 0;
> - gfp_t alloc_gfp = gfp;
> - bool nofail = gfp & __GFP_NOFAIL;
> + gfp_t alloc_gfp = gfp & ~ __GFP_NOFAIL;
> struct page *page;
> int i;
>
> @@ -3527,9 +3527,6 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
> * more permissive.
> */
> if (!order) {
> - /* bulk allocator doesn't support nofail req. officially */
> - gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
> -
> while (nr_allocated < nr_pages) {
> unsigned int nr, nr_pages_request;
>
> @@ -3547,12 +3544,12 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
> * but mempolicy wants to alloc memory by interleaving.
> */
> if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
> - nr = alloc_pages_bulk_array_mempolicy_noprof(bulk_gfp,
> + nr = alloc_pages_bulk_array_mempolicy_noprof(alloc_gfp,
> nr_pages_request,
> pages + nr_allocated);
>
> else
> - nr = alloc_pages_bulk_array_node_noprof(bulk_gfp, nid,
> + nr = alloc_pages_bulk_array_node_noprof(alloc_gfp, nid,
> nr_pages_request,
> pages + nr_allocated);
>
> @@ -3566,13 +3563,6 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
> if (nr != nr_pages_request)
> break;
> }
> - } else if (gfp & __GFP_NOFAIL) {
> - /*
> - * Higher order nofail allocations are really expensive and
> - * potentially dangerous (pre-mature OOM, disruptive reclaim
> - * and compaction etc.
> - */
> - alloc_gfp &= ~__GFP_NOFAIL;
> }
>
> /* High-order pages or fallback path if "bulk" fails. */
> --
>
See the change below. It does not make any functional change; it is rather
a small refactoring, which includes the comment I wanted to add and what you
wanted to have clarified (if I got you correctly):
<snip>
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 3f9b6bd707d2..24fad2e48799 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3531,8 +3531,6 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
unsigned int order, unsigned int nr_pages, struct page **pages)
{
unsigned int nr_allocated = 0;
- gfp_t alloc_gfp = gfp;
- bool nofail = gfp & __GFP_NOFAIL;
struct page *page;
int i;
@@ -3543,9 +3541,6 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
* more permissive.
*/
if (!order) {
- /* bulk allocator doesn't support nofail req. officially */
- gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
-
while (nr_allocated < nr_pages) {
unsigned int nr, nr_pages_request;
@@ -3563,12 +3558,12 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
* but mempolicy wants to alloc memory by interleaving.
*/
if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
- nr = alloc_pages_bulk_array_mempolicy_noprof(bulk_gfp,
+ nr = alloc_pages_bulk_array_mempolicy_noprof(gfp & ~__GFP_NOFAIL,
nr_pages_request,
pages + nr_allocated);
-
else
- nr = alloc_pages_bulk_array_node_noprof(bulk_gfp, nid,
+ /* bulk allocator doesn't support nofail req. officially */
+ nr = alloc_pages_bulk_array_node_noprof(gfp & ~__GFP_NOFAIL, nid,
nr_pages_request,
pages + nr_allocated);
@@ -3582,24 +3577,18 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
if (nr != nr_pages_request)
break;
}
- } else if (gfp & __GFP_NOFAIL) {
- /*
- * Higher order nofail allocations are really expensive and
- * potentially dangerous (pre-mature OOM, disruptive reclaim
- * and compaction etc.
- */
- alloc_gfp &= ~__GFP_NOFAIL;
}
/* High-order pages or fallback path if "bulk" fails. */
while (nr_allocated < nr_pages) {
- if (!nofail && fatal_signal_pending(current))
+ if (!(gfp & __GFP_NOFAIL) && fatal_signal_pending(current))
break;
if (nid == NUMA_NO_NODE)
- page = alloc_pages_noprof(alloc_gfp, order);
+ page = alloc_pages_noprof(gfp, order);
else
- page = alloc_pages_node_noprof(nid, alloc_gfp, order);
+ page = alloc_pages_node_noprof(nid, gfp, order);
+
if (unlikely(!page))
break;
@@ -3666,7 +3655,16 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
page_order = vm_area_page_order(area);
- area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
+ /*
+ * Higher order nofail allocations are really expensive and
+ * potentially dangerous (premature OOM, disruptive reclaim
+ * and compaction etc.).
+ *
+ * Please note, the __vmalloc_node_range_noprof() falls back
+ * to order-0 pages if a high-order attempt has been unsuccessful.
+ */
+ area->nr_pages = vm_area_alloc_pages(page_order ?
+ gfp_mask & ~__GFP_NOFAIL : gfp_mask | __GFP_NOWARN,
node, page_order, nr_small_pages, area->pages);
atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
<snip>
Is that aligned with your wish?
Thanks!
--
Uladzislau Rezki
Powered by blists - more mailing lists