Message-ID: <7BD9FB05-4125-4EA8-841D-9D08907D01D5@nvidia.com>
Date: Mon, 02 Jun 2025 12:59:56 -0400
From: Zi Yan <ziy@...dia.com>
To: David Hildenbrand <david@...hat.com>,
Johannes Weiner <hannes@...xchg.org>, Vlastimil Babka <vbabka@...e.cz>,
<linux-mm@...ck.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Oscar Salvador <osalvador@...e.de>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
Mel Gorman <mgorman@...hsingularity.net>,
Suren Baghdasaryan <surenb@...gle.com>, Michal Hocko <mhocko@...e.com>,
Brendan Jackman <jackmanb@...gle.com>, Richard Chang <richardycc@...gle.com>,
<linux-kernel@...r.kernel.org>, Zi Yan <ziy@...dia.com>
Subject: Re: [PATCH v7 6/6] mm/page_isolation: remove migratetype parameter
from more functions.
On 2 Jun 2025, at 11:18, Zi Yan wrote:
> Since migratetype is no longer overwritten during pageblock isolation,
> start_isolate_page_range(), has_unmovable_pages(), and
> set_migratetype_isolate() no longer need to know which migratetype to
> restore during isolation failure.
>
> For has_unmovable_pages(), it needs to know whether the isolation is for a
> CMA allocation, so add PB_ISOLATE_MODE_CMA_ALLOC to provide that information.
> At the same time change isolation flags to enum pb_isolate_mode
> (PB_ISOLATE_MODE_MEM_OFFLINE, PB_ISOLATE_MODE_CMA_ALLOC,
> PB_ISOLATE_MODE_OTHER). Remove REPORT_FAILURE and check
> PB_ISOLATE_MODE_MEM_OFFLINE, since only PB_ISOLATE_MODE_MEM_OFFLINE
> reports isolation failures.
>
> alloc_contig_range() no longer needs migratetype. Replace it with
> PB_ISOLATE_MODE_CMA_ALLOC to tell if an allocation is for CMA. So does
> __alloc_contig_migrate_range().
This paragraph should be changed to:
alloc_contig_range() no longer needs migratetype. Replace it with
a newly defined acr_flags_t to tell if an allocation is for CMA. So does
__alloc_contig_migrate_range(). Add ACR_OTHER (set to 0) to indicate
other cases.
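For reference, a minimal caller-side sketch of the new interface, using only
the names introduced in the fixup below; the wrapper function itself is
hypothetical and shown purely for illustration:

#include <linux/gfp.h>	/* acr_flags_t, ACR_CMA, ACR_OTHER, alloc_contig_range() */

/*
 * Hypothetical wrapper: allocate a physically contiguous PFN range.
 * CMA passes ACR_CMA (see the mm/cma.c hunk below); other callers such
 * as virtio-mem pass ACR_OTHER.
 */
static int example_alloc_contig(unsigned long start_pfn,
				unsigned long nr_pages, bool for_cma)
{
	acr_flags_t alloc_flags = for_cma ? ACR_CMA : ACR_OTHER;

	return alloc_contig_range(start_pfn, start_pfn + nr_pages,
				  alloc_flags, GFP_KERNEL);
}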
>
> Signed-off-by: Zi Yan <ziy@...dia.com>
> Reviewed-by: Vlastimil Babka <vbabka@...e.cz>
> ---
> drivers/virtio/virtio_mem.c | 4 +-
> include/linux/gfp.h | 18 ++++++++-
> include/linux/page-isolation.h | 7 +---
> include/trace/events/kmem.h | 14 ++++---
> mm/cma.c | 3 +-
> mm/memory_hotplug.c | 6 +--
> mm/page_alloc.c | 27 ++++++-------
> mm/page_isolation.c | 70 +++++++++++++++-------------------
> 8 files changed, 79 insertions(+), 70 deletions(-)
>
The fixup to restore acr_flags_t:
From d0205580ab70aaf93f3f7c04b53dc595ee387bac Mon Sep 17 00:00:00 2001
From: Zi Yan <ziy@...dia.com>
Date: Mon, 2 Jun 2025 12:53:58 -0400
Subject: [PATCH] restore acr_flags_t.
Signed-off-by: Zi Yan <ziy@...dia.com>
---
drivers/virtio/virtio_mem.c | 4 ++--
include/linux/gfp.h | 21 +++++----------------
include/linux/page-isolation.h | 15 +++++++++++++++
include/trace/events/kmem.h | 12 ++++++------
mm/cma.c | 3 +--
mm/page_alloc.c | 24 ++++++++++++------------
6 files changed, 41 insertions(+), 38 deletions(-)
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 535680a54ff5..6bce70b139b2 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -1243,8 +1243,8 @@ static int virtio_mem_fake_offline(struct virtio_mem *vm, unsigned long pfn,
if (atomic_read(&vm->config_changed))
return -EAGAIN;
- rc = alloc_contig_range(pfn, pfn + nr_pages,
- PB_ISOLATE_MODE_OTHER, GFP_KERNEL);
+ rc = alloc_contig_range(pfn, pfn + nr_pages, ACR_OTHER,
+ GFP_KERNEL);
if (rc == -ENOMEM)
/* whoops, out of memory */
return rc;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 17b92888d6de..95065cec85e5 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -423,25 +423,14 @@ static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
#ifdef CONFIG_CONTIG_ALLOC
-/*
- * Pageblock isolation modes:
- * PB_ISOLATE_MODE_MEM_OFFLINE - isolate to offline (!allocate) memory
- * e.g., skip over PageHWPoison() pages and
- * PageOffline() pages. Unmovable pages will be
- * reported in this mode.
- * PB_ISOLATE_MODE_CMA_ALLOC - isolate for CMA allocations
- * PB_ISOLATE_MODE_OTHER - isolate for other purposes
- */
-enum pb_isolate_mode {
- PB_ISOLATE_MODE_MEM_OFFLINE,
- PB_ISOLATE_MODE_CMA_ALLOC,
- PB_ISOLATE_MODE_OTHER,
-};
+
+typedef unsigned int __bitwise acr_flags_t;
+#define ACR_OTHER ((__force acr_flags_t)0) // other allocations
+#define ACR_CMA ((__force acr_flags_t)BIT(0)) // allocate for CMA
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
- enum pb_isolate_mode isol_mode,
- gfp_t gfp_mask);
+ acr_flags_t alloc_flags, gfp_t gfp_mask);
#define alloc_contig_range(...) alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))
extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 7ed60a339a02..3e2f960e166c 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -38,6 +38,21 @@ static inline void set_pageblock_isolate(struct page *page)
}
#endif
+/*
+ * Pageblock isolation modes:
+ * PB_ISOLATE_MODE_MEM_OFFLINE - isolate to offline (!allocate) memory
+ * e.g., skip over PageHWPoison() pages and
+ * PageOffline() pages. Unmovable pages will be
+ * reported in this mode.
+ * PB_ISOLATE_MODE_CMA_ALLOC - isolate for CMA allocations
+ * PB_ISOLATE_MODE_OTHER - isolate for other purposes
+ */
+enum pb_isolate_mode {
+ PB_ISOLATE_MODE_MEM_OFFLINE,
+ PB_ISOLATE_MODE_CMA_ALLOC,
+ PB_ISOLATE_MODE_OTHER,
+};
+
void __meminit init_pageblock_migratetype(struct page *page,
enum migratetype migratetype,
bool isolate);
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index e0bcbc43a548..efffcf578217 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -312,9 +312,9 @@ TRACE_EVENT(mm_alloc_contig_migrate_range_info,
unsigned long nr_migrated,
unsigned long nr_reclaimed,
unsigned long nr_mapped,
- enum pb_isolate_mode isol_mode),
+ acr_flags_t alloc_flags),
- TP_ARGS(start, end, nr_migrated, nr_reclaimed, nr_mapped, isol_mode),
+ TP_ARGS(start, end, nr_migrated, nr_reclaimed, nr_mapped, alloc_flags),
TP_STRUCT__entry(
__field(unsigned long, start)
@@ -322,7 +322,7 @@ TRACE_EVENT(mm_alloc_contig_migrate_range_info,
__field(unsigned long, nr_migrated)
__field(unsigned long, nr_reclaimed)
__field(unsigned long, nr_mapped)
- __field(enum pb_isolate_mode, isol_mode)
+ __field(acr_flags_t, alloc_flags)
),
TP_fast_assign(
@@ -331,13 +331,13 @@ TRACE_EVENT(mm_alloc_contig_migrate_range_info,
__entry->nr_migrated = nr_migrated;
__entry->nr_reclaimed = nr_reclaimed;
__entry->nr_mapped = nr_mapped;
- __entry->isol_mode = isol_mode;
+ __entry->alloc_flags = alloc_flags;
),
- TP_printk("start=0x%lx end=0x%lx isol_mode=%d nr_migrated=%lu nr_reclaimed=%lu nr_mapped=%lu",
+ TP_printk("start=0x%lx end=0x%lx alloc_flags=%d nr_migrated=%lu nr_reclaimed=%lu nr_mapped=%lu",
__entry->start,
__entry->end,
- __entry->isol_mode,
+ __entry->alloc_flags,
__entry->nr_migrated,
__entry->nr_reclaimed,
__entry->nr_mapped)
diff --git a/mm/cma.c b/mm/cma.c
index 23aa35193122..9ee8fad797bc 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -822,8 +822,7 @@ static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit);
mutex_lock(&cma->alloc_mutex);
- ret = alloc_contig_range(pfn, pfn + count,
- PB_ISOLATE_MODE_CMA_ALLOC, gfp);
+ ret = alloc_contig_range(pfn, pfn + count, ACR_CMA, gfp);
mutex_unlock(&cma->alloc_mutex);
if (ret == 0) {
page = pfn_to_page(pfn);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 02a0f5621d10..c12442fdb579 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6695,12 +6695,12 @@ static void alloc_contig_dump_pages(struct list_head *page_list)
/*
* [start, end) must belong to a single zone.
- * @isol_mode: using pb_isolate_mode filter the type of migration in
+ * @alloc_flags: using acr_flags_t to filter the type of migration in
* trace_mm_alloc_contig_migrate_range_info.
*/
static int __alloc_contig_migrate_range(struct compact_control *cc,
unsigned long start, unsigned long end,
- enum pb_isolate_mode isol_mode)
+ acr_flags_t alloc_flags)
{
/* This function is based on compact_zone() from compaction.c. */
unsigned int nr_reclaimed;
@@ -6772,7 +6772,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
putback_movable_pages(&cc->migratepages);
}
- trace_mm_alloc_contig_migrate_range_info(start, end, isol_mode,
+ trace_mm_alloc_contig_migrate_range_info(start, end, alloc_flags,
total_migrated,
total_reclaimed,
total_mapped);
@@ -6843,7 +6843,7 @@ static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
* alloc_contig_range() -- tries to allocate given range of pages
* @start: start PFN to allocate
* @end: one-past-the-last PFN to allocate
- * @isol_mode: allocation information used for pageblock isolation
+ * @alloc_flags: allocation information
* @gfp_mask: GFP mask. Node/zone/placement hints are ignored; only some
* action and reclaim modifiers are supported. Reclaim modifiers
* control allocation behavior during compaction/migration/reclaim.
@@ -6860,7 +6860,7 @@ static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
* need to be freed with free_contig_range().
*/
int alloc_contig_range_noprof(unsigned long start, unsigned long end,
- enum pb_isolate_mode isol_mode, gfp_t gfp_mask)
+ acr_flags_t alloc_flags, gfp_t gfp_mask)
{
unsigned long outer_start, outer_end;
int ret = 0;
@@ -6875,9 +6875,9 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
.alloc_contig = true,
};
INIT_LIST_HEAD(&cc.migratepages);
-
- if (isol_mode == PB_ISOLATE_MODE_MEM_OFFLINE)
- return -EINVAL;
+ enum pb_isolate_mode mode = (alloc_flags & ACR_CMA) ?
+ PB_ISOLATE_MODE_CMA_ALLOC :
+ PB_ISOLATE_MODE_OTHER;
gfp_mask = current_gfp_context(gfp_mask);
if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask))
@@ -6904,7 +6904,7 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
* put back to page allocator so that buddy can use them.
*/
- ret = start_isolate_page_range(start, end, isol_mode);
+ ret = start_isolate_page_range(start, end, mode);
if (ret)
goto done;
@@ -6920,7 +6920,7 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
* allocated. So, if we fall through be sure to clear ret so that
* -EBUSY is not accidentally used or returned to caller.
*/
- ret = __alloc_contig_migrate_range(&cc, start, end, isol_mode);
+ ret = __alloc_contig_migrate_range(&cc, start, end, alloc_flags);
if (ret && ret != -EBUSY)
goto done;
@@ -6954,7 +6954,7 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
outer_start = find_large_buddy(start);
/* Make sure the range is really isolated. */
- if (test_pages_isolated(outer_start, end, isol_mode)) {
+ if (test_pages_isolated(outer_start, end, mode)) {
ret = -EBUSY;
goto done;
}
@@ -6997,7 +6997,7 @@ static int __alloc_contig_pages(unsigned long start_pfn,
{
unsigned long end_pfn = start_pfn + nr_pages;
- return alloc_contig_range_noprof(start_pfn, end_pfn, PB_ISOLATE_MODE_OTHER,
+ return alloc_contig_range_noprof(start_pfn, end_pfn, ACR_OTHER,
gfp_mask);
}
--
2.47.2
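A note on the type: like gfp_t, acr_flags_t is declared __bitwise, so sparse
can catch accidental mixing with plain integers. Any future flag would follow
the same BIT(n) pattern as ACR_CMA and be tested with a bitwise AND. A
hypothetical sketch (ACR_EXAMPLE is not part of this series and only
illustrates the pattern):

#define ACR_EXAMPLE	((__force acr_flags_t)BIT(1))	/* hypothetical */

static bool is_example_alloc(acr_flags_t alloc_flags)
{
	return !!(alloc_flags & ACR_EXAMPLE);
}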
Best Regards,
Yan, Zi