Message-ID: <970A26A3-6E75-4474-997B-67B9547E15AC@nvidia.com>
Date: Thu, 08 May 2025 21:56:40 -0400
From: Zi Yan <ziy@...dia.com>
To: Johannes Weiner <hannes@...xchg.org>,
Andrew Morton <akpm@...ux-foundation.org>
Cc: David Hildenbrand <david@...hat.com>, <linux-mm@...ck.org>,
Oscar Salvador <osalvador@...e.de>, Vlastimil Babka <vbabka@...e.cz>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
Mel Gorman <mgorman@...hsingularity.net>,
Suren Baghdasaryan <surenb@...gle.com>, Michal Hocko <mhocko@...e.com>,
Brendan Jackman <jackmanb@...gle.com>, Richard Chang <richardycc@...gle.com>,
<linux-kernel@...r.kernel.org>, Zi Yan <ziy@...dia.com>
Subject: Re: [PATCH v3 4/4] mm/page_isolation: remove migratetype parameter
from more functions.
On 8 May 2025, at 16:25, Zi Yan wrote:
> On 7 May 2025, at 17:10, Zi Yan wrote:
>
>> Since migratetype is no longer overwritten during pageblock isolation,
>> start_isolate_page_range(), has_unmovable_pages(), and
>> set_migratetype_isolate() no longer need to know which migratetype to
>> restore on isolation failure.
>>
>> has_unmovable_pages() still needs to know whether the isolation is for a
>> CMA allocation, so add CMA_ALLOCATION to the isolation flags to convey
>> that information.
>>
>> alloc_contig_range() no longer needs migratetype. Replace it with a
>> newly defined acr_flags_t that tells whether an allocation is for CMA.
>> Do the same for __alloc_contig_migrate_range().
>>
>> Signed-off-by: Zi Yan <ziy@...dia.com>
>> ---
>> drivers/virtio/virtio_mem.c | 3 +--
>> include/linux/gfp.h | 6 +++++-
>> include/linux/page-isolation.h | 15 +++++++++++---
>> include/trace/events/kmem.h | 14 +++++++------
>> mm/cma.c | 2 +-
>> mm/memory_hotplug.c | 1 -
>> mm/page_alloc.c | 22 ++++++++++-----------
>> mm/page_isolation.c | 36 ++++++++++++----------------------
>> 8 files changed, 50 insertions(+), 49 deletions(-)
>
> Here is fixup 3/3, addressing the type issue reported by the kernel test robot.
>
> From 3c439f1f09b03c8362b43c0ac05e5f174f1a6655 Mon Sep 17 00:00:00 2001
> From: Zi Yan <ziy@...dia.com>
> Date: Thu, 8 May 2025 15:42:18 -0400
> Subject: [PATCH] fixup for mm/page_isolation: remove migratetype parameter
> from more functions.
>
> 1. fixed the test_pages_isolated() and __test_page_isolated_in_pageblock()
> signatures by using the new isol_flags_t type.
> 2. fixed the test_pages_isolated() doc: flags -> isol_flags
>
> Signed-off-by: Zi Yan <ziy@...dia.com>
> ---
> include/linux/page-isolation.h | 2 +-
> mm/page_isolation.c | 6 +++---
> 2 files changed, 4 insertions(+), 4 deletions(-)
This is the second-round fixup (1/1), addressing Johannes' comment on Patch 4.
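
For quick reference, the caller-visible change is that the isolation mode and
the behavior flags are now passed as separate arguments. A minimal sketch of
the memory-offline caller (the hunks below are authoritative):

	/* before this fixup: mode and flags share one isol_flags_t */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MEMORY_OFFLINE | REPORT_FAILURE);

	/* after this fixup: an isolate_mode_t mode, then isolate_flags_t flags */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MEMORY_OFFLINE, REPORT_FAILURE);
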
From 760c00e808c74d62e8d879f281f38d6608c89296 Mon Sep 17 00:00:00 2001
From: Zi Yan <ziy@...dia.com>
Date: Thu, 8 May 2025 20:54:40 -0400
Subject: [PATCH] fixup for fixup for mm/page_isolation: remove migratetype
parameter from more functions.
1. change MEMORY_OFFLINE and CMA_ALLOCATION into isolate_mode_t enum values.
2. rename isol_flags_t to isolate_flags_t.
3. REPORT_FAILURE becomes the only isolate_flags_t flag.
Signed-off-by: Zi Yan <ziy@...dia.com>
---
include/linux/page-isolation.h | 26 +++++++++++++++++---------
mm/memory_hotplug.c | 2 +-
mm/page_alloc.c | 3 ++-
mm/page_isolation.c | 31 ++++++++++++++++++-------------
4 files changed, 38 insertions(+), 24 deletions(-)
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 20c3f98b5afb..29b4ddcaea7a 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -22,17 +22,25 @@ static inline bool is_migrate_isolate(int migratetype)
}
#endif
+/*
+ * Isolation modes:
+ * ISOLATE_MODE_NONE - isolate for purposes other than those below
+ * MEMORY_OFFLINE - isolate to offline (!allocate) memory e.g., skip over
+ * PageHWPoison() pages and PageOffline() pages.
+ * CMA_ALLOCATION - isolate for CMA allocations
+ */
+enum isolate_mode_t {
+ ISOLATE_MODE_NONE,
+ MEMORY_OFFLINE,
+ CMA_ALLOCATION,
+};
+
/*
* Isolation flags:
- * MEMORY_OFFLINE - isolate to offline (!allocate) memory e.g., skip over
- * PageHWPoison() pages and PageOffline() pages.
* REPORT_FAILURE - report details about the failure to isolate the range
- * CMA_ALLOCATION - isolate for CMA allocations
*/
-typedef unsigned int __bitwise isol_flags_t;
-#define MEMORY_OFFLINE ((__force isol_flags_t)BIT(0))
-#define REPORT_FAILURE ((__force isol_flags_t)BIT(1))
-#define CMA_ALLOCATION ((__force isol_flags_t)BIT(2))
+typedef unsigned int __bitwise isolate_flags_t;
+#define REPORT_FAILURE ((__force isolate_flags_t)BIT(0))
void set_pageblock_migratetype(struct page *page, int migratetype);
@@ -40,10 +48,10 @@ bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page)
bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page);
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- isol_flags_t flags);
+ isolate_mode_t mode, isolate_flags_t flags);
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
- isol_flags_t isol_flags);
+ isolate_flags_t isol_flags);
#endif
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 155f0b4ff299..3dab006a537e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -2005,7 +2005,7 @@ int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
/* set above range as isolated */
ret = start_isolate_page_range(start_pfn, end_pfn,
- MEMORY_OFFLINE | REPORT_FAILURE);
+ MEMORY_OFFLINE, REPORT_FAILURE);
if (ret) {
reason = "failure to isolate range";
goto failed_removal_pcplists_disabled;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 51d66f86b93d..3f208f8656f4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6787,7 +6787,8 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
*/
ret = start_isolate_page_range(start, end,
- (alloc_flags & ACR_CMA) ? CMA_ALLOCATION : 0);
+ (alloc_flags & ACR_CMA) ? CMA_ALLOCATION : ISOLATE_MODE_NONE,
+ 0);
if (ret)
goto done;
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 5f00d7113766..fd4818862654 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -48,7 +48,7 @@ static inline void set_pageblock_isolate(struct page *page)
*
*/
static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long end_pfn,
- isol_flags_t flags)
+ isolate_mode_t mode, isolate_flags_t flags)
{
struct page *page = pfn_to_page(start_pfn);
struct zone *zone = page_zone(page);
@@ -63,7 +63,7 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
* isolate CMA pageblocks even when they are not movable in fact
* so consider them movable here.
*/
- if (flags & CMA_ALLOCATION)
+ if (mode == CMA_ALLOCATION)
return NULL;
return page;
@@ -168,8 +168,9 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
* present in [start_pfn, end_pfn). The pageblock must intersect with
* [start_pfn, end_pfn).
*/
-static int set_migratetype_isolate(struct page *page, isol_flags_t isol_flags,
- unsigned long start_pfn, unsigned long end_pfn)
+static int set_migratetype_isolate(struct page *page, isolate_mode_t mode,
+ isolate_flags_t isol_flags, unsigned long start_pfn,
+ unsigned long end_pfn)
{
struct zone *zone = page_zone(page);
struct page *unmovable;
@@ -203,7 +204,7 @@ static int set_migratetype_isolate(struct page *page, isol_flags_t isol_flags,
end_pfn);
unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end,
- isol_flags);
+ mode, isol_flags);
if (!unmovable) {
if (!pageblock_isolate_and_move_free_pages(zone, page)) {
spin_unlock_irqrestore(&zone->lock, flags);
@@ -309,6 +310,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* isolate_single_pageblock() -- tries to isolate a pageblock that might be
* within a free or in-use page.
* @boundary_pfn: pageblock-aligned pfn that a page might cross
+ * @mode: isolation mode
* @flags: isolation flags
* @isolate_before: isolate the pageblock before the boundary_pfn
* @skip_isolation: the flag to skip the pageblock isolation in second
@@ -327,7 +329,8 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* either. The function handles this by splitting the free page or migrating
* the in-use page then splitting the free page.
*/
-static int isolate_single_pageblock(unsigned long boundary_pfn, isol_flags_t flags,
+static int isolate_single_pageblock(unsigned long boundary_pfn,
+ isolate_mode_t mode, isolate_flags_t flags,
bool isolate_before, bool skip_isolation)
{
unsigned long start_pfn;
@@ -357,7 +360,8 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, isol_flags_t fla
VM_BUG_ON(!get_pageblock_isolate(pfn_to_page(isolate_pageblock)));
} else {
ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock),
- flags, isolate_pageblock, isolate_pageblock + pageblock_nr_pages);
+ mode, flags, isolate_pageblock,
+ isolate_pageblock + pageblock_nr_pages);
if (ret)
return ret;
@@ -455,6 +459,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, isol_flags_t fla
* start_isolate_page_range() - mark page range MIGRATE_ISOLATE
* @start_pfn: The first PFN of the range to be isolated.
* @end_pfn: The last PFN of the range to be isolated.
+ * @mode: isolation mode
* @flags: isolation flags
*
* Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
@@ -488,7 +493,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, isol_flags_t fla
* Return: 0 on success and -EBUSY if any part of range cannot be isolated.
*/
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- isol_flags_t flags)
+ isolate_mode_t mode, isolate_flags_t flags)
{
unsigned long pfn;
struct page *page;
@@ -499,7 +504,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
bool skip_isolation = false;
/* isolate [isolate_start, isolate_start + pageblock_nr_pages) pageblock */
- ret = isolate_single_pageblock(isolate_start, flags, false,
+ ret = isolate_single_pageblock(isolate_start, mode, flags, false,
skip_isolation);
if (ret)
return ret;
@@ -508,7 +513,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
skip_isolation = true;
/* isolate [isolate_end - pageblock_nr_pages, isolate_end) pageblock */
- ret = isolate_single_pageblock(isolate_end, flags, true,
+ ret = isolate_single_pageblock(isolate_end, mode, flags, true,
skip_isolation);
if (ret) {
unset_migratetype_isolate(pfn_to_page(isolate_start));
@@ -520,7 +525,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
pfn < isolate_end - pageblock_nr_pages;
pfn += pageblock_nr_pages) {
page = __first_valid_page(pfn, pageblock_nr_pages);
- if (page && set_migratetype_isolate(page, flags,
+ if (page && set_migratetype_isolate(page, mode, flags,
start_pfn, end_pfn)) {
undo_isolate_page_range(isolate_start, pfn);
unset_migratetype_isolate(
@@ -563,7 +568,7 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
*/
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
- isol_flags_t flags)
+ isolate_flags_t flags)
{
struct page *page;
@@ -610,7 +615,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
* Returns 0 if true, -EBUSY if one or more pages are in use.
*/
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
- isol_flags_t isol_flags)
+ isolate_flags_t isol_flags)
{
unsigned long pfn, flags;
struct page *page;
--
2.47.2
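
To sum up the intended usage after this fixup (a sketch assembled from the
hunks above plus the existing REPORT_FAILURE handling in
set_migratetype_isolate(), not a new API): the isolation mode is compared
with ==, while the flags are tested as bits.

	/* callers pick one mode; behavior flags are passed separately */
	ret = start_isolate_page_range(start, end,
			(alloc_flags & ACR_CMA) ? CMA_ALLOCATION : ISOLATE_MODE_NONE,
			0);

	/* policy decisions compare against the mode ... */
	if (mode == CMA_ALLOCATION)
		return NULL;

	/* ... while optional behavior tests the flag bits */
	if (isol_flags & REPORT_FAILURE)
		dump_page(unmovable, "unmovable page");
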
--
Best Regards,
Yan, Zi