Message-ID: <20240306041526.892167-9-hannes@cmpxchg.org>
Date: Tue, 5 Mar 2024 23:08:39 -0500
From: Johannes Weiner <hannes@...xchg.org>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Vlastimil Babka <vbabka@...e.cz>,
Mel Gorman <mgorman@...hsingularity.net>,
Zi Yan <ziy@...dia.com>,
Mike Kravetz <mike.kravetz@...cle.com>,
"Huang, Ying" <ying.huang@...el.com>,
David Hildenbrand <david@...hat.com>,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 08/10] mm: page_alloc: set migratetype inside move_freepages()

From: Zi Yan <ziy@...dia.com>

This avoids changing the migratetype after move_freepages() or
move_freepages_block(), which is error-prone. It also prepares for
upcoming changes that fix move_freepages() moving only part of the
free pages in a range.

Signed-off-by: Zi Yan <ziy@...dia.com>
Signed-off-by: Johannes Weiner <hannes@...xchg.org>
---
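Not part of the patch itself: below is a stand-alone user-space sketch,
with entirely made-up names (toy_block, toy_move_block, toy_freelist),
of the calling convention this change establishes. The helper that
moves a block's free pages also stamps the block's new type in the same
step, so callers can no longer forget to do it afterwards.

/*
 * Illustrative user-space sketch only -- not kernel code. All names
 * are hypothetical and merely mirror the convention introduced by
 * this patch: the function that moves a block's free pages to a
 * freelist also sets the block's type, instead of relying on every
 * caller to do that in a separate step.
 */
#include <assert.h>
#include <stdio.h>

enum toy_type { TOY_MOVABLE, TOY_HIGHATOMIC };

struct toy_block {
	enum toy_type type;	/* analogous to the pageblock migratetype */
	int nr_free;		/* free pages currently in this block */
};

struct toy_freelist {
	int count[2];		/* free pages per type */
};

/*
 * Move all free pages of @block onto @list's freelist for @type and
 * retype the block in the same step, so the freelist and the block
 * type cannot disagree because a caller forgot the second call.
 */
static int toy_move_block(struct toy_freelist *list, struct toy_block *block,
			  enum toy_type type)
{
	int moved = block->nr_free;

	list->count[block->type] -= moved;
	list->count[type] += moved;
	block->type = type;	/* the step callers used to forget */

	return moved;
}

int main(void)
{
	struct toy_freelist list = { .count = { 8, 0 } };
	struct toy_block block = { .type = TOY_MOVABLE, .nr_free = 8 };
	int moved = toy_move_block(&list, &block, TOY_HIGHATOMIC);

	assert(block.type == TOY_HIGHATOMIC);
	assert(list.count[TOY_HIGHATOMIC] == 8);
	printf("moved %d free pages and retyped the block\n", moved);
	return 0;
}
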
mm/page_alloc.c | 27 +++++++++++++--------------
mm/page_isolation.c | 7 +++----
2 files changed, 16 insertions(+), 18 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 82e6c4068647..a057b82c4f1d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1581,9 +1581,8 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
#endif
/*
- * Move the free pages in a range to the freelist tail of the requested type.
- * Note that start_page and end_pages are not aligned on a pageblock
- * boundary. If alignment is required, use move_freepages_block()
+ * Change the type of a block and move all its free pages to that
+ * type's freelist.
*/
static int move_freepages(struct zone *zone, unsigned long start_pfn,
unsigned long end_pfn, int migratetype)
@@ -1593,6 +1592,9 @@ static int move_freepages(struct zone *zone, unsigned long start_pfn,
unsigned int order;
int pages_moved = 0;
+ VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1));
+ VM_WARN_ON(start_pfn + pageblock_nr_pages - 1 != end_pfn);
+
for (pfn = start_pfn; pfn <= end_pfn;) {
page = pfn_to_page(pfn);
if (!PageBuddy(page)) {
@@ -1610,6 +1612,8 @@ static int move_freepages(struct zone *zone, unsigned long start_pfn,
pages_moved += 1 << order;
}
+ set_pageblock_migratetype(pfn_to_page(start_pfn), migratetype);
+
return pages_moved;
}
@@ -1837,7 +1841,6 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
page_group_by_mobility_disabled) {
move_freepages(zone, start_pfn, end_pfn, start_type);
- set_pageblock_migratetype(page, start_type);
return __rmqueue_smallest(zone, order, start_type);
}
@@ -1911,12 +1914,10 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
/* Yoink! */
mt = get_pageblock_migratetype(page);
/* Only reserve normal pageblocks (i.e., they can merge with others) */
- if (migratetype_is_mergeable(mt)) {
- if (move_freepages_block(zone, page, MIGRATE_HIGHATOMIC) != -1) {
- set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
+ if (migratetype_is_mergeable(mt))
+ if (move_freepages_block(zone, page,
+ MIGRATE_HIGHATOMIC) != -1)
zone->nr_reserved_highatomic += pageblock_nr_pages;
- }
- }
out_unlock:
spin_unlock_irqrestore(&zone->lock, flags);
@@ -1995,7 +1996,6 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
* not fail on zone boundaries.
*/
WARN_ON_ONCE(ret == -1);
- set_pageblock_migratetype(page, ac->migratetype);
if (ret > 0) {
spin_unlock_irqrestore(&zone->lock, flags);
return ret;
@@ -2698,10 +2698,9 @@ int __isolate_free_page(struct page *page, unsigned int order)
* Only change normal pageblocks (i.e., they can merge
* with others)
*/
- if (migratetype_is_mergeable(mt) &&
- move_freepages_block(zone, page,
- MIGRATE_MOVABLE) != -1)
- set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+ if (migratetype_is_mergeable(mt))
+ move_freepages_block(zone, page,
+ MIGRATE_MOVABLE);
}
}
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 71539d7b96cf..f84f0981b2df 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -188,7 +188,6 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
return -EBUSY;
}
__mod_zone_freepage_state(zone, -nr_pages, mt);
- set_pageblock_migratetype(page, MIGRATE_ISOLATE);
zone->nr_isolate_pageblock++;
spin_unlock_irqrestore(&zone->lock, flags);
return 0;
@@ -262,10 +261,10 @@ static void unset_migratetype_isolate(struct page *page, int migratetype)
*/
WARN_ON_ONCE(nr_pages == -1);
__mod_zone_freepage_state(zone, nr_pages, migratetype);
- }
- set_pageblock_migratetype(page, migratetype);
- if (isolated_page)
+ } else {
+ set_pageblock_migratetype(page, migratetype);
__putback_isolated_page(page, order, migratetype);
+ }
zone->nr_isolate_pageblock--;
out:
spin_unlock_irqrestore(&zone->lock, flags);
--
2.44.0