Message-ID: <81b1d642-2ec0-49f5-89fc-19a3828419ff@suse.cz>
Date: Wed, 27 Mar 2024 10:30:30 +0100
From: Vlastimil Babka <vbabka@...e.cz>
To: Johannes Weiner <hannes@...xchg.org>,
Andrew Morton <akpm@...ux-foundation.org>
Cc: Mel Gorman <mgorman@...hsingularity.net>, Zi Yan <ziy@...dia.com>,
"Huang, Ying" <ying.huang@...el.com>, David Hildenbrand <david@...hat.com>,
linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH V4 00/10] mm: page_alloc: freelist migratetype hygiene
On 3/20/24 7:02 PM, Johannes Weiner wrote:
> V4:
> - fixed !pcp_order_allowed() case in free_unref_folios()
> - reworded the patch 0 changelog a bit for the git log
> - rebased to mm-everything-2024-03-19-23-01
> - runtime-tested again with various CONFIG_DEBUG_FOOs enabled
>
> ---
>
> The page allocator's mobility grouping is intended to keep unmovable
> pages separate from reclaimable/compactable ones to allow on-demand
> defragmentation for higher-order allocations and huge pages.
>
> Currently, there are several places where accidental type mixing
> occurs: an allocation asks for a page of a certain migratetype and
> receives another. This ruins pageblocks for compaction, which in turn
> makes allocating huge pages more expensive and less reliable.
>
> The series addresses those causes. The last patch adds type checks on
> all freelist movements to prevent new violations being introduced.
>
> The benefits can be seen in a mixed workload that stresses the machine
> with a memcache-type workload and a kernel build job while
> periodically attempting to allocate batches of THP. The following data
> is aggregated over 50 consecutive defconfig builds:
Great stuff. What would you say to the following on top?
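In short, the calling convention for the existing callers ends up looking
roughly like the sketch below (a simplified illustration only; the actual
hunks follow after the scissor line):

	/*
	 * Sketch: callers now get just the block's start_pfn from
	 * prep_move_freepages_block(); the (exclusive) end_pfn is
	 * derived internally via pageblock_end_pfn().
	 */
	unsigned long start_pfn;

	if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
		return -1;	/* block straddles a zone boundary */

	return __move_freepages_block(zone, start_pfn, old_mt, new_mt);
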
----8<----
From 84f8a6d3a9e34c7ed8b438c3152d56e359a4ffb4 Mon Sep 17 00:00:00 2001
From: Vlastimil Babka <vbabka@...e.cz>
Date: Wed, 27 Mar 2024 10:19:47 +0100
Subject: [PATCH] mm: page_alloc: change move_freepages() to
__move_freepages_block()
The function is now supposed to be called only on a single pageblock and
checks start_pfn and end_pfn accordingly. Rename it to make this more
obvious and drop the end_pfn parameter, which can be determined trivially
and is not used by any of the callers for anything else.

Also make the (now internal) end_pfn exclusive, which is more common.
Signed-off-by: Vlastimil Babka <vbabka@...e.cz>
---
mm/page_alloc.c | 43 ++++++++++++++++++++-----------------------
1 file changed, 20 insertions(+), 23 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 34c84ef16b66..75aefbd52ef9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1566,18 +1566,18 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
* Change the type of a block and move all its free pages to that
* type's freelist.
*/
-static int move_freepages(struct zone *zone, unsigned long start_pfn,
- unsigned long end_pfn, int old_mt, int new_mt)
+static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
+ int old_mt, int new_mt)
{
struct page *page;
- unsigned long pfn;
+ unsigned long pfn, end_pfn;
unsigned int order;
int pages_moved = 0;
VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1));
- VM_WARN_ON(start_pfn + pageblock_nr_pages - 1 != end_pfn);
+ end_pfn = pageblock_end_pfn(start_pfn);
- for (pfn = start_pfn; pfn <= end_pfn;) {
+ for (pfn = start_pfn; pfn < end_pfn;) {
page = pfn_to_page(pfn);
if (!PageBuddy(page)) {
pfn++;
@@ -1603,14 +1603,13 @@ static int move_freepages(struct zone *zone, unsigned long start_pfn,
static bool prep_move_freepages_block(struct zone *zone, struct page *page,
unsigned long *start_pfn,
- unsigned long *end_pfn,
int *num_free, int *num_movable)
{
unsigned long pfn, start, end;
pfn = page_to_pfn(page);
start = pageblock_start_pfn(pfn);
- end = pageblock_end_pfn(pfn) - 1;
+ end = pageblock_end_pfn(pfn);
/*
* The caller only has the lock for @zone, don't touch ranges
@@ -1621,16 +1620,15 @@ static bool prep_move_freepages_block(struct zone *zone, struct page *page,
*/
if (!zone_spans_pfn(zone, start))
return false;
- if (!zone_spans_pfn(zone, end))
+ if (!zone_spans_pfn(zone, end - 1))
return false;
*start_pfn = start;
- *end_pfn = end;
if (num_free) {
*num_free = 0;
*num_movable = 0;
- for (pfn = start; pfn <= end;) {
+ for (pfn = start; pfn < end;) {
page = pfn_to_page(pfn);
if (PageBuddy(page)) {
int nr = 1 << buddy_order(page);
@@ -1656,13 +1654,12 @@ static bool prep_move_freepages_block(struct zone *zone, struct page *page,
static int move_freepages_block(struct zone *zone, struct page *page,
int old_mt, int new_mt)
{
- unsigned long start_pfn, end_pfn;
+ unsigned long start_pfn;
- if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
- NULL, NULL))
+ if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
return -1;
- return move_freepages(zone, start_pfn, end_pfn, old_mt, new_mt);
+ return __move_freepages_block(zone, start_pfn, old_mt, new_mt);
}
#ifdef CONFIG_MEMORY_ISOLATION
@@ -1733,10 +1730,9 @@ static void split_large_buddy(struct zone *zone, struct page *page,
bool move_freepages_block_isolate(struct zone *zone, struct page *page,
int migratetype)
{
- unsigned long start_pfn, end_pfn, pfn;
+ unsigned long start_pfn, pfn;
- if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
- NULL, NULL))
+ if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
return false;
/* No splits needed if buddies can't span multiple blocks */
@@ -1767,8 +1763,9 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
return true;
}
move:
- move_freepages(zone, start_pfn, end_pfn,
- get_pfnblock_migratetype(page, start_pfn), migratetype);
+ __move_freepages_block(zone, start_pfn,
+ get_pfnblock_migratetype(page, start_pfn),
+ migratetype);
return true;
}
#endif /* CONFIG_MEMORY_ISOLATION */
@@ -1868,7 +1865,7 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
unsigned int alloc_flags, bool whole_block)
{
int free_pages, movable_pages, alike_pages;
- unsigned long start_pfn, end_pfn;
+ unsigned long start_pfn;
int block_type;
block_type = get_pageblock_migratetype(page);
@@ -1901,8 +1898,8 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
goto single_page;
/* moving whole block can fail due to zone boundary conditions */
- if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
- &free_pages, &movable_pages))
+ if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages,
+ &movable_pages))
goto single_page;
/*
@@ -1932,7 +1929,7 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
*/
if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
page_group_by_mobility_disabled) {
- move_freepages(zone, start_pfn, end_pfn, block_type, start_type);
+ __move_freepages_block(zone, start_pfn, block_type, start_type);
return __rmqueue_smallest(zone, order, start_type);
}
--
2.44.0