Message-ID: <20250915195153.462039-9-fvdl@google.com>
Date: Mon, 15 Sep 2025 19:51:49 +0000
From: Frank van der Linden <fvdl@...gle.com>
To: akpm@...ux-foundation.org, muchun.song@...ux.dev, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Cc: hannes@...xchg.org, david@...hat.com, roman.gushchin@...ux.dev,
Frank van der Linden <fvdl@...gle.com>
Subject: [RFC PATCH 08/12] mm/compaction: simplify isolation order checks a bit
The code that isolates pages for migration always checked both
cc->alloc_contig and skip_isolation_on_order to decide whether
a page could be isolated.

Simplify this a bit by moving the cc->alloc_contig check into
skip_isolation_on_order. Also rename alloc_contig to migrate_large,
since this field will soon gain an additional user (CMA balancing)
beyond alloc_contig_range.
No functional change.
Signed-off-by: Frank van der Linden <fvdl@...gle.com>
---
mm/compaction.c | 26 ++++++++++++++------------
mm/internal.h | 2 +-
mm/page_alloc.c | 2 +-
3 files changed, 16 insertions(+), 14 deletions(-)
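
For quick reference, a condensed view of how the consolidated helper
looks after this change. This is only an illustrative sketch (it
assumes the existing pageblock_order fallback at the end of the
function, which this patch does not touch):

	static bool skip_isolation_on_order(struct compact_control *cc, int order)
	{
		/* Callers that must migrate large folios never skip. */
		if (cc->migrate_large)
			return false;

		/*
		 * For regular compaction, skip folios at or above the
		 * target order; a free folio of that order would have
		 * satisfied the request already.
		 */
		if (!is_via_compact_memory(cc->order) && order >= cc->order)
			return true;

		/* Compaction never isolates folios larger than a pageblock. */
		return order >= pageblock_order;
	}
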
diff --git a/mm/compaction.c b/mm/compaction.c
index 6a2c06e356c5..2e6c30f50b89 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -793,13 +793,15 @@ static bool too_many_isolated(struct compact_control *cc)
/**
* skip_isolation_on_order() - determine when to skip folio isolation based on
* folio order and compaction target order
+ * @cc: compact control structure containing target order
* @order: to-be-isolated folio order
- * @target_order: compaction target order
*
* This avoids unnecessary folio isolations during compaction.
*/
-static bool skip_isolation_on_order(int order, int target_order)
+static bool skip_isolation_on_order(struct compact_control *cc, int order)
{
+ if (cc->migrate_large)
+ return false;
/*
* Unless we are performing global compaction (i.e.,
* is_via_compact_memory), skip any folios that are larger than the
@@ -807,7 +809,7 @@ static bool skip_isolation_on_order(int order, int target_order)
* the desired target_order, so migrating this folio would likely fail
* later.
*/
- if (!is_via_compact_memory(target_order) && order >= target_order)
+ if (!is_via_compact_memory(cc->order) && order >= cc->order)
return true;
/*
* We limit memory compaction to pageblocks and won't try
@@ -850,6 +852,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
unsigned long next_skip_pfn = 0;
bool skip_updated = false;
int ret = 0;
+ unsigned int order;
cc->migrate_pfn = low_pfn;
@@ -948,13 +951,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
}
if (PageHuge(page)) {
- const unsigned int order = compound_order(page);
/*
* skip hugetlbfs if we are not compacting for pages
* bigger than its order. THPs and other compound pages
* are handled below.
*/
- if (!cc->alloc_contig) {
+ if (!cc->migrate_large) {
+ order = compound_order(page);
if (order <= MAX_PAGE_ORDER) {
low_pfn += (1UL << order) - 1;
@@ -962,7 +965,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
}
goto isolate_fail;
}
- /* for alloc_contig case */
+ /* for migrate_large case */
if (locked) {
unlock_page_lruvec_irqrestore(locked, flags);
locked = NULL;
@@ -1030,11 +1033,11 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
* skip them at once. The check is racy, but we can consider
* only valid values and the only danger is skipping too much.
*/
- if (PageCompound(page) && !cc->alloc_contig) {
- const unsigned int order = compound_order(page);
+ if (PageCompound(page)) {
+ order = compound_order(page);
/* Skip based on page order and compaction target order. */
- if (skip_isolation_on_order(order, cc->order)) {
+ if (skip_isolation_on_order(cc, order)) {
if (order <= MAX_PAGE_ORDER) {
low_pfn += (1UL << order) - 1;
nr_scanned += (1UL << order) - 1;
@@ -1182,9 +1185,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
/*
* Check LRU folio order under the lock
*/
- if (unlikely(skip_isolation_on_order(folio_order(folio),
- cc->order) &&
- !cc->alloc_contig)) {
+ order = folio_order(folio);
+ if (unlikely(skip_isolation_on_order(cc, order))) {
low_pfn += folio_nr_pages(folio) - 1;
nr_scanned += folio_nr_pages(folio) - 1;
folio_set_lru(folio);
diff --git a/mm/internal.h b/mm/internal.h
index 7916d8be8922..ffcb3aec05ed 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -909,7 +909,7 @@ struct compact_control {
* isolation or migration failures to
* ensure forward progress.
*/
- bool alloc_contig; /* alloc_contig_range allocation */
+ bool migrate_large; /* Always migrate large/huge pages */
};
/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d3966d31c039..dc59aaa63ae6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6903,7 +6903,7 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
.mode = MIGRATE_SYNC,
.ignore_skip_hint = true,
.no_set_skip_hint = true,
- .alloc_contig = true,
+ .migrate_large = true,
};
INIT_LIST_HEAD(&cc.migratepages);
enum pb_isolate_mode mode = (alloc_flags & ACR_FLAGS_CMA) ?
--
2.51.0.384.g4c02a37b29-goog