Message-ID: <20250925181109.11dd36e5@fangorn>
Date: Thu, 25 Sep 2025 18:11:09 -0400
From: Rik van Riel <riel@...riel.com>
To: Frank van der Linden <fvdl@...gle.com>
Cc: akpm@...ux-foundation.org, muchun.song@...ux.dev, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, hannes@...xchg.org, david@...hat.com,
roman.gushchin@...ux.dev, kernel-team@...a.com
Subject: [RFC PATCH 00/12] mm,cma: call CMA balancing from page reclaim code

Call CMA balancing from the page reclaim code when page reclaim
is reclaiming pages that are unsuitable for the allocator.

To keep direct reclaim latencies low, kswapd will do CMA balancing
whenever some of the reclaimed pages are unsuitable for the allocator
that woke up kswapd, while direct reclaimers will only do CMA
balancing if most of the reclaimed pages are unsuitable.
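
To make the two thresholds easy to see at a glance, here is a minimal,
self-contained C sketch of the policy described above. This is plain
userspace C, not part of the patch; the helper names
direct_reclaim_should_balance() and kswapd_should_balance() are invented
for illustration and do not exist in the series.

#include <stdbool.h>
#include <stdio.h>

/*
 * Direct reclaim: only trigger CMA balancing when (almost) all of the
 * reclaimed pages were unsuitable for the allocator, so the extra
 * latency is only paid when reclaim is clearly not making useful
 * progress for the caller.
 */
static bool direct_reclaim_should_balance(unsigned long nr_reclaimed,
					  unsigned long nr_unsuitable)
{
	return nr_unsuitable >= nr_reclaimed - 2;
}

/*
 * kswapd: trigger CMA balancing whenever any of the reclaimed pages
 * were unsuitable, since kswapd runs in the background and does not
 * add latency to the allocation that woke it up.
 */
static bool kswapd_should_balance(unsigned long nr_unsuitable)
{
	return nr_unsuitable > 0;
}

int main(void)
{
	printf("direct reclaim, 31 of 32 unsuitable: %d\n",
	       direct_reclaim_should_balance(32, 31));
	printf("direct reclaim,  8 of 32 unsuitable: %d\n",
	       direct_reclaim_should_balance(32, 8));
	printf("kswapd, 1 unsuitable page:           %d\n",
	       kswapd_should_balance(1));
	return 0;
}
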
Signed-off-by: Rik van Riel <riel@...riel.com>
---
mm/vmscan.c | 31 ++++++++++++++++++++++++++++++-
1 file changed, 30 insertions(+), 1 deletion(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index a48aec8bfd92..ec6bde5b07d3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -168,6 +168,9 @@ struct scan_control {
/* Number of pages freed so far during a call to shrink_zones() */
unsigned long nr_reclaimed;
+ /* Number of pages reclaimed, but unsuitable to the allocator */
+ unsigned long nr_unsuitable;
+
struct {
unsigned int dirty;
unsigned int unqueued_dirty;
@@ -1092,6 +1095,19 @@ static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask)
return !data_race(folio_swap_flags(folio) & SWP_FS_OPS);
}
+#ifdef CONFIG_CMA
+static bool unsuitable_folio(struct folio *folio, struct scan_control *sc)
+{
+ return gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
+ folio_migratetype(folio) == MIGRATE_CMA;
+}
+#else
+static bool unsuitable_folio(struct folio *folio, struct scan_control *sc)
+{
+ return false;
+}
+#endif
+
/*
* shrink_folio_list() returns the number of reclaimed pages
*/
@@ -1103,7 +1119,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
struct folio_batch free_folios;
LIST_HEAD(ret_folios);
LIST_HEAD(demote_folios);
- unsigned int nr_reclaimed = 0, nr_demoted = 0;
+ unsigned int nr_reclaimed = 0, nr_demoted = 0, nr_unsuitable = 0;
unsigned int pgactivate = 0;
bool do_demote_pass;
struct swap_iocb *plug = NULL;
@@ -1530,6 +1546,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
* leave it off the LRU).
*/
nr_reclaimed += nr_pages;
+ if (unsuitable_folio(folio, sc))
+ nr_unsuitable += nr_pages;
continue;
}
}
@@ -1560,6 +1578,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
* all pages in it.
*/
nr_reclaimed += nr_pages;
+ if (unsuitable_folio(folio, sc))
+ nr_unsuitable += nr_pages;
folio_unqueue_deferred_split(folio);
if (folio_batch_add(&free_folios, folio) == 0) {
@@ -1641,6 +1661,9 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
if (plug)
swap_write_unplug(plug);
+
+ sc->nr_unsuitable += nr_unsuitable;
+
return nr_reclaimed;
}
@@ -6431,6 +6454,10 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
delayacct_freepages_end();
+ /* Almost all memory reclaimed was unsuitable? Move data into CMA. */
+ if (sc->nr_unsuitable >= sc->nr_reclaimed - 2)
+ balance_cma_zonelist(zonelist, SWAP_CLUSTER_MAX);
+
if (sc->nr_reclaimed)
return sc->nr_reclaimed;
@@ -7169,6 +7196,8 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
if (!sc.nr_reclaimed)
pgdat->kswapd_failures++;
+ if (sc.nr_unsuitable)
+ balance_node_cma(pgdat->node_id, NULL);
out:
clear_reclaim_active(pgdat, highest_zoneidx);
--
2.47.3