Message-ID: <20260122020742.230219-1-realwujing@gmail.com>
Date: Wed, 21 Jan 2026 21:07:42 -0500
From: Qiliang Yuan <realwujing@...il.com>
To: akpm@...ux-foundation.org
Cc: david@...nel.org,
mhocko@...e.com,
vbabka@...e.cz,
willy@...radead.org,
lance.yang@...ux.dev,
hannes@...xchg.org,
surenb@...gle.com,
jackmanb@...gle.com,
ziy@...dia.com,
weixugc@...gle.com,
rppt@...nel.org,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
netdev@...r.kernel.org,
edumazet@...gle.com,
jis1@...natelecom.cn,
wangh13@...natelecom.cn,
liyi1@...natelecom.cn,
sunshx@...natelecom.cn,
zhangzq20@...natelecom.cn,
zhangjn11@...natelecom.cn,
Qiliang Yuan <realwujing@...il.com>,
Qiliang Yuan <yuanql9@...natelecom.cn>
Subject: [PATCH v6] mm/page_alloc: boost watermarks on atomic allocation failure
Atomic allocations (GFP_ATOMIC) are prone to failure under heavy memory
pressure because they cannot enter direct reclaim. Introduce a 'Soft
Boost' mechanism to mitigate such failures.
When a GFP_ATOMIC request fails or enters the slowpath, the preferred
zone's watermark_boost is increased. This triggers kswapd to proactively
reclaim memory, creating a safety buffer for future atomic bursts.
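The boost amount is zone-proportional: zone_managed_pages(zone) >>
ATOMIC_BOOST_SCALE_SHIFT (roughly 0.1% of the zone), with
pageblock_nr_pages as a floor and boost_watermark()'s existing
max_boost as a ceiling. As a rough illustration, assuming 4 KiB pages:

	 4 GiB zone:  1048576 pages >> 10 =  1024 pages ~=  4 MiB per boost
	64 GiB zone: 16777216 pages >> 10 = 16384 pages ~= 64 MiB per boost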
To prevent excessive reclaim during packet storms, a 1-second debounce
timer (last_boost_jiffies) is added to each zone to rate-limit boosts.
This approach reuses existing watermark_boost infrastructure, ensuring
minimal overhead and asynchronous background reclaim via kswapd.
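For reference, watermark_boost already feeds into the effective
watermarks checked by the allocator and kswapd via the wmark_pages()
helpers in include/linux/mmzone.h (as of the base this applies to):

	#define wmark_pages(z, i)	(z->_watermark[i] + z->watermark_boost)
	#define min_wmark_pages(z)	(wmark_pages(z, WMARK_MIN))
	#define high_wmark_pages(z)	(wmark_pages(z, WMARK_HIGH))

so raising watermark_boost lifts the high watermark that kswapd reclaims
towards, without touching the sysctl-visible min_free_kbytes.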
Allocation failure logs:
[38535644.718700] node 0: slabs: 1031, objs: 43328, free: 0
[38535644.725059] node 1: slabs: 339, objs: 17616, free: 317
[38535645.428345] SLUB: Unable to allocate memory on node -1, gfp=0x480020(GFP_ATOMIC)
[38535645.436888] cache: skbuff_head_cache, object size: 232, buffer size: 256, default order: 2, min order: 0
[38535645.447664] node 0: slabs: 940, objs: 40864, free: 144
[38535645.454026] node 1: slabs: 322, objs: 19168, free: 383
[38535645.556122] SLUB: Unable to allocate memory on node -1, gfp=0x480020(GFP_ATOMIC)
[38535645.564576] cache: skbuff_head_cache, object size: 232, buffer size: 256, default order: 2, min order: 0
[38535649.655523] warn_alloc: 59 callbacks suppressed
[38535649.655527] swapper/100: page allocation failure: order:0, mode:0x480020(GFP_ATOMIC), nodemask=(null)
[38535649.671692] swapper/100 cpuset=/ mems_allowed=0-1
Signed-off-by: Qiliang Yuan <realwujing@...il.com>
Signed-off-by: Qiliang Yuan <yuanql9@...natelecom.cn>
---
v6:
- Replace magic number ">> 10" with ATOMIC_BOOST_SCALE_SHIFT define
- Add documentation explaining 0.1% zone size boost rationale
v5:
- Simplify to use native boost_watermark() instead of custom logic
v4:
- Add watermark_scale_boost and gradual decay via balance_pgdat
v3:
- Move debounce timer to per-zone; optimize zone selection
v2:
- Add debounce logic and zone-proportional boosting
v1:
- Initial: boost min_free_kbytes on GFP_ATOMIC failure
---
 include/linux/mmzone.h |  1 +
 mm/page_alloc.c        | 36 +++++++++++++++++++++++++++++++++++-
 2 files changed, 36 insertions(+), 1 deletion(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 75ef7c9f9307..8e37e4e6765b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -882,6 +882,7 @@ struct zone {
/* zone watermarks, access with *_wmark_pages(zone) macros */
unsigned long _watermark[NR_WMARK];
unsigned long watermark_boost;
+ unsigned long last_boost_jiffies;
unsigned long nr_reserved_highatomic;
unsigned long nr_free_highatomic;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c380f063e8b7..8ea2435125d5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -218,6 +218,13 @@ unsigned int pageblock_order __read_mostly;
static void __free_pages_ok(struct page *page, unsigned int order,
fpi_t fpi_flags);
+/*
+ * Boost watermarks by ~0.1% of zone size on atomic allocation pressure.
+ * This provides zone-proportional safety buffers: ~1MB per 1GB of zone size.
+ * Larger zones under GFP_ATOMIC pressure need proportionally larger reserves.
+ */
+#define ATOMIC_BOOST_SCALE_SHIFT 10
+
/*
* results with 256, 32 in the lowmem_reserve sysctl:
* 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
@@ -2189,12 +2196,31 @@ static inline bool boost_watermark(struct zone *zone)
max_boost = max(pageblock_nr_pages, max_boost);
- zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
+ zone->watermark_boost = min(zone->watermark_boost +
+ max(pageblock_nr_pages, zone_managed_pages(zone) >> ATOMIC_BOOST_SCALE_SHIFT),
max_boost);
return true;
}
+static void boost_zones_for_atomic(struct alloc_context *ac, gfp_t gfp_mask)
+{
+ struct zoneref *z;
+ struct zone *zone;
+ unsigned long now = jiffies;
+
+ for_each_zone_zonelist(zone, z, ac->zonelist, ac->highest_zoneidx) {
+ /* 1 second debounce to avoid spamming boosts in a burst */
+ if (time_after(now, zone->last_boost_jiffies + HZ)) {
+ zone->last_boost_jiffies = now;
+ if (boost_watermark(zone))
+ wakeup_kswapd(zone, gfp_mask, 0, ac->highest_zoneidx);
+ /* Only boost the preferred zone to be precise */
+ break;
+ }
+ }
+}
+
/*
* When we are falling back to another migratetype during allocation, should we
* try to claim an entire block to satisfy further allocations, instead of
@@ -4742,6 +4768,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
if (page)
goto got_pg;
+ /* Proactively boost for atomic requests entering slowpath */
+ if ((gfp_mask & GFP_ATOMIC) && order == 0)
+ boost_zones_for_atomic(ac, gfp_mask);
+
/*
* For costly allocations, try direct compaction first, as it's likely
* that we have enough base pages and don't need to reclaim. For non-
@@ -4947,6 +4977,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
goto retry;
}
fail:
+ /* Boost watermarks on atomic allocation failure to trigger kswapd */
+ if (unlikely(page == NULL && (gfp_mask & GFP_ATOMIC) && order == 0))
+ boost_zones_for_atomic(ac, gfp_mask);
+
warn_alloc(gfp_mask, ac->nodemask,
"page allocation failure: order:%u", order);
got_pg:
--
2.51.0