Message-ID: <20260123064231.250767-1-realwujing@gmail.com>
Date: Fri, 23 Jan 2026 01:42:30 -0500
From: Qiliang Yuan <realwujing@...il.com>
To: vbabka@...e.cz
Cc: akpm@...ux-foundation.org,
david@...nel.org,
edumazet@...gle.com,
hannes@...xchg.org,
jackmanb@...gle.com,
jis1@...natelecom.cn,
lance.yang@...ux.dev,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
liyi1@...natelecom.cn,
mhocko@...e.com,
netdev@...r.kernel.org,
realwujing@...il.com,
rppt@...nel.org,
sunshx@...natelecom.cn,
surenb@...gle.com,
wangh13@...natelecom.cn,
weixugc@...gle.com,
willy@...radead.org,
yuanql9@...natelecom.cn,
zhangjn11@...natelecom.cn,
zhangzq20@...natelecom.cn,
ziy@...dia.com
Subject: [PATCH v7] mm/page_alloc: boost watermarks on atomic allocation failure
Atomic allocations (GFP_ATOMIC) are prone to failure under heavy memory
pressure because they cannot enter direct reclaim. Introduce a
watermark boost mechanism to mitigate this.
When a non-blocking (atomic) request enters the slowpath, the first
eligible zone in its zonelist gets its watermark_boost raised under
zone->lock. This wakes kswapd to reclaim proactively, building a safety
buffer for future atomic allocations. A one-second per-zone debounce
prevents excessive boosting during traffic bursts. Atomic requests are
detected by the absence of __GFP_DIRECT_RECLAIM; testing
gfp_mask & GFP_ATOMIC would also match GFP_KERNEL, since both masks
contain __GFP_KSWAPD_RECLAIM.
This approach reuses the existing watermark_boost infrastructure with
minimal overhead. Watermark updates are serialized by zone->lock, taken
with IRQs disabled since the lock is also acquired from freeing paths
that may run in interrupt context.
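Worked example, assuming 4 KiB base pages and 2 MiB pageblocks: a zone
with 4 GiB managed (1048576 pages) receives a per-boost increment of
1048576 >> 10 = 1024 pages (4 MiB), i.e. roughly 0.1% of the zone.
Zones below ~2 GiB fall back to the pageblock_nr_pages floor (512
pages), and the cumulative boost remains capped at the max_boost that
boost_watermark() derives from vm.watermark_boost_factor.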
Allocation failure logs:
[38535644.718700] node 0: slabs: 1031, objs: 43328, free: 0
[38535644.725059] node 1: slabs: 339, objs: 17616, free: 317
[38535645.428345] SLUB: Unable to allocate memory on node -1, gfp=0x480020(GFP_ATOMIC)
[38535645.436888] cache: skbuff_head_cache, object size: 232, buffer size: 256, default order: 2, min order: 0
[38535645.447664] node 0: slabs: 940, objs: 40864, free: 144
[38535645.454026] node 1: slabs: 322, objs: 19168, free: 383
[38535645.556122] SLUB: Unable to allocate memory on node -1, gfp=0x480020(GFP_ATOMIC)
[38535645.564576] cache: skbuff_head_cache, object size: 232, buffer size: 256, default order: 2, min order: 0
[38535649.655523] warn_alloc: 59 callbacks suppressed
[38535649.655527] swapper/100: page allocation failure: order:0, mode:0x480020(GFP_ATOMIC), nodemask=(null)
[38535649.671692] swapper/100 cpuset=/ mems_allowed=0-1
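The failures above are order-0 GFP_ATOMIC requests from
skbuff_head_cache, i.e. the network RX path; "swapper" as the current
task points to interrupt/softirq context on an otherwise idle CPU,
precisely the kind of caller that cannot fall back to direct reclaim.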
Signed-off-by: Qiliang Yuan <realwujing@...il.com>
Signed-off-by: Qiliang Yuan <yuanql9@...natelecom.cn>
---
v7:
- Use local variable for boost_amount to improve code readability
- Add zone->lock protection in boost_zones_for_atomic()
- Add lockdep assertion in boost_watermark() to prevent locking mistakes
- Remove redundant boost call at fail label due to 1-second debounce
v6:
- Replace magic number ">> 10" with ATOMIC_BOOST_SCALE_SHIFT define
- Add documentation explaining 0.1% zone size boost rationale
v5:
- Simplify to use native boost_watermark() instead of custom logic
v4:
- Add watermark_scale_boost and gradual decay via balance_pgdat
v3:
- Move debounce timer to per-zone; optimize zone selection
v2:
- Add debounce logic and zone-proportional boosting
v1:
- Initial: boost min_free_kbytes on GFP_ATOMIC failure
 include/linux/mmzone.h |  1 +
 mm/page_alloc.c        | 47 +++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 46 insertions(+), 2 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 75ef7c9f9307..8e37e4e6765b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -882,6 +882,7 @@ struct zone {
/* zone watermarks, access with *_wmark_pages(zone) macros */
unsigned long _watermark[NR_WMARK];
unsigned long watermark_boost;
+ unsigned long last_boost_jiffies; /* last GFP_ATOMIC boost, for 1s debounce */
unsigned long nr_reserved_highatomic;
unsigned long nr_free_highatomic;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c380f063e8b7..94168571cc38 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -218,6 +218,13 @@ unsigned int pageblock_order __read_mostly;
static void __free_pages_ok(struct page *page, unsigned int order,
fpi_t fpi_flags);
+/*
+ * Boost watermarks by ~0.1% of zone size on atomic allocation pressure.
+ * This provides zone-proportional safety buffers: ~1MB per 1GB of zone size.
+ * Larger zones under GFP_ATOMIC pressure need proportionally larger reserves.
+ */
+#define ATOMIC_BOOST_SCALE_SHIFT 10
+
/*
* results with 256, 32 in the lowmem_reserve sysctl:
* 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
@@ -2161,6 +2168,9 @@ bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *pag
static inline bool boost_watermark(struct zone *zone)
{
unsigned long max_boost;
+ unsigned long boost_amount;
+
+ lockdep_assert_held(&zone->lock);
if (!watermark_boost_factor)
return false;
@@ -2189,12 +2199,41 @@ static inline bool boost_watermark(struct zone *zone)
max_boost = max(pageblock_nr_pages, max_boost);
- zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
- max_boost);
+ boost_amount = max(pageblock_nr_pages,
+ zone_managed_pages(zone) >> ATOMIC_BOOST_SCALE_SHIFT);
+ zone->watermark_boost = min(zone->watermark_boost + boost_amount,
+ max_boost);
return true;
}
+static void boost_zones_for_atomic(struct alloc_context *ac, gfp_t gfp_mask)
+{
+ struct zoneref *z;
+ struct zone *zone;
+ unsigned long now = jiffies;
+ unsigned long flags;
+ bool should_wake;
+
+ for_each_zone_zonelist(zone, z, ac->zonelist, ac->highest_zoneidx) {
+ /* Rate-limit boosts to once per second per zone */
+ if (time_after(now, zone->last_boost_jiffies + HZ)) {
+ zone->last_boost_jiffies = now;
+
+ /* Modify watermark under lock, wake kswapd outside */
+ spin_lock_irqsave(&zone->lock, flags);
+ should_wake = boost_watermark(zone);
+ spin_unlock_irqrestore(&zone->lock, flags);
+
+ if (should_wake)
+ wakeup_kswapd(zone, gfp_mask, 0, ac->highest_zoneidx);
+
+ /* Boost at most one zone per call */
+ break;
+ }
+ }
+}
+
/*
* When we are falling back to another migratetype during allocation, should we
* try to claim an entire block to satisfy further allocations, instead of
@@ -4742,6 +4781,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
if (page)
goto got_pg;
+ /* Boost watermarks for non-blocking (atomic) requests in slowpath */
+ if (!(gfp_mask & __GFP_DIRECT_RECLAIM) && order == 0)
+ boost_zones_for_atomic(ac, gfp_mask);
+
/*
* For costly allocations, try direct compaction first, as it's likely
* that we have enough base pages and don't need to reclaim. For non-
--
2.51.0