Message-ID: <tencent_9DB6637676D639B4B7AEA09CC6A6F9E49D0A@qq.com>
Date: Sun, 4 Jan 2026 20:26:52 +0800
From: wujing <realwujing@...com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Vlastimil Babka <vbabka@...e.cz>
Cc: Suren Baghdasaryan <surenb@...gle.com>,
Michal Hocko <mhocko@...e.com>,
Brendan Jackman <jackmanb@...gle.com>,
Johannes Weiner <hannes@...xchg.org>,
Zi Yan <ziy@...dia.com>,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
Qiliang Yuan <yuanql9@...natelecom.cn>,
wujing <realwujing@...com>
Subject: [PATCH 1/1] mm/page_alloc: auto-tune min_free_kbytes on atomic allocation failure
Introduce a mechanism to dynamically increase vm.min_free_kbytes when
critical atomic allocations (GFP_ATOMIC, order-0) fail. This mitigates
recurring network packet drops and other atomic allocation failures by
reserving more free memory once the first failure is observed.
The adjustment doubles min_free_kbytes upon each failure (exponential increase),
capped at 1% of total RAM.
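
As an illustration only (not part of this patch), the userspace sketch
below mirrors the same doubling-with-1%-cap arithmetic, reading MemTotal
from /proc/meminfo and the current setting from
/proc/sys/vm/min_free_kbytes, so the resulting ceiling can be
sanity-checked on a given machine:

	/*
	 * Illustration only, not part of this patch: mirror the
	 * doubling-with-cap policy in userspace.
	 */
	#include <stdio.h>

	int main(void)
	{
		char line[256];
		long total_kb = -1, cur = -1, cap, next;
		FILE *mi = fopen("/proc/meminfo", "r");
		FILE *mf = fopen("/proc/sys/vm/min_free_kbytes", "r");

		if (!mi || !mf)
			return 1;
		while (fgets(line, sizeof(line), mi))
			if (sscanf(line, "MemTotal: %ld kB", &total_kb) == 1)
				break;
		if (total_kb < 0 || fscanf(mf, "%ld", &cur) != 1)
			return 1;
		fclose(mi);
		fclose(mf);

		cap = total_kb / 100;	/* 1% of RAM, as in the patch */
		next = cur * 2;		/* one boost step */
		if (next > cap)
			next = cap;
		if (next < cur)
			next = cur;	/* the boost never lowers the value */
		printf("current=%ld kB, after one boost=%ld kB, cap=%ld kB\n",
		       cur, next, cap);
		return 0;
	}

On a 256 GiB machine, for example, the cap works out to
268435456 kB / 100, about 2684354 kB (~2.6 GiB) of reserved free memory.
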
Observed failure logs:
[38535641.026406] node 0: slabs: 941, objs: 54656, free: 0
[38535641.037711] node 1: slabs: 349, objs: 22096, free: 272
[38535641.049025] node 1: slabs: 349, objs: 22096, free: 272
[38535642.795972] SLUB: Unable to allocate memory on node -1, gfp=0x480020(GFP_ATOMIC)
[38535642.805017] cache: skbuff_head_cache, object size: 232, buffer size: 256, default order: 2, min order: 0
[38535642.816311] node 0: slabs: 854, objs: 42320, free: 0
[38535642.823066] node 1: slabs: 400, objs: 25360, free: 294
[38535643.070199] SLUB: Unable to allocate memory on node -1, gfp=0x480020(GFP_ATOMIC)
[38535643.078861] cache: skbuff_head_cache, object size: 232, buffer size: 256, default order: 2, min order: 0
[38535643.089719] node 0: slabs: 841, objs: 41824, free: 0
[38535643.096513] node 1: slabs: 393, objs: 24480, free: 272
[38535643.484149] SLUB: Unable to allocate memory on node -1, gfp=0x480020(GFP_ATOMIC)
[38535643.492831] cache: skbuff_head_cache, object size: 232, buffer size: 256, default order: 2, min order: 0
[38535643.503666] node 0: slabs: 898, objs: 43120, free: 159
[38535643.510140] node 1: slabs: 404, objs: 25424, free: 319
[38535644.699224] SLUB: Unable to allocate memory on node -1, gfp=0x480020(GFP_ATOMIC)
[38535644.707911] cache: skbuff_head_cache, object size: 232, buffer size: 256, default order: 2, min order: 0
[38535644.718700] node 0: slabs: 1031, objs: 43328, free: 0
[38535644.725059] node 1: slabs: 339, objs: 17616, free: 317
[38535645.428345] SLUB: Unable to allocate memory on node -1, gfp=0x480020(GFP_ATOMIC)
[38535645.436888] cache: skbuff_head_cache, object size: 232, buffer size: 256, default order: 2, min order: 0
[38535645.447664] node 0: slabs: 940, objs: 40864, free: 144
[38535645.454026] node 1: slabs: 322, objs: 19168, free: 383
[38535645.556122] SLUB: Unable to allocate memory on node -1, gfp=0x480020(GFP_ATOMIC)
[38535645.564576] cache: skbuff_head_cache, object size: 232, buffer size: 256, default order: 2, min order: 0
[38535649.655523] warn_alloc: 59 callbacks suppressed
[38535649.655527] swapper/100: page allocation failure: order:0, mode:0x480020(GFP_ATOMIC), nodemask=(null)
[38535649.671692] swapper/100 cpuset=/ mems_allowed=0-1
Signed-off-by: wujing <realwujing@...com>
Signed-off-by: Qiliang Yuan <yuanql9@...natelecom.cn>
---
mm/page_alloc.c | 33 +++++++++++++++++++++++++++++++++
1 file changed, 33 insertions(+)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c380f063e8b7..9a24e2b6cfbf 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -30,6 +30,7 @@
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
+#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/pagevec.h>
@@ -3975,6 +3976,9 @@ static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
mem_cgroup_show_protected_memory(NULL);
}
+static void boost_min_free_kbytes_workfn(struct work_struct *work);
+static DECLARE_WORK(boost_min_free_kbytes_work, boost_min_free_kbytes_workfn);
+
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
{
struct va_format vaf;
@@ -4947,6 +4951,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
goto retry;
}
fail:
+	/* Auto-tuning: trigger boost if a GFP_ATOMIC order-0 allocation fails */
+	if ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC && order == 0)
+		schedule_work(&boost_min_free_kbytes_work);
+
warn_alloc(gfp_mask, ac->nodemask,
"page allocation failure: order:%u", order);
got_pg:
@@ -7682,3 +7690,28 @@ struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int or
return page;
}
EXPORT_SYMBOL_GPL(alloc_pages_nolock_noprof);
+
+static void boost_min_free_kbytes_workfn(struct work_struct *work)
+{
+	int new_min;
+
+	/* Cap at 1% of total RAM for safety */
+	unsigned long total_kbytes = totalram_pages() << (PAGE_SHIFT - 10);
+	int max_limit = total_kbytes / 100;
+
+	/* Exponential increase: double the current value */
+	new_min = min_free_kbytes * 2;
+
+	if (new_min > max_limit)
+		new_min = max_limit;
+
+	if (new_min > min_free_kbytes) {
+		min_free_kbytes = new_min;
+		/* Update user_min_free_kbytes so it persists through recalculations */
+		if (new_min > user_min_free_kbytes)
+			user_min_free_kbytes = new_min;
+
+		setup_per_zone_wmarks();
+		pr_info("Auto-tuning: unexpected atomic failure detected, increasing min_free_kbytes to %d\n", min_free_kbytes);
+	}
+}
--
2.39.5