Message-ID: <20251112192936.2574429-6-gourry@gourry.net>
Date: Wed, 12 Nov 2025 14:29:21 -0500
From: Gregory Price <gourry@...rry.net>
To: linux-mm@...ck.org
Cc: kernel-team@...a.com,
linux-cxl@...r.kernel.org,
linux-kernel@...r.kernel.org,
nvdimm@...ts.linux.dev,
linux-fsdevel@...r.kernel.org,
cgroups@...r.kernel.org,
dave@...olabs.net,
jonathan.cameron@...wei.com,
dave.jiang@...el.com,
alison.schofield@...el.com,
vishal.l.verma@...el.com,
ira.weiny@...el.com,
dan.j.williams@...el.com,
longman@...hat.com,
akpm@...ux-foundation.org,
david@...hat.com,
lorenzo.stoakes@...cle.com,
Liam.Howlett@...cle.com,
vbabka@...e.cz,
rppt@...nel.org,
surenb@...gle.com,
mhocko@...e.com,
osalvador@...e.de,
ziy@...dia.com,
matthew.brost@...el.com,
joshua.hahnjy@...il.com,
rakie.kim@...com,
byungchul@...com,
gourry@...rry.net,
ying.huang@...ux.alibaba.com,
apopple@...dia.com,
mingo@...hat.com,
peterz@...radead.org,
juri.lelli@...hat.com,
vincent.guittot@...aro.org,
dietmar.eggemann@....com,
rostedt@...dmis.org,
bsegall@...gle.com,
mgorman@...e.de,
vschneid@...hat.com,
tj@...nel.org,
hannes@...xchg.org,
mkoutny@...e.com,
kees@...nel.org,
muchun.song@...ux.dev,
roman.gushchin@...ux.dev,
shakeel.butt@...ux.dev,
rientjes@...gle.com,
jackmanb@...gle.com,
cl@...two.org,
harry.yoo@...cle.com,
axelrasmussen@...gle.com,
yuanchu@...gle.com,
weixugc@...gle.com,
zhengqi.arch@...edance.com,
yosry.ahmed@...ux.dev,
nphamcs@...il.com,
chengming.zhou@...ux.dev,
fabio.m.de.francesco@...ux.intel.com,
rrichter@....com,
ming.li@...omail.com,
usamaarif642@...il.com,
brauner@...nel.org,
oleg@...hat.com,
namcao@...utronix.de,
escape@...ux.alibaba.com,
dongjoo.seo1@...sung.com
Subject: [RFC PATCH v2 05/11] mm: restrict slub, oom, compaction, and page_alloc to sysram by default
Restrict page allocation and zone iteration in mm so that SPM nodes are
skipped: via cpusets when enabled, or via mt_sysram_nodelist when
cpusets are disabled. This constrains core users of nodemasks to the
mt_sysram_nodelist, which is guaranteed to contain at least the set of
nodes with sysram memory blocks present at boot (or NULL if NUMA is
compiled out).

If the sysram nodelist is empty (i.e. something in memory-tiers is
broken), mt_sysram_nodemask() returns NULL, which still allows all
zones to be iterated.
Signed-off-by: Gregory Price <gourry@...rry.net>
---
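Note for reviewers: the hunks below lean on two memory-tiers helpers,
mt_sysram_nodemask() and mt_node_allowed(), that are not part of this
patch. The following is only a minimal sketch of the semantics assumed
here, matching the changelog above; the real definitions (presumably
introduced earlier in this series) may differ, and any gfp_mask-based
opt-outs are deliberately omitted:

	/* Sketch only -- assumes <linux/nodemask.h>; not the series' implementation. */
	static nodemask_t mt_sysram_nodelist;	/* nodes with sysram memory blocks at boot */

	nodemask_t *mt_sysram_nodemask(void)
	{
		/* Empty list means memory-tiers is broken; NULL lets callers walk all zones. */
		return nodes_empty(mt_sysram_nodelist) ? NULL : &mt_sysram_nodelist;
	}

	bool mt_node_allowed(int nid, gfp_t gfp_mask)
	{
		nodemask_t *sysram = mt_sysram_nodemask();

		/* gfp_mask handling (if any) is omitted from this sketch. */
		return !sysram || node_isset(nid, *sysram);
	}

Under these assumed semantics, an empty mt_sysram_nodelist degrades to
the pre-patch behavior (no extra filtering), as described in the last
paragraph of the changelog.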
mm/compaction.c | 3 +++
mm/oom_kill.c | 5 ++++-
mm/page_alloc.c | 18 ++++++++++++++----
mm/slub.c | 15 ++++++++++++---
4 files changed, 33 insertions(+), 8 deletions(-)
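The same zone-iteration filter recurs in try_to_compact_pages(),
get_page_from_freelist(), should_reclaim_retry() and
alloc_pages_bulk_noprof(); seen in isolation, the pattern added by the
hunks below looks roughly like this (illustrative only, using the same
names as the hunks):

	for_each_zone_zonelist_nodemask(zone, z, zonelist, highest_zoneidx, nodemask) {
		/* Honor the cpuset restriction when the allocation is subject to it. */
		if ((alloc_flags & ALLOC_CPUSET) &&
		    !cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/* Otherwise gate the zone on the memory-tiers sysram nodemask. */
		else if (!mt_node_allowed(zone_to_nid(zone), gfp_mask))
			continue;

		/* ... zone is eligible; proceed with allocation/compaction ... */
	}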
diff --git a/mm/compaction.c b/mm/compaction.c
index d2176935d3dd..7b73179d1fbf 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -13,6 +13,7 @@
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
+#include <linux/memory-tiers.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
@@ -2832,6 +2833,8 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
if ((alloc_flags & ALLOC_CPUSET) &&
!cpuset_zone_allowed(zone, gfp_mask))
continue;
+ else if (!mt_node_allowed(zone_to_nid(zone), gfp_mask))
+ continue;
if (prio > MIN_COMPACT_PRIORITY
&& compaction_deferred(zone, order)) {
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index c145b0feecc1..386b4ceeaeb8 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -34,6 +34,7 @@
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
+#include <linux/memory-tiers.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
@@ -1118,6 +1119,8 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
bool out_of_memory(struct oom_control *oc)
{
unsigned long freed = 0;
+ if (!oc->nodemask)
+ oc->nodemask = mt_sysram_nodemask();
if (oom_killer_disabled)
return false;
@@ -1154,7 +1157,7 @@ bool out_of_memory(struct oom_control *oc)
*/
oc->constraint = constrained_alloc(oc);
if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
- oc->nodemask = NULL;
+ oc->nodemask = mt_sysram_nodemask();
check_panic_on_oom(oc);
if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bcaf1125d109..2ea6a50f6079 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -34,6 +34,7 @@
#include <linux/cpuset.h>
#include <linux/pagevec.h>
#include <linux/memory_hotplug.h>
+#include <linux/memory-tiers.h>
#include <linux/nodemask.h>
#include <linux/vmstat.h>
#include <linux/fault-inject.h>
@@ -3753,6 +3754,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
if ((alloc_flags & ALLOC_CPUSET) &&
!cpuset_zone_allowed(zone, gfp_mask))
continue;
+ else if (!mt_node_allowed(zone_to_nid(zone), gfp_mask))
+ continue;
/*
* When allocating a page cache page for writing, we
* want to get it from a node that is within its dirty
@@ -4555,6 +4558,8 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
if ((alloc_flags & ALLOC_CPUSET) &&
!cpuset_zone_allowed(zone, gfp_mask))
continue;
+ else if (!mt_node_allowed(zone_to_nid(zone), gfp_mask))
+ continue;
available = reclaimable = zone_reclaimable_pages(zone);
available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
@@ -4608,7 +4613,7 @@ check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
*/
if (cpusets_enabled() && ac->nodemask &&
!cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
- ac->nodemask = NULL;
+ ac->nodemask = mt_sysram_nodemask();
return true;
}
@@ -4792,7 +4797,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
* user oriented.
*/
if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
- ac->nodemask = NULL;
+ ac->nodemask = mt_sysram_nodemask();
ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
ac->highest_zoneidx, ac->nodemask);
}
@@ -4944,7 +4949,8 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
ac->nodemask = &cpuset_current_mems_allowed;
else
*alloc_flags |= ALLOC_CPUSET;
- }
+ } else if (!ac->nodemask) /* sysram_nodes may be NULL during __init */
+ ac->nodemask = mt_sysram_nodemask();
might_alloc(gfp_mask);
@@ -5053,6 +5059,8 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
if ((alloc_flags & ALLOC_CPUSET) &&
!cpuset_zone_allowed(zone, gfp))
continue;
+ else if (!mt_node_allowed(zone_to_nid(zone), gfp))
+ continue;
if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
@@ -5187,8 +5195,10 @@ struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order,
/*
* Restore the original nodemask if it was potentially replaced with
* &cpuset_current_mems_allowed to optimize the fast-path attempt.
+ *
+ * If not set, default to sysram nodes.
*/
- ac.nodemask = nodemask;
+ ac.nodemask = nodemask ? nodemask : mt_sysram_nodemask();
page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
diff --git a/mm/slub.c b/mm/slub.c
index 1bf65c421325..c857db97c6a0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -28,6 +28,7 @@
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
+#include <linux/memory-tiers.h>
#include <linux/ctype.h>
#include <linux/stackdepot.h>
#include <linux/debugobjects.h>
@@ -3576,11 +3577,19 @@ static struct slab *get_any_partial(struct kmem_cache *s,
zonelist = node_zonelist(mempolicy_slab_node(), pc->flags);
for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
struct kmem_cache_node *n;
+ int nid = zone_to_nid(zone);
+ bool allowed;
- n = get_node(s, zone_to_nid(zone));
+ n = get_node(s, nid);
+ if (!n)
+ continue;
+
+ if (cpusets_enabled())
+ allowed = __cpuset_zone_allowed(zone, pc->flags);
+ else
+ allowed = mt_node_allowed(nid, pc->flags);
- if (n && cpuset_zone_allowed(zone, pc->flags) &&
- n->nr_partial > s->min_partial) {
+ if (allowed && (n->nr_partial > s->min_partial)) {
slab = get_partial_node(s, n, pc);
if (slab) {
/*
--
2.51.1