Message-ID: <20260108203755.1163107-4-gourry@gourry.net>
Date: Thu, 8 Jan 2026 15:37:50 -0500
From: Gregory Price <gourry@...rry.net>
To: linux-mm@...ck.org,
cgroups@...r.kernel.org,
linux-cxl@...r.kernel.org
Cc: linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-fsdevel@...r.kernel.org,
kernel-team@...a.com,
longman@...hat.com,
tj@...nel.org,
hannes@...xchg.org,
mkoutny@...e.com,
corbet@....net,
gregkh@...uxfoundation.org,
rafael@...nel.org,
dakr@...nel.org,
dave@...olabs.net,
jonathan.cameron@...wei.com,
dave.jiang@...el.com,
alison.schofield@...el.com,
vishal.l.verma@...el.com,
ira.weiny@...el.com,
dan.j.williams@...el.com,
akpm@...ux-foundation.org,
vbabka@...e.cz,
surenb@...gle.com,
mhocko@...e.com,
jackmanb@...gle.com,
ziy@...dia.com,
david@...nel.org,
lorenzo.stoakes@...cle.com,
Liam.Howlett@...cle.com,
rppt@...nel.org,
axelrasmussen@...gle.com,
yuanchu@...gle.com,
weixugc@...gle.com,
yury.norov@...il.com,
linux@...musvillemoes.dk,
rientjes@...gle.com,
shakeel.butt@...ux.dev,
chrisl@...nel.org,
kasong@...cent.com,
shikemeng@...weicloud.com,
nphamcs@...il.com,
bhe@...hat.com,
baohua@...nel.org,
yosry.ahmed@...ux.dev,
chengming.zhou@...ux.dev,
roman.gushchin@...ux.dev,
muchun.song@...ux.dev,
osalvador@...e.de,
matthew.brost@...el.com,
joshua.hahnjy@...il.com,
rakie.kim@...com,
byungchul@...com,
gourry@...rry.net,
ying.huang@...ux.alibaba.com,
apopple@...dia.com,
cl@...two.org,
harry.yoo@...cle.com,
zhengqi.arch@...edance.com
Subject: [RFC PATCH v3 3/8] mm: restrict slub, compaction, and page_alloc to sysram

Restrict page allocation and zone iteration to N_MEMORY nodes via
cpusets, or to node_states[N_MEMORY] directly when cpusets are
disabled.  __GFP_THISNODE allows N_PRIVATE nodes to be used
explicitly (all nodes become valid targets when __GFP_THISNODE is
set).

This constrains core users of nodemasks to node_states[N_MEMORY],
which is guaranteed to at least contain the set of nodes with sysram
memory blocks present at boot.
Signed-off-by: Gregory Price <gourry@...rry.net>
---
 include/linux/gfp.h |  6 ++++++
 mm/compaction.c     |  6 ++----
 mm/page_alloc.c     | 27 ++++++++++++++++-----------
 mm/slub.c           |  8 ++++++--
 4 files changed, 30 insertions(+), 17 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b155929af5b1..0b6cdef7a232 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -321,6 +321,7 @@ struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
 		struct mempolicy *mpol, pgoff_t ilx, int nid);
 struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
 		unsigned long addr);
+bool numa_zone_allowed(int alloc_flags, struct zone *zone, gfp_t gfp_mask);
 #else
 static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)
 {
@@ -337,6 +338,11 @@ static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int orde
 }
 #define vma_alloc_folio_noprof(gfp, order, vma, addr) \
 	folio_alloc_noprof(gfp, order)
+static inline bool numa_zone_allowed(int alloc_flags, struct zone *zone,
+				     gfp_t gfp_mask)
+{
+	return true;
+}
 #endif
 
 #define alloc_pages(...) alloc_hooks(alloc_pages_noprof(__VA_ARGS__))
diff --git a/mm/compaction.c b/mm/compaction.c
index 1e8f8eca318c..63ef9803607f 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2829,10 +2829,8 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
 					ac->highest_zoneidx, ac->nodemask) {
 		enum compact_result status;
 
-		if (cpusets_enabled() &&
-			(alloc_flags & ALLOC_CPUSET) &&
-			!__cpuset_zone_allowed(zone, gfp_mask))
-			continue;
+		if (!numa_zone_allowed(alloc_flags, zone, gfp_mask))
+			continue;
 
 		if (prio > MIN_COMPACT_PRIORITY
 					&& compaction_deferred(zone, order)) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bb89d81aa68c..76b12cef7dfc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3723,6 +3723,16 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
 				node_reclaim_distance;
 }
+bool numa_zone_allowed(int alloc_flags, struct zone *zone, gfp_t gfp_mask)
+{
+	/* If cpusets is being used, check mems_allowed or sysram_nodes */
+	if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET))
+		return cpuset_zone_allowed(zone, gfp_mask);
+
+	/* Otherwise only allow N_PRIVATE if __GFP_THISNODE is present */
+	return (gfp_mask & __GFP_THISNODE) ||
+	       node_isset(zone_to_nid(zone), node_states[N_MEMORY]);
+}
 #else /* CONFIG_NUMA */
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 {
@@ -3814,10 +3824,9 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		struct page *page;
 		unsigned long mark;
 
-		if (cpusets_enabled() &&
-			(alloc_flags & ALLOC_CPUSET) &&
-			!__cpuset_zone_allowed(zone, gfp_mask))
-			continue;
+		if (!numa_zone_allowed(alloc_flags, zone, gfp_mask))
+			continue;
+
 		/*
 		 * When allocating a page cache page for writing, we
 		 * want to get it from a node that is within its dirty
@@ -4618,10 +4627,8 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
 		unsigned long min_wmark = min_wmark_pages(zone);
 		bool wmark;
 
-		if (cpusets_enabled() &&
-			(alloc_flags & ALLOC_CPUSET) &&
-			!__cpuset_zone_allowed(zone, gfp_mask))
-			continue;
+		if (!numa_zone_allowed(alloc_flags, zone, gfp_mask))
+			continue;
 
 		available = reclaimable = zone_reclaimable_pages(zone);
 		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
@@ -5131,10 +5138,8 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 	for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) {
 		unsigned long mark;
 
-		if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
-		    !__cpuset_zone_allowed(zone, gfp)) {
+		if (!numa_zone_allowed(alloc_flags, zone, gfp))
 			continue;
-		}
 
 		if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
 		    zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
diff --git a/mm/slub.c b/mm/slub.c
index 861592ac5425..adebbddc48f6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3594,9 +3594,13 @@ static struct slab *get_any_partial(struct kmem_cache *s,
 			struct kmem_cache_node *n;
 
 			n = get_node(s, zone_to_nid(zone));
+			if (!n)
+				continue;
+
+			if (!numa_zone_allowed(ALLOC_CPUSET, zone, pc->flags))
+				continue;
 
-			if (n && cpuset_zone_allowed(zone, pc->flags) &&
-					n->nr_partial > s->min_partial) {
+			if (n->nr_partial > s->min_partial) {
 				slab = get_partial_node(s, n, pc);
 				if (slab) {
 					/*
--
2.52.0