Message-ID: <20250814153307.1553061-18-raghavendra.kt@amd.com>
Date: Thu, 14 Aug 2025 15:33:07 +0000
From: Raghavendra K T <raghavendra.kt@....com>
To: <raghavendra.kt@....com>
CC: <AneeshKumar.KizhakeVeetil@....com>, <Michael.Day@....com>,
<akpm@...ux-foundation.org>, <bharata@....com>, <dave.hansen@...el.com>,
<david@...hat.com>, <dongjoo.linux.dev@...il.com>, <feng.tang@...el.com>,
<gourry@...rry.net>, <hannes@...xchg.org>, <honggyu.kim@...com>,
<hughd@...gle.com>, <jhubbard@...dia.com>, <jon.grimm@....com>,
<k.shutemov@...il.com>, <kbusch@...a.com>, <kmanaouil.dev@...il.com>,
<leesuyeon0506@...il.com>, <leillc@...gle.com>, <liam.howlett@...cle.com>,
<linux-kernel@...r.kernel.org>, <linux-mm@...ck.org>,
<mgorman@...hsingularity.net>, <mingo@...hat.com>, <nadav.amit@...il.com>,
<nphamcs@...il.com>, <peterz@...radead.org>, <riel@...riel.com>,
<rientjes@...gle.com>, <rppt@...nel.org>, <santosh.shukla@....com>,
<shivankg@....com>, <shy828301@...il.com>, <sj@...nel.org>, <vbabka@...e.cz>,
<weixugc@...gle.com>, <willy@...radead.org>, <ying.huang@...ux.alibaba.com>,
<ziy@...dia.com>, <Jonathan.Cameron@...wei.com>, <dave@...olabs.net>,
<yuanchu@...gle.com>, <kinseyho@...gle.com>, <hdanton@...a.com>,
<harry.yoo@...cle.com>
Subject: [RFC PATCH V3 17/17] mm: Create a list of fallback target nodes
These fallback target nodes are used as hints for migration when the
current target node is nearly full.

TBD: Implement migration to the fallback nodes.
Signed-off-by: Raghavendra K T <raghavendra.kt@....com>
---
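Notes (not part of the commit message): a minimal sketch of how the
recorded fallback nodemask could be consumed once migration to the
fallback nodes is implemented. The helper name, its caller, and the
simple N_MEMORY check below are illustrative assumptions only; the
actual selection policy is still TBD as noted above.

/*
 * Illustrative only, not part of this patch: pick the next candidate
 * from the fallback nodemask recorded by the scanner, skipping the
 * primary target that is near full.
 */
static int kmigrated_next_fallback_node(struct kmigrated_mm_slot *mm_slot,
					int busy_node)
{
	int node;

	for_each_node_mask(node, mm_slot->migration_nmask) {
		if (node == busy_node)
			continue;
		/* Placeholder check; a real capacity/watermark test is TBD. */
		if (node_state(node, N_MEMORY))
			return node;
	}

	return NUMA_NO_NODE;
}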
mm/kscand.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/mm/kscand.c b/mm/kscand.c
index 641150755517..a88df9ac2eaa 100644
--- a/mm/kscand.c
+++ b/mm/kscand.c
@@ -136,6 +136,7 @@ struct kscand_scanctrl {
struct kscand_nodeinfo *nodeinfo[MAX_NUMNODES];
unsigned long address;
unsigned long nr_to_scan;
+ nodemask_t nmask;
};
struct kscand_scanctrl kscand_scanctrl;
@@ -148,6 +149,8 @@ struct kmigrated_mm_slot {
spinlock_t migrate_lock;
/* Head of per mm migration list */
struct list_head migrate_head;
+ /* Indicates set of fallback nodes to migrate. */
+ nodemask_t migration_nmask;
/* Indicates weighted success, failure */
int msuccess, mfailed, fratio;
};
@@ -522,6 +525,7 @@ static void reset_scanctrl(struct kscand_scanctrl *scanctrl)
{
int node;
+ nodes_clear(scanctrl->nmask);
for_each_node_state(node, N_MEMORY)
reset_nodeinfo(scanctrl->nodeinfo[node]);
@@ -547,9 +551,11 @@ static int get_target_node(struct kscand_scanctrl *scanctrl)
int node, target_node = NUMA_NO_NODE;
unsigned long prev = 0;
+ nodes_clear(scanctrl->nmask);
for_each_node(node) {
if (node_is_toptier(node) && scanctrl->nodeinfo[node]) {
/* This creates a fallback migration node list */
+ node_set(node, scanctrl->nmask);
if (get_nodeinfo_nr_accessed(scanctrl->nodeinfo[node]) > prev) {
prev = get_nodeinfo_nr_accessed(scanctrl->nodeinfo[node]);
target_node = node;
@@ -1396,6 +1402,9 @@ static unsigned long kscand_scan_mm_slot(void)
total = get_slowtier_accesed(&kscand_scanctrl);
target_node = get_target_node(&kscand_scanctrl);
+ if (kmigrated_mm_slot)
+ nodes_copy(kmigrated_mm_slot->migration_nmask,
+ kscand_scanctrl.nmask);
mm_target_node = READ_ONCE(mm->target_node);
--
2.34.1