Message-ID: <20251110052343.208768-8-bharata@amd.com>
Date: Mon, 10 Nov 2025 10:53:42 +0530
From: Bharata B Rao <bharata@....com>
To: <linux-kernel@...r.kernel.org>, <linux-mm@...ck.org>
CC: <Jonathan.Cameron@...wei.com>, <dave.hansen@...el.com>,
<gourry@...rry.net>, <mgorman@...hsingularity.net>, <mingo@...hat.com>,
<peterz@...radead.org>, <raghavendra.kt@....com>, <riel@...riel.com>,
<rientjes@...gle.com>, <sj@...nel.org>, <weixugc@...gle.com>,
<willy@...radead.org>, <ying.huang@...ux.alibaba.com>, <ziy@...dia.com>,
<dave@...olabs.net>, <nifan.cxl@...il.com>, <xuezhengchu@...wei.com>,
<yiannis@...corp.com>, <akpm@...ux-foundation.org>, <david@...hat.com>,
<byungchul@...com>, <kinseyho@...gle.com>, <joshua.hahnjy@...il.com>,
<yuanchu@...gle.com>, <balbirs@...dia.com>, <alok.rathore@...sung.com>,
<shivankg@....com>, Bharata B Rao <bharata@....com>
Subject: [RFC PATCH v3 7/8] mm: klruscand: use mglru scanning for page promotion

From: Kinsey Ho <kinseyho@...gle.com>

Introduce a new kernel daemon, klruscand, that periodically invokes the
MGLRU page table walk. It leverages the new callbacks to gather access
information and forwards it to the pghot sub-system for promotion
decisions.

This benefits from reusing the existing MGLRU page table walk
infrastructure, which is optimized with features such as hierarchical
scanning and Bloom filters to reduce CPU overhead.

As a future optimization, the scan interval can be tuned per memcg.

Signed-off-by: Kinsey Ho <kinseyho@...gle.com>
Signed-off-by: Yuanchu Xie <yuanchu@...gle.com>
[Reduced the scan interval to 500ms, KLRUSCAND to default n in config]
Signed-off-by: Bharata B Rao <bharata@....com>
---
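A note for reviewers on the callback contract: below is a minimal
user-space sketch (not kernel code, only an illustration) of the
batch-and-flush pattern that mm/klruscand.c implements. The scanner
reports each accessed PFN through an "accessed" callback; when that
callback signals a full batch, the scanner invokes the "flush"
callback to drain it. scan_one_range() is a hypothetical stand-in for
the MGLRU walk (lru_gen_scan_lruvec()) and printf() stands in for
pghot_record_access().

#include <stdbool.h>
#include <stdio.h>

#define BATCH_SIZE 8	/* kept tiny here; the patch uses (2 << 16) */

static unsigned long pfn_batch[BATCH_SIZE];
static int batch_index;

/* Drain the batch; the patch records each PFN with pghot instead. */
static void flush_cb(void)
{
	for (int i = 0; i < batch_index; i++)
		printf("record access: pfn %#lx\n", pfn_batch[i]);
	batch_index = 0;
}

/* Buffer one PFN; return true when the caller should flush. */
static bool accessed_cb(unsigned long pfn)
{
	if (batch_index < BATCH_SIZE)
		pfn_batch[batch_index++] = pfn;
	return batch_index == BATCH_SIZE;
}

/* Hypothetical driver standing in for the MGLRU page table walk. */
static void scan_one_range(unsigned long start, unsigned long nr)
{
	for (unsigned long pfn = start; pfn < start + nr; pfn++) {
		if (accessed_cb(pfn))
			flush_cb();
	}
	flush_cb();	/* drain any partial batch at the end */
}

int main(void)
{
	scan_one_range(0x1000, 20);
	return 0;
}

To exercise the daemon itself, CONFIG_KLRUSCAND has to be enabled
explicitly (it depends on PGHOT and LRU_GEN_WALKS_MMU and defaults to n).
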
mm/Kconfig | 8 ++++
mm/Makefile | 1 +
mm/klruscand.c | 110 +++++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 119 insertions(+)
create mode 100644 mm/klruscand.c
diff --git a/mm/Kconfig b/mm/Kconfig
index b5e84cb50253..84ec9a9aca13 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1383,6 +1383,14 @@ config PGHOT
 	  by various sources. Asynchronous promotion is done by per-node
 	  kernel threads.
 
+config KLRUSCAND
+	bool "Kernel lower tier access scan daemon"
+	default n
+	depends on PGHOT && LRU_GEN_WALKS_MMU
+	help
+	  Scan for accesses from lower tiers by invoking MGLRU to perform
+	  page table walks.
+
 source "mm/damon/Kconfig"
 
 endmenu
diff --git a/mm/Makefile b/mm/Makefile
index a6fac171c36e..1c0c79fec106 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -147,3 +147,4 @@ obj-$(CONFIG_EXECMEM) += execmem.o
 obj-$(CONFIG_TMPFS_QUOTA) += shmem_quota.o
 obj-$(CONFIG_PT_RECLAIM) += pt_reclaim.o
 obj-$(CONFIG_PGHOT) += pghot.o
+obj-$(CONFIG_KLRUSCAND) += klruscand.o
diff --git a/mm/klruscand.c b/mm/klruscand.c
new file mode 100644
index 000000000000..13a41b38d67d
--- /dev/null
+++ b/mm/klruscand.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/memcontrol.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/memory-tiers.h>
+#include <linux/pghot.h>
+
+#include "internal.h"
+
+#define KLRUSCAND_INTERVAL 500	/* scan interval in ms */
+#define BATCH_SIZE (2 << 16)	/* max PFNs buffered before a flush */
+
+static struct task_struct *scan_thread;
+static unsigned long pfn_batch[BATCH_SIZE];
+static int batch_index;
+
+static void flush_cb(void)
+{
+	int i;
+
+	for (i = 0; i < batch_index; i++) {
+		unsigned long pfn = pfn_batch[i];
+
+		pghot_record_access(pfn, NUMA_NO_NODE, PGHOT_PGTABLE_SCAN, jiffies);
+
+		if (i % 16 == 0)
+			cond_resched();
+	}
+	batch_index = 0;
+}
+
+static bool accessed_cb(unsigned long pfn)
+{
+	WARN_ON_ONCE(batch_index == BATCH_SIZE);
+
+	if (batch_index < BATCH_SIZE)
+		pfn_batch[batch_index++] = pfn;
+
+	return batch_index == BATCH_SIZE;
+}
+
+static int klruscand_run(void *unused)
+{
+	struct lru_gen_mm_walk *walk;
+
+	walk = kzalloc(sizeof(*walk),
+		       __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
+	if (!walk)
+		return -ENOMEM;
+
+	while (!kthread_should_stop()) {
+		unsigned long next_wake_time;
+		long sleep_time;
+		struct mem_cgroup *memcg;
+		int flags;
+		int nid;
+
+		next_wake_time = jiffies + msecs_to_jiffies(KLRUSCAND_INTERVAL);
+
+		for_each_node_state(nid, N_MEMORY) {
+			pg_data_t *pgdat = NODE_DATA(nid);
+			struct reclaim_state rs = { 0 };
+
+			if (node_is_toptier(nid))
+				continue;
+
+			rs.mm_walk = walk;
+			set_task_reclaim_state(current, &rs);
+			flags = memalloc_noreclaim_save();
+
+			memcg = mem_cgroup_iter(NULL, NULL, NULL);
+			do {
+				struct lruvec *lruvec =
+					mem_cgroup_lruvec(memcg, pgdat);
+				unsigned long max_seq =
+					READ_ONCE(lruvec->lrugen.max_seq);
+
+				lru_gen_scan_lruvec(lruvec, max_seq, accessed_cb, flush_cb);
+				cond_resched();
+			} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
+
+			memalloc_noreclaim_restore(flags);
+			set_task_reclaim_state(current, NULL);
+			memset(walk, 0, sizeof(*walk));
+		}
+
+		sleep_time = next_wake_time - jiffies;
+		if (sleep_time > 0 && sleep_time != MAX_SCHEDULE_TIMEOUT)
+			schedule_timeout_idle(sleep_time);
+	}
+	kfree(walk);
+	return 0;
+}
+
+static int __init klruscand_init(void)
+{
+	struct task_struct *task;
+
+	task = kthread_run(klruscand_run, NULL, "klruscand");
+
+	if (IS_ERR(task)) {
+		pr_err("Failed to create klruscand kthread\n");
+		return PTR_ERR(task);
+	}
+
+	scan_thread = task;
+	return 0;
+}
+module_init(klruscand_init);
--
2.34.1