[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250814134826.154003-8-bharata@amd.com>
Date: Thu, 14 Aug 2025 19:18:26 +0530
From: Bharata B Rao <bharata@....com>
To: <linux-kernel@...r.kernel.org>, <linux-mm@...ck.org>
CC: <Jonathan.Cameron@...wei.com>, <dave.hansen@...el.com>,
<gourry@...rry.net>, <hannes@...xchg.org>, <mgorman@...hsingularity.net>,
<mingo@...hat.com>, <peterz@...radead.org>, <raghavendra.kt@....com>,
<riel@...riel.com>, <rientjes@...gle.com>, <sj@...nel.org>,
<weixugc@...gle.com>, <willy@...radead.org>, <ying.huang@...ux.alibaba.com>,
<ziy@...dia.com>, <dave@...olabs.net>, <nifan.cxl@...il.com>,
<xuezhengchu@...wei.com>, <yiannis@...corp.com>, <akpm@...ux-foundation.org>,
<david@...hat.com>, <byungchul@...com>, <kinseyho@...gle.com>,
<joshua.hahnjy@...il.com>, <yuanchu@...gle.com>, <balbirs@...dia.com>,
Bharata B Rao <bharata@....com>
Subject: [RFC PATCH v1 7/7] mm: klruscand: use mglru scanning for page promotion
From: Kinsey Ho <kinseyho@...gle.com>
Introduce a new kernel daemon, klruscand, that periodically invokes the
MGLRU page table walk. It leverages the new callbacks to gather access
information and forwards it to the pghot hot page tracking sub-system
for promotion decisions.
This benefits from reusing the existing MGLRU page table walk
infrastructure, which is optimized with features such as hierarchical
scanning and bloom filters to reduce CPU overhead.
As an additional optimization to be added in the future, we can tune
the scan intervals for each memcg.
Signed-off-by: Kinsey Ho <kinseyho@...gle.com>
Signed-off-by: Yuanchu Xie <yuanchu@...gle.com>
Signed-off-by: Bharata B Rao <bharata@....com>
[Reduced the scan interval to 100 ms; changed pfn_t to unsigned long]
---
mm/Kconfig | 8 ++++
mm/Makefile | 1 +
mm/klruscand.c | 118 +++++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 127 insertions(+)
create mode 100644 mm/klruscand.c
diff --git a/mm/Kconfig b/mm/Kconfig
index 8b236eb874cf..6d53c1208729 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1393,6 +1393,14 @@ config PGHOT
by various sources. Asynchronous promotion is done by per-node
kernel threads.
+config KLRUSCAND
+ bool "Kernel lower tier access scan daemon"
+ default y
+ depends on PGHOT && LRU_GEN_WALKS_MMU
+ help
+ Scan for accesses from lower tiers by invoking MGLRU to perform
+ page table walks.
+
source "mm/damon/Kconfig"
endmenu
diff --git a/mm/Makefile b/mm/Makefile
index 8799bd0c68ed..1d39ef55f3e5 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -148,3 +148,4 @@ obj-$(CONFIG_EXECMEM) += execmem.o
obj-$(CONFIG_TMPFS_QUOTA) += shmem_quota.o
obj-$(CONFIG_PT_RECLAIM) += pt_reclaim.o
obj-$(CONFIG_PGHOT) += kpromoted.o
+obj-$(CONFIG_KLRUSCAND) += klruscand.o
diff --git a/mm/klruscand.c b/mm/klruscand.c
new file mode 100644
index 000000000000..1a51aab29bd9
--- /dev/null
+++ b/mm/klruscand.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/memcontrol.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/random.h>
+#include <linux/migrate.h>
+#include <linux/mm_inline.h>
+#include <linux/slab.h>
+#include <linux/sched/clock.h>
+#include <linux/memory-tiers.h>
+#include <linux/sched/mm.h>
+#include <linux/sched.h>
+#include <linux/pghot.h>
+
+#include "internal.h"
+
+/* Interval between successive MGLRU scan passes, in milliseconds. */
+#define KLRUSCAND_INTERVAL_MS 100
+/*
+ * Capacity of the PFN batch: 2 << 16 == 128K entries (~1MB of unsigned
+ * longs on 64-bit). NOTE(review): (2 << 16) is an unusual spelling —
+ * confirm this wasn't meant to be 1 << 16.
+ */
+#define BATCH_SIZE (2 << 16)
+
+/* The single scan kthread, created at init (see klruscand_init()). */
+static struct task_struct *scan_thread;
+/* PFNs accumulated by accessed_cb() and drained by flush_cb(). */
+static unsigned long pfn_batch[BATCH_SIZE];
+/*
+ * Count of valid entries in pfn_batch[]. Only accessed from the scan
+ * kthread's MGLRU walk callbacks, so no locking is needed.
+ */
+static int batch_index;
+
+/*
+ * Drain every batched PFN into the pghot hot-page tracking subsystem,
+ * then reset the batch. Invoked by the MGLRU walk when told to flush.
+ */
+static void flush_cb(void)
+{
+	int i;
+
+	for (i = 0; i < batch_index; i++) {
+		pghot_record_access(pfn_batch[i], NUMA_NO_NODE,
+				    PGHOT_PGTABLE_SCAN, jiffies);
+
+		/* Yield periodically; the batch can hold 128K entries. */
+		if (!(i % 16))
+			cond_resched();
+	}
+	batch_index = 0;
+}
+
+/*
+ * Record one accessed PFN reported by the MGLRU walk.
+ *
+ * Returns 0 on success, or -EAGAIN when the batch is full so the
+ * caller knows to flush (via flush_cb()) and retry.
+ */
+static int accessed_cb(unsigned long pfn)
+{
+	if (batch_index < BATCH_SIZE) {
+		pfn_batch[batch_index++] = pfn;
+		return 0;
+	}
+
+	return -EAGAIN;
+}
+
+/*
+ * Main loop of the klruscand kthread.
+ *
+ * Every KLRUSCAND_INTERVAL_MS, walk all memcg lruvecs on every
+ * non-toptier (lower tier) N_MEMORY node via the MGLRU page table
+ * walk, feeding accessed PFNs to the pghot subsystem through
+ * accessed_cb()/flush_cb().
+ *
+ * Returns 0 on kthread stop, or -ENOMEM if the walk state cannot be
+ * allocated.
+ */
+static int klruscand_run(void *unused)
+{
+	struct lru_gen_mm_walk *walk;
+
+	walk = kzalloc(sizeof(*walk),
+		       __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
+	if (!walk)
+		return -ENOMEM;
+
+	while (!kthread_should_stop()) {
+		unsigned long next_wake_time;
+		long sleep_time;
+		struct mem_cgroup *memcg;
+		/* memalloc_noreclaim_save() returns unsigned int, not int. */
+		unsigned int flags;
+		int nid;
+
+		next_wake_time = jiffies + msecs_to_jiffies(KLRUSCAND_INTERVAL_MS);
+
+		for_each_node_state(nid, N_MEMORY) {
+			pg_data_t *pgdat = NODE_DATA(nid);
+			struct reclaim_state rs = { 0 };
+
+			/* Only lower tiers are scanned for promotion. */
+			if (node_is_toptier(nid))
+				continue;
+
+			/*
+			 * Publish the walk state and block reclaim
+			 * recursion for the duration of the scan.
+			 */
+			rs.mm_walk = walk;
+			set_task_reclaim_state(current, &rs);
+			flags = memalloc_noreclaim_save();
+
+			memcg = mem_cgroup_iter(NULL, NULL, NULL);
+			do {
+				struct lruvec *lruvec =
+					mem_cgroup_lruvec(memcg, pgdat);
+				unsigned long max_seq =
+					READ_ONCE((lruvec)->lrugen.max_seq);
+
+				lru_gen_scan_lruvec(lruvec, max_seq,
+						    accessed_cb, flush_cb);
+				cond_resched();
+			} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
+
+			memalloc_noreclaim_restore(flags);
+			set_task_reclaim_state(current, NULL);
+			/* Reset walk state before reusing it for the next node. */
+			memset(walk, 0, sizeof(*walk));
+		}
+
+		sleep_time = next_wake_time - jiffies;
+		if (sleep_time > 0 && sleep_time != MAX_SCHEDULE_TIMEOUT)
+			schedule_timeout_idle(sleep_time);
+	}
+	kfree(walk);
+	return 0;
+}
+
+/*
+ * Create the klruscand kthread at boot. Returns 0 on success or the
+ * PTR_ERR from kthread_run() on failure.
+ */
+static int __init klruscand_init(void)
+{
+	struct task_struct *task = kthread_run(klruscand_run, NULL,
+					       "klruscand");
+
+	if (IS_ERR(task)) {
+		pr_err("Failed to create klruscand kthread\n");
+		return PTR_ERR(task);
+	}
+
+	scan_thread = task;
+	return 0;
+}
+module_init(klruscand_init);
--
2.34.1
Powered by blists - more mailing lists