Message-ID: <20250324220301.1273038-3-kinseyho@google.com>
Date: Mon, 24 Mar 2025 15:03:01 -0700
From: Kinsey Ho <kinseyho@...gle.com>
To: linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc: yuanchu@...gle.com, AneeshKumar.KizhakeVeetil@....com, Hasan.Maruf@....com,
Jonathan.Cameron@...wei.com, Michael.Day@....com, akpm@...ux-foundation.org,
dave.hansen@...el.com, david@...hat.com, feng.tang@...el.com,
gourry@...rry.net, hannes@...xchg.org, honggyu.kim@...com, hughd@...gle.com,
jhubbard@...dia.com, k.shutemov@...il.com, kbusch@...a.com,
kmanaouil.dev@...il.com, leesuyeon0506@...il.com, leillc@...gle.com,
liam.howlett@...cle.com, mgorman@...hsingularity.net, mingo@...hat.com,
nadav.amit@...il.com, nphamcs@...il.com, peterz@...radead.org,
raghavendra.kt@....com, riel@...riel.com, rientjes@...gle.com,
rppt@...nel.org, shivankg@....com, shy828301@...il.com, sj@...nel.org,
vbabka@...e.cz, weixugc@...gle.com, willy@...radead.org,
ying.huang@...ux.alibaba.com, ziy@...dia.com, dave@...olabs.net,
hyeonggon.yoo@...com, bharata@....com, Kinsey Ho <kinseyho@...gle.com>
Subject: [RFC PATCH v1 2/2] mm: klruscand: use mglru scanning for page promotion

Introduce a new kernel daemon, klruscand, that periodically invokes the
MGLRU page table walk. It uses the new accessed/flush callbacks to
gather access information and forwards it to the kpromoted daemon for
promotion decisions.
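
The flow added below can be summarized as the MGLRU walk reporting each
accessed PFN through a per-page callback, with a flush callback draining
the accumulated batch into kpromoted. The snippet is only a condensed,
illustrative view of the code in this patch, not a separate interface:

	/*
	 * Condensed view of mm/klruscand.c below: the walk invokes
	 * accessed_cb() per accessed PFN (batching them), and flush_cb()
	 * forwards the batch to kpromoted.
	 */
	lru_gen_scan_lruvec(lruvec, max_seq, accessed_cb, flush_cb);
	kpromoted_record_access(pfn, NUMA_NO_NODE,
				KPROMOTED_PGTABLE_SCAN, jiffies);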

This reuses the existing MGLRU page table walk infrastructure, which is
already optimized with features such as hierarchical scanning and Bloom
filters to reduce CPU overhead.

As a future optimization, the scan interval can be tuned per memcg.
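
A rough sketch of one possible shape for that tuning follows; the
klruscand_interval_ms field is hypothetical (nothing in this series adds
it) and only stands in for whatever per-memcg knob is introduced later:

	/* Hypothetical helper: mem_cgroup has no such field today. */
	static unsigned int scan_interval_ms(struct mem_cgroup *memcg)
	{
		unsigned int ms = READ_ONCE(memcg->klruscand_interval_ms);

		return ms ? ms : KLRUSCAND_INTERVAL_MS;
	}
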
Signed-off-by: Kinsey Ho <kinseyho@...gle.com>
Signed-off-by: Yuanchu Xie <yuanchu@...gle.com>
---
 mm/Kconfig     |   8 ++++
 mm/Makefile    |   1 +
 mm/klruscand.c | 123 +++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 132 insertions(+)
 create mode 100644 mm/klruscand.c

diff --git a/mm/Kconfig b/mm/Kconfig
index ceaa462a0ce6..ed0fa8f2551e 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1366,6 +1366,14 @@ config KPROMOTED
 	  Promote hot pages from lower tier to top tier by using the
 	  memory access information provided by various sources.
 
+config KLRUSCAND
+	bool "Kernel lower tier access scan daemon"
+	default y
+	depends on KPROMOTED && LRU_GEN_WALKS_MMU
+	help
+	  Scan for accesses from lower tiers by invoking MGLRU to perform
+	  page table walks.
+
 source "mm/damon/Kconfig"
 
 endmenu
diff --git a/mm/Makefile b/mm/Makefile
index bf4f5f18f1f9..eb7b76db3b33 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -148,3 +148,4 @@ obj-$(CONFIG_EXECMEM) += execmem.o
 obj-$(CONFIG_TMPFS_QUOTA) += shmem_quota.o
 obj-$(CONFIG_PT_RECLAIM) += pt_reclaim.o
 obj-$(CONFIG_KPROMOTED) += kpromoted.o
+obj-$(CONFIG_KLRUSCAND) += klruscand.o
diff --git a/mm/klruscand.c b/mm/klruscand.c
new file mode 100644
index 000000000000..a53d43c60155
--- /dev/null
+++ b/mm/klruscand.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/memcontrol.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/random.h>
+#include <linux/migrate.h>
+#include <linux/mm_inline.h>
+#include <linux/slab.h>
+#include <linux/sched/clock.h>
+#include <linux/memory-tiers.h>
+#include <linux/sched/mm.h>
+#include <linux/sched.h>
+#include <linux/kpromoted.h>
+
+#include "internal.h"
+
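+/* Interval between MGLRU scan passes, in milliseconds. */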
+#define KLRUSCAND_INTERVAL_MS 4000
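+/* Maximum number of PFNs buffered before accessed_cb requests a flush. */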
+#define BATCH_SIZE (2 << 16)
+
+static struct task_struct *scan_thread;
+static pfn_t pfn_batch[BATCH_SIZE];
+static int batch_index;
+
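+/* Drain the buffered PFNs into kpromoted and reset the batch. */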
+static void flush_cb(void)
+{
+	int i = 0;
+
+	for (; i < batch_index; i++) {
+		u64 pfn = pfn_batch[i].val;
+
+		kpromoted_record_access((unsigned long)pfn, NUMA_NO_NODE,
+					KPROMOTED_PGTABLE_SCAN, jiffies);
+
+		if (i % 16 == 0)
+			cond_resched();
+	}
+	batch_index = 0;
+}
+
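+/* Called by the MGLRU walk for each accessed page; batches the PFN. */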
+static int accessed_cb(pfn_t pfn)
+{
+	if (batch_index >= BATCH_SIZE)
+		return -EAGAIN;
+
+	pfn_batch[batch_index++] = pfn;
+	return 0;
+}
+
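+/* Periodically scan every lower-tier node's lruvecs via MGLRU. */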
+static int klruscand_run(void *unused)
+{
+	struct lru_gen_mm_walk *walk;
+
+	walk = kzalloc(sizeof(*walk),
+		       __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
+	if (!walk)
+		return -ENOMEM;
+
+	while (!kthread_should_stop()) {
+		unsigned long next_wake_time;
+		long sleep_time;
+		struct mem_cgroup *memcg;
+		int flags;
+		int nid;
+
+		next_wake_time = jiffies + msecs_to_jiffies(KLRUSCAND_INTERVAL_MS);
+
+		for_each_node_state(nid, N_MEMORY) {
+			pg_data_t *pgdat = NODE_DATA(nid);
+			struct reclaim_state rs = { 0 };
+
+			if (node_is_toptier(nid))
+				continue;
+
+			rs.mm_walk = walk;
+			set_task_reclaim_state(current, &rs);
+			flags = memalloc_noreclaim_save();
+
+			memcg = mem_cgroup_iter(NULL, NULL, NULL);
+			do {
+				struct lruvec *lruvec =
+					mem_cgroup_lruvec(memcg, pgdat);
+				unsigned long max_seq =
+					READ_ONCE((lruvec)->lrugen.max_seq);
+
+				lru_gen_scan_lruvec(lruvec, max_seq,
+						    accessed_cb, flush_cb);
+				cond_resched();
+			} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
+
+			memalloc_noreclaim_restore(flags);
+			set_task_reclaim_state(current, NULL);
+			memset(walk, 0, sizeof(*walk));
+		}
+
+		sleep_time = next_wake_time - jiffies;
+		if (sleep_time > 0 && sleep_time != MAX_SCHEDULE_TIMEOUT)
+			schedule_timeout_idle(sleep_time);
+	}
+	kfree(walk);
+	return 0;
+}
+
+static int __init klruscand_init(void)
+{
+	struct task_struct *task;
+
+	task = kthread_run(klruscand_run, NULL, "klruscand");
+
+	if (IS_ERR(task)) {
+		pr_err("Failed to create klruscand kthread\n");
+		return PTR_ERR(task);
+	}
+
+	scan_thread = task;
+	return 0;
+}
+module_init(klruscand_init);
--
2.49.0.395.g12beb8f557-goog