Message-ID: <20250914180351288rcLuZnPAMUej48nuTc7KV@zte.com.cn>
Date: Sun, 14 Sep 2025 18:03:51 +0800 (CST)
From: <xu.xin16@....com.cn>
To: <akpm@...ux-foundation.org>
Cc: <shakeel.butt@...ux.dev>, <hannes@...xchg.org>, <mhocko@...nel.org>,
<roman.gushchin@...ux.dev>, <david@...hat.com>,
<chengming.zhou@...ux.dev>, <muchun.song@...ux.dev>,
<linux-kernel@...r.kernel.org>, <linux-mm@...ck.org>,
<cgroups@...r.kernel.org>, <xu.xin16@....com.cn>
Subject: [PATCH v2 1/5] memcg: add per-memcg ksm_rmap_items stat
From: xu xin <xu.xin16@....com.cn>

With the enablement of container-level KSM (e.g., via prctl), there is
a growing demand for container-level observability of KSM behavior.

The value of "ksm_rmap_items" is the total number of ksm rmap_items
allocated on behalf of this memcg. It can be used to judge how
unprofitable the KSM policy in use (e.g. madvise) is: the higher the
ratio of ksm_rmap_items to ksm_merging_pages, the less benefit KSM
brings.
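
As a purely hypothetical illustration (the numbers are made up): if a
memcg reported ksm_rmap_items 20000 but ksm_merging_pages 500, the
ratio would be 20000 / 500 = 40, i.e. KSM is tracking 40 rmap_items
for every page it actually merges, which suggests the current madvise
coverage brings little benefit.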

Add the counter to the existing memory.stat rather than adding a new
interface. Instead of adding an enum item to memcg_stat_item or
node_stat_item and updating the corresponding counter whenever ksmd
manipulates pages, we traverse all processes of the memcg and sum
their ksm_rmap_items counters.

Finally, the per-memcg ksm_rmap_items can be looked up simply with:

  cat /sys/fs/cgroup/memory.stat | grep ksm_rmap_items
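
For programmatic consumers, the new line can be parsed like any other
memory.stat entry (flat "key value" pairs). The following is only an
illustrative userspace sketch, not part of the patch; the cgroup path
and the helper name are assumptions:

  #include <stdio.h>
  #include <string.h>

  /* Read one "key value" counter from a cgroup v2 memory.stat file. */
  static int read_memory_stat(const char *path, const char *key,
                              unsigned long *val)
  {
          char name[64];
          unsigned long v;
          FILE *f = fopen(path, "r");

          if (!f)
                  return -1;
          while (fscanf(f, "%63s %lu", name, &v) == 2) {
                  if (!strcmp(name, key)) {
                          *val = v;
                          fclose(f);
                          return 0;
                  }
          }
          fclose(f);
          return -1;
  }

  int main(void)
  {
          unsigned long rmap_items;

          /* The root cgroup is used here; a child group works the same way. */
          if (!read_memory_stat("/sys/fs/cgroup/memory.stat",
                                "ksm_rmap_items", &rmap_items))
                  printf("ksm_rmap_items: %lu\n", rmap_items);
          return 0;
  }
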
Signed-off-by: xu xin <xu.xin16@....com.cn>
---
 include/linux/ksm.h |  1 +
 mm/ksm.c            | 38 ++++++++++++++++++++++++++++++++++++++
 mm/memcontrol.c     |  5 +++++
 3 files changed, 44 insertions(+)

diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 22e67ca7cba3..a41ed503f152 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -94,6 +94,7 @@ void collect_procs_ksm(const struct folio *folio, const struct page *page,
struct list_head *to_kill, int force_early);
long ksm_process_profit(struct mm_struct *);
bool ksm_process_mergeable(struct mm_struct *mm);
+void memcg_stat_ksm_show(struct mem_cgroup *memcg, struct seq_buf *s);
#else /* !CONFIG_KSM */
diff --git a/mm/ksm.c b/mm/ksm.c
index 2ef29802a49b..b533f0edaf96 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -3308,6 +3308,44 @@ long ksm_process_profit(struct mm_struct *mm)
}
#endif /* CONFIG_PROC_FS */
+#ifdef CONFIG_MEMCG
+struct memcg_ksm_stat {
+        unsigned long ksm_rmap_items;
+};
+
+static int evaluate_memcg_ksm_stat(struct task_struct *task, void *arg)
+{
+        struct mm_struct *mm;
+        struct memcg_ksm_stat *ksm_stat = arg;
+
+        mm = get_task_mm(task);
+        if (mm) {
+                ksm_stat->ksm_rmap_items += mm->ksm_rmap_items;
+                mmput(mm);
+        }
+
+        return 0;
+}
+
+/* Show the ksm statistic count in memory.stat under the cgroup mountpoint */
+void memcg_stat_ksm_show(struct mem_cgroup *memcg, struct seq_buf *s)
+{
+        struct memcg_ksm_stat ksm_stat;
+
+        if (mem_cgroup_is_root(memcg)) {
+                /* Just use the global counter for the root memcg */
+                ksm_stat.ksm_rmap_items = ksm_rmap_items;
+        } else {
+                /* Initialization */
+                ksm_stat.ksm_rmap_items = 0;
+                /* Sum all processes' ksm_rmap_items counters */
+                mem_cgroup_scan_tasks(memcg, evaluate_memcg_ksm_stat, &ksm_stat);
+        }
+        /* Print memcg ksm statistic items */
+        seq_buf_printf(s, "ksm_rmap_items %lu\n", ksm_stat.ksm_rmap_items);
+}
+#endif
+
#ifdef CONFIG_SYSFS
/*
* This all compiles without CONFIG_SYSFS, but is a waste of space.
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 257d2c76b730..9595b132c6c3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -63,6 +63,7 @@
#include <linux/seq_buf.h>
#include <linux/sched/isolation.h>
#include <linux/kmemleak.h>
+#include <linux/ksm.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
@@ -1492,6 +1493,10 @@ static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
}
}
+#ifdef CONFIG_KSM
+        memcg_stat_ksm_show(memcg, s);
+#endif
+
/* Accumulated memory events */
seq_buf_printf(s, "pgscan %lu\n",
memcg_events(memcg, PGSCAN_KSWAPD) +
--
2.25.1