Message-ID: <20250914180750448qMRz3iTon78DoExPyZusD@zte.com.cn>
Date: Sun, 14 Sep 2025 18:07:50 +0800 (CST)
From: <xu.xin16@....com.cn>
To: <akpm@...ux-foundation.org>
Cc: <shakeel.butt@...ux.dev>, <hannes@...xchg.org>, <mhocko@...nel.org>,
<roman.gushchin@...ux.dev>, <david@...hat.com>,
<chengming.zhou@...ux.dev>, <muchun.song@...ux.dev>,
<linux-kernel@...r.kernel.org>, <linux-mm@...ck.org>,
<cgroups@...r.kernel.org>, <xu.xin16@....com.cn>
Subject: [PATCH v2 4/5] memcg: add per-memcg ksm_profit
From: xu xin <xu.xin16@....com.cn>
Add a ksm_profit item to the per-memcg memory.stat output. As with the
other per-memcg KSM items, the value is either taken from the global
KSM counters or accumulated by summing ksm_process_profit() over the
cgroup's tasks.

Users can obtain the ksm_profit of a cgroup with:

  cat /sys/fs/cgroup/memory.stat | grep ksm_profit
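For example (output illustrative only; the value depends on the
workload and, like general_profit, can be negative when the cost of
the rmap_items outweighs the memory saved):

  # grep ksm_profit /sys/fs/cgroup/memory.stat
  ksm_profit 1474560
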
Signed-off-by: xu xin <xu.xin16@....com.cn>
---
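A rough worked example of the reported value, following the same
formula as the existing general_profit (all numbers below are
assumptions for illustration: 4 KiB pages, a hypothetical 64-byte
struct ksm_rmap_item, invented counts):

  profit = (pages_sharing + zero_pages) * PAGE_SIZE
           - rmap_items * sizeof(struct ksm_rmap_item)
         = (300 + 100) * 4096 - 1000 * 64
         = 1638400 - 64000
         = 1574400 bytes
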
mm/ksm.c | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 4cc47ad1e887..c01567a3d5ca 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -3308,11 +3308,18 @@ long ksm_process_profit(struct mm_struct *mm)
}
#endif /* CONFIG_PROC_FS */
+static inline long ksm_general_profit(void)
+{
+ return (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
+ ksm_rmap_items * sizeof(struct ksm_rmap_item);
+}
+
#ifdef CONFIG_MEMCG
struct memcg_ksm_stat {
unsigned long ksm_rmap_items;
long ksm_zero_pages;
unsigned long ksm_merging_pages;
+ long ksm_profit;
};
static int evaluate_memcg_ksm_stat(struct task_struct *task, void *arg)
@@ -3325,6 +3332,7 @@ static int evaluate_memcg_ksm_stat(struct task_struct *task, void *arg)
ksm_stat->ksm_rmap_items += mm->ksm_rmap_items;
ksm_stat->ksm_zero_pages += mm_ksm_zero_pages(mm);
ksm_stat->ksm_merging_pages += mm->ksm_merging_pages;
+ ksm_stat->ksm_profit += ksm_process_profit(mm);
mmput(mm);
}
@@ -3342,11 +3350,13 @@ void memcg_stat_ksm_show(struct mem_cgroup *memcg, struct seq_buf *s)
ksm_stat.ksm_zero_pages = atomic_long_read(&ksm_zero_pages);
ksm_stat.ksm_merging_pages = ksm_pages_shared +
ksm_pages_sharing;
+ ksm_stat.ksm_profit = ksm_general_profit();
} else {
/* Initialization */
ksm_stat.ksm_rmap_items = 0;
ksm_stat.ksm_zero_pages = 0;
ksm_stat.ksm_merging_pages = 0;
+ ksm_stat.ksm_profit = 0;
/* Summing all processes' ksm statistic items */
mem_cgroup_scan_tasks(memcg, evaluate_memcg_ksm_stat, &ksm_stat);
}
@@ -3354,6 +3364,7 @@ void memcg_stat_ksm_show(struct mem_cgroup *memcg, struct seq_buf *s)
seq_buf_printf(s, "ksm_rmap_items %lu\n", ksm_stat.ksm_rmap_items);
seq_buf_printf(s, "ksm_zero_pages %lu\n", ksm_stat.ksm_zero_pages);
seq_buf_printf(s, "ksm_merging_pages %lu\n", ksm_stat.ksm_merging_pages);
+ seq_buf_printf(s, "ksm_profit %ld\n", ksm_stat.ksm_profit);
}
#endif
@@ -3648,12 +3659,7 @@ KSM_ATTR_RO(ksm_zero_pages);
static ssize_t general_profit_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- long general_profit;
-
- general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
- ksm_rmap_items * sizeof(struct ksm_rmap_item);
-
- return sysfs_emit(buf, "%ld\n", general_profit);
+ return sysfs_emit(buf, "%ld\n", ksm_general_profit());
}
KSM_ATTR_RO(general_profit);
--
2.25.1