Message-ID: <77c64e29b70bad6ca303e0e591624f9cdf2a750b.1761658311.git.zhengqi.arch@bytedance.com>
Date: Tue, 28 Oct 2025 21:58:35 +0800
From: Qi Zheng <qi.zheng@...ux.dev>
To: hannes@...xchg.org,
	hughd@...gle.com,
	mhocko@...e.com,
	roman.gushchin@...ux.dev,
	shakeel.butt@...ux.dev,
	muchun.song@...ux.dev,
	david@...hat.com,
	lorenzo.stoakes@...cle.com,
	ziy@...dia.com,
	harry.yoo@...cle.com,
	imran.f.khan@...cle.com,
	kamalesh.babulal@...cle.com,
	axelrasmussen@...gle.com,
	yuanchu@...gle.com,
	weixugc@...gle.com,
	akpm@...ux-foundation.org
Cc: linux-mm@...ck.org,
	linux-kernel@...r.kernel.org,
	cgroups@...r.kernel.org,
	Qi Zheng <zhengqi.arch@...edance.com>
Subject: [PATCH v1 22/26] mm: vmscan: prepare for reparenting traditional LRU folios
From: Qi Zheng <zhengqi.arch@...edance.com>

To resolve the dying memcg issue, we need to reparent the LRU folios of a
child memcg to its parent memcg. For the traditional LRU, each per-node
lruvec of every memcg holds the same set of LRU lists (active/inactive anon
and file, plus the unevictable accounting). Due to this symmetry, the child's
lists can simply be spliced onto the tail of the parent's lists during the
reparenting process. Note that unevictable folios are not kept on
lruvec->lists[LRU_UNEVICTABLE] (they are only counted), so for that list only
the per-zone size counters are transferred.

This commit implements lru_reparent_memcg(), which performs this transfer and
will be used during the reparenting process.

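For reference only, a rough caller sketch (hypothetical, not part of this
patch): it assumes a later patch wires reparenting into the memcg offline
path, after new charges to the child have been stopped and with locking
against concurrent LRU manipulation handled by that path:

	/*
	 * Hypothetical example: move @memcg's LRU folios to its parent.
	 * The real reparenting path is expected to stop new charges and
	 * serialize against LRU updates before calling this.
	 */
	static void memcg_reparent_lru_example(struct mem_cgroup *memcg)
	{
		struct mem_cgroup *parent = parent_mem_cgroup(memcg);

		if (!parent)
			parent = root_mem_cgroup;

		lru_reparent_memcg(memcg, parent);
	}
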
Signed-off-by: Qi Zheng <zhengqi.arch@...edance.com>
---
 include/linux/mmzone.h |  4 ++++
 mm/vmscan.c            | 39 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 43 insertions(+)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 4398e027f450e..0d8776e5b6747 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -366,6 +366,10 @@ enum lruvec_flags {
 	LRUVEC_NODE_CONGESTED,
 };
 
+#ifdef CONFIG_MEMCG
+void lru_reparent_memcg(struct mem_cgroup *src, struct mem_cgroup *dst);
+#endif /* CONFIG_MEMCG */
+
 #endif /* !__GENERATING_BOUNDS_H */
 
 /*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 676e6270e5b45..7aa8e1472d10d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2649,6 +2649,45 @@ static bool can_age_anon_pages(struct lruvec *lruvec,
 			  lruvec_memcg(lruvec));
 }
 
+
+#ifdef CONFIG_MEMCG
+static void lruvec_reparent_lru(struct lruvec *src, struct lruvec *dst,
+				enum lru_list lru)
+{
+	int zid;
+	struct mem_cgroup_per_node *mz_src, *mz_dst;
+
+	mz_src = container_of(src, struct mem_cgroup_per_node, lruvec);
+	mz_dst = container_of(dst, struct mem_cgroup_per_node, lruvec);
+
+	if (lru != LRU_UNEVICTABLE)
+		list_splice_tail_init(&src->lists[lru], &dst->lists[lru]);
+
+	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+		mz_dst->lru_zone_size[zid][lru] += mz_src->lru_zone_size[zid][lru];
+		mz_src->lru_zone_size[zid][lru] = 0;
+	}
+}
+
+void lru_reparent_memcg(struct mem_cgroup *src, struct mem_cgroup *dst)
+{
+	int nid;
+
+	for_each_node(nid) {
+		enum lru_list lru;
+		struct lruvec *src_lruvec, *dst_lruvec;
+
+		src_lruvec = mem_cgroup_lruvec(src, NODE_DATA(nid));
+		dst_lruvec = mem_cgroup_lruvec(dst, NODE_DATA(nid));
+		dst_lruvec->anon_cost += src_lruvec->anon_cost;
+		dst_lruvec->file_cost += src_lruvec->file_cost;
+
+		for_each_lru(lru)
+			lruvec_reparent_lru(src_lruvec, dst_lruvec, lru);
+	}
+}
+#endif
+
 #ifdef CONFIG_LRU_GEN
 
 #ifdef CONFIG_LRU_GEN_ENABLED
-- 
2.20.1