Message-ID: <20230830175335.1536008-3-yosryahmed@google.com>
Date: Wed, 30 Aug 2023 17:53:33 +0000
From: Yosry Ahmed <yosryahmed@...gle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Johannes Weiner <hannes@...xchg.org>,
Michal Hocko <mhocko@...nel.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Shakeel Butt <shakeelb@...gle.com>,
Muchun Song <muchun.song@...ux.dev>,
Ivan Babrou <ivan@...udflare.com>, Tejun Heo <tj@...nel.org>,
"Michal Koutný" <mkoutny@...e.com>,
Waiman Long <longman@...hat.com>, linux-mm@...ck.org,
cgroups@...r.kernel.org, linux-kernel@...r.kernel.org,
Yosry Ahmed <yosryahmed@...gle.com>
Subject: [PATCH v3 2/4] mm: memcg: add a helper for non-unified stats flushing

Some contexts flush memcg stats outside of unified flushing, directly
using cgroup_rstat_flush(). Add do_stats_flush(), a helper for
non-unified flushing and a counterpart to do_unified_stats_flush(), and
use it in those contexts, as well as in do_unified_stats_flush() itself.

This abstracts the rstat API and makes it easy to introduce
modifications to either unified or non-unified flushing functions
without changing callers.

No functional change intended.
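As an aside for readers, the change boils down to routing every
non-unified flush through one thin wrapper around cgroup_rstat_flush().
The sketch below is a simplified, user-space approximation of that
pattern; the struct layouts and the cgroup_rstat_flush() stub are
stand-ins added only so the example builds on its own, not the kernel
definitions:

/*
 * Simplified, user-space sketch of the wrapper introduced by this
 * patch. struct cgroup, struct cgroup_subsys_state, struct mem_cgroup
 * and the cgroup_rstat_flush() stub are stand-ins, not kernel code.
 */
#include <stdio.h>

struct cgroup { const char *name; };
struct cgroup_subsys_state { struct cgroup *cgroup; };
struct mem_cgroup { struct cgroup_subsys_state css; };

/* Stand-in for the rstat flush; the kernel walks the rstat tree here. */
static void cgroup_rstat_flush(struct cgroup *cgrp)
{
	printf("flushing rstat subtree of %s\n", cgrp->name);
}

/* The new helper: flush only @memcg's subtree, never skip. */
static void do_stats_flush(struct mem_cgroup *memcg)
{
	cgroup_rstat_flush(memcg->css.cgroup);
}

int main(void)
{
	struct cgroup cgrp = { .name = "foo" };
	struct mem_cgroup memcg = { .css = { .cgroup = &cgrp } };

	/* Callers go through the helper instead of calling rstat directly. */
	do_stats_flush(&memcg);
	return 0;
}

In the tree itself the helper is exactly the two-line function added in
the first hunk below; the point of the wrapper is that later changes to
how a subtree flush is done need not touch these call sites.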
Signed-off-by: Yosry Ahmed <yosryahmed@...gle.com>
---
 mm/memcontrol.c | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2d0ec828a1c4..8c046feeaae7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -639,6 +639,17 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
 	}
 }
 
+/*
+ * do_stats_flush - do a flush of the memory cgroup statistics
+ * @memcg: memory cgroup to flush
+ *
+ * Only flushes the subtree of @memcg, does not skip under any conditions.
+ */
+static void do_stats_flush(struct mem_cgroup *memcg)
+{
+	cgroup_rstat_flush(memcg->css.cgroup);
+}
+
 /*
  * do_unified_stats_flush - do a unified flush of memory cgroup statistics
  *
@@ -656,7 +667,7 @@ static void do_unified_stats_flush(void)
 
 	WRITE_ONCE(flush_next_time, jiffies_64 + 2*FLUSH_TIME);
 
-	cgroup_rstat_flush(root_mem_cgroup->css.cgroup);
+	do_stats_flush(root_mem_cgroup);
 
 	atomic_set(&stats_flush_threshold, 0);
 	atomic_set(&stats_unified_flush_ongoing, 0);
@@ -7790,7 +7801,7 @@ bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
 			break;
 		}
 
-		cgroup_rstat_flush(memcg->css.cgroup);
+		do_stats_flush(memcg);
 		pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
 		if (pages < max)
 			continue;
@@ -7855,8 +7866,10 @@ void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
 static u64 zswap_current_read(struct cgroup_subsys_state *css,
 			      struct cftype *cft)
 {
-	cgroup_rstat_flush(css->cgroup);
-	return memcg_page_state(mem_cgroup_from_css(css), MEMCG_ZSWAP_B);
+	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+
+	do_stats_flush(memcg);
+	return memcg_page_state(memcg, MEMCG_ZSWAP_B);
 }
 
 static int zswap_max_show(struct seq_file *m, void *v)
--
2.42.0.rc2.253.gd59a3bf2b4-goog