Message-ID: <20240702084423.1717904-4-link@vivo.com>
Date: Tue, 2 Jul 2024 16:44:06 +0800
From: Huan Yang <link@...o.com>
To: Johannes Weiner <hannes@...xchg.org>,
Michal Hocko <mhocko@...nel.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Shakeel Butt <shakeel.butt@...ux.dev>,
Muchun Song <muchun.song@...ux.dev>,
Andrew Morton <akpm@...ux-foundation.org>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
David Hildenbrand <david@...hat.com>,
Ryan Roberts <ryan.roberts@....com>,
Chris Li <chrisl@...nel.org>,
Dan Schatzberg <schatzberg.dan@...il.com>,
Huan Yang <link@...o.com>,
Kairui Song <kasong@...cent.com>,
cgroups@...r.kernel.org,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
Christian Brauner <brauner@...nel.org>
Cc: opensource.kernel@...o.com
Subject: [RFC PATCH 3/4] mm: memcg: pmc: support reaper

If memcg enables pmc, it will cache some pages. However, if all
processes in the memcg exit while some pages remain in the cache, those
pages will never be used again unless the memcg is deleted.

To avoid this situation, a periodic reaping job is added to each memcg
when pmc is enabled; it reclaims all of the memcg's cached memory at a
regular interval (5s by default).
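
For reference, the reaper added below follows the usual
self-rescheduling delayed_work pattern. A minimal, self-contained
sketch (the struct and function names here are illustrative, not the
exact patch code):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct pmc_example {
	unsigned int reap_interval_us;	/* 0 disables the reaper */
	struct delayed_work reaper_work;
};

static void pmc_example_reaper(struct work_struct *work)
{
	struct pmc_example *p = container_of(to_delayed_work(work),
					     struct pmc_example, reaper_work);
	unsigned int wait = READ_ONCE(p->reap_interval_us);

	if (!wait)
		return;		/* interval set to 0: stop rescheduling */

	/* ... drain the cached pages here ... */

	/* re-arm so the reap repeats until cancelled */
	schedule_delayed_work(&p->reaper_work, usecs_to_jiffies(wait));
}

static void pmc_example_start(struct pmc_example *p)
{
	p->reap_interval_us = 5 * 1000 * 1000;	/* default 5s */
	INIT_DELAYED_WORK(&p->reaper_work, pmc_example_reaper);
	schedule_delayed_work(&p->reaper_work,
			      usecs_to_jiffies(p->reap_interval_us));
}

static void pmc_example_stop(struct pmc_example *p)
{
	/* flush a pending run and stop the self-rescheduling work */
	cancel_delayed_work_sync(&p->reaper_work);
}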

The user can also change the reaper interval, for example:

echo keys=reaper_time=8000000 > memory.cache

makes this memcg reap its cache every 8s (the value is in microseconds).
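
A couple of additional, illustrative invocations of the same interface
(the cgroup mount point and memcg path are assumptions, not part of
this patch):

# disable the periodic reaper for this memcg
echo keys=reaper_time=0 > /sys/fs/cgroup/<memcg>/memory.cache
# values above 10000000us (10s) are rejected with -EINVAL
echo keys=reaper_time=20000000 > /sys/fs/cgroup/<memcg>/memory.cache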
Signed-off-by: Huan Yang <link@...o.com>
---
 include/linux/mmzone.h |  6 ++++
 mm/memcontrol.c        | 77 ++++++++++++++++++++++++++++++++++++++----
 mm/page_alloc.c        |  1 +
 3 files changed, 77 insertions(+), 7 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 773b89e214c9..b56dd462232b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -608,6 +608,7 @@ struct mem_cgroup_zone_cache {
struct list_head pages;
spinlock_t pages_lock;
atomic_t nr_pages;
+ atomic_t nr_reapered;
atomic_t nr_alloced;
};
@@ -616,6 +617,11 @@ struct mem_cgroup_per_node_cache {
struct mem_cgroup_zone_cache zone_cachep[MAX_NR_ZONES];
struct mem_cgroup *memcg;
+ /* cache reclaim interval, in us; default 5s, 0 disables the reaper */
+#define DEFAULT_PMC_REAPER_TIME ((5 * 1000 * 1000))
+ unsigned int reaper_wait;
+ struct delayed_work reaper_work;
+
/* max number to hold page, unit page, default 100MB */
#define DEFAULT_PMC_HOLD_LIMIX ((100 << 20) >> PAGE_SHIFT)
unsigned int hold_limit;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9db5bbe63b34..ae6917de91cc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7101,6 +7101,39 @@ static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
return nbytes;
}
+/**
+ * pmc_reaper - reap all cached pages on a periodic scan.
+ * The scan interval depends on @reaper_wait, which can be set via the `keys`
+ * nested key.
+ * By default, each memcg with the cache enabled is reaped every 5s.
+ */
+static void pmc_reaper(struct work_struct *worker)
+{
+ struct mem_cgroup_per_node_cache *node_cachep = container_of(
+ to_delayed_work(worker), struct mem_cgroup_per_node_cache,
+ reaper_work);
+ struct mem_cgroup *memcg;
+ int num;
+
+ if (!READ_ONCE(node_cachep->reaper_wait))
+ return;
+
+ memcg = node_cachep->memcg;
+ rcu_read_lock();
+ if (!css_tryget(&memcg->css)) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+
+ num = mem_cgroup_release_cache(node_cachep);
+
+ css_put(&memcg->css);
+
+ schedule_delayed_work(&node_cachep->reaper_work,
+ usecs_to_jiffies(node_cachep->reaper_wait));
+}
+
static int __enable_mem_cgroup_cache(struct mem_cgroup *memcg)
{
int nid, idx;
@@ -7141,8 +7174,13 @@ static int __enable_mem_cgroup_cache(struct mem_cgroup *memcg)
p->memcg = memcg;
p->hold_limit = DEFAULT_PMC_HOLD_LIMIX;
p->allow_watermark = DEFAULT_PMC_GAP_WATERMARK;
+ p->reaper_wait = DEFAULT_PMC_REAPER_TIME;
atomic_inc(&pmc_nr_enabled);
+
+ INIT_DELAYED_WORK(&p->reaper_work, pmc_reaper);
+ schedule_delayed_work(&p->reaper_work,
+ usecs_to_jiffies(p->reaper_wait));
}
if (static_branch_likely(&pmc_key))
@@ -7184,6 +7222,7 @@ static int __disable_mem_cgroup_cache(struct mem_cgroup *memcg)
p = nodeinfo->cachep;
+ cancel_delayed_work_sync(&p->reaper_work);
mem_cgroup_release_cache(p);
kfree(p);
@@ -7207,7 +7246,8 @@ static int mem_cgroup_cache_show(struct seq_file *m, void *v)
if (!READ_ONCE(memcg->cache_enabled))
return -EINVAL;
- seq_printf(m, "%4s %16s %16s\n", "NODE", "WATERMARK", "HOLD_LIMIT");
+ seq_printf(m, "%4s %16s %16s %16s\n", "NODE", "WATERMARK",
+ "HOLD_LIMIT", "REAPER_TIME");
for_each_online_node(nid) {
struct mem_cgroup_per_node *nodeinfo = memcg->nodeinfo[nid];
struct mem_cgroup_per_node_cache *p;
@@ -7216,13 +7256,15 @@ static int mem_cgroup_cache_show(struct seq_file *m, void *v)
if (!p)
continue;
- seq_printf(m, "%4d %14uKB %14uKB\n", nid,
+ seq_printf(m, "%4d %14uKB %14uKB %16u\n", nid,
(READ_ONCE(p->allow_watermark) << (PAGE_SHIFT - 10)),
- (READ_ONCE(p->hold_limit) << (PAGE_SHIFT - 10)));
+ (READ_ONCE(p->hold_limit) << (PAGE_SHIFT - 10)),
+ READ_ONCE(p->reaper_wait));
}
seq_puts(m, "===========\n");
- seq_printf(m, "%4s %16s %16s %16s\n", "NODE", "ZONE", "CACHE", "HIT");
+ seq_printf(m, "%4s %16s %16s %16s %16s\n", "NODE", "ZONE", "CACHE",
+ "REAPER", "HIT");
for_each_online_node(nid) {
struct mem_cgroup_per_node *nodeinfo = memcg->nodeinfo[nid];
@@ -7242,9 +7284,12 @@ static int mem_cgroup_cache_show(struct seq_file *m, void *v)
continue;
zc = &p->zone_cachep[idx];
- seq_printf(m, "%4d %16s %14dKB %14dKB\n", nid, z->name,
+ seq_printf(m, "%4d %16s %14dKB %14dKB %14dKB\n", nid,
+ z->name,
(atomic_read(&zc->nr_pages)
<< (PAGE_SHIFT - 10)),
+ (atomic_read(&zc->nr_reapered)
+ << (PAGE_SHIFT - 10)),
(atomic_read(&zc->nr_alloced)
<< (PAGE_SHIFT - 10)));
}
@@ -7257,6 +7302,7 @@ static int mem_cgroup_cache_show(struct seq_file *m, void *v)
enum {
OPT_KEY_NID,
OPT_KEY_WATERMARK,
+ OPT_KEY_REAPER_TIME,
OPT_KEY_HOLD_LIMIT,
OPT_KEY_ERR,
NR_PMC_KEY_OPTS = OPT_KEY_ERR
@@ -7265,6 +7311,7 @@ enum {
static const match_table_t fc_tokens = {
{ OPT_KEY_NID, "nid=%d" },
{ OPT_KEY_WATERMARK, "watermark=%u" },
+ { OPT_KEY_REAPER_TIME, "reaper_time=%u" },
{ OPT_KEY_HOLD_LIMIT, "limit=%u" },
{ OPT_KEY_ERR, NULL}
};
@@ -7282,6 +7329,12 @@ __apply_status_for_mem_cgroup_cache(struct mem_cgroup_per_node_cache *p,
STATUS_UNSET_DEFAULT_VALUE)
p->allow_watermark = opts[OPT_KEY_WATERMARK];
break;
+ case OPT_KEY_REAPER_TIME:
+ if (opts[OPT_KEY_REAPER_TIME] !=
+ STATUS_UNSET_DEFAULT_VALUE)
+ WRITE_ONCE(p->reaper_wait,
+ opts[OPT_KEY_REAPER_TIME]);
+ break;
case OPT_KEY_HOLD_LIMIT:
if (opts[OPT_KEY_HOLD_LIMIT] !=
STATUS_UNSET_DEFAULT_VALUE)
@@ -7319,7 +7372,7 @@ mem_cgroup_apply_cache_status(struct mem_cgroup *memcg,
}
/**
- * Support nid=x,watermark=bytes,limit=bytes args
+ * Support nid=x,watermark=bytes,limit=bytes,reaper_time=us args
*/
static int __mem_cgroup_cache_control_key(char *buf,
struct mem_cgroup *memcg)
@@ -7361,6 +7414,14 @@ static int __mem_cgroup_cache_control_key(char *buf,
return -EINVAL;
opts[OPT_KEY_WATERMARK] = v;
break;
+ case OPT_KEY_REAPER_TIME:
+ if (match_uint(&args[0], &v))
+ return -EINVAL;
+#define MAX_REAPER_TIME ((10 * 1000 * 1000))
+ if (v > MAX_REAPER_TIME)
+ return -EINVAL;
+ opts[OPT_KEY_REAPER_TIME] = v;
+ break;
case OPT_KEY_HOLD_LIMIT:
if (match_uint(&args[0], &v))
return -EINVAL;
@@ -7402,7 +7463,9 @@ static const match_table_t ctrl_tokens = {
* 1. nid=x, if input, will only change target NODE's cache status. Else, all.
* 2. watermark=bytes, change cache hold behavior, only zone free pages above
* high watermark+watermark, can hold.
- * 3. limit=bytes, change max pages can cache. Max can change to 500MB
+ * 3. reaper_time=us, change the reaper interval; default is 5s. Set 0 to
+ *    disable; the maximum is 10s.
+ * 4. limit=bytes, change max pages can cache. Max can change to 500MB
* Enable and keys can both input, split by space, so can set args after enable,
* if cache not enable, can't set keys.
*/
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 54c4d00c2506..1fe02f4f3b33 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1310,6 +1310,7 @@ int mem_cgroup_release_cache(struct mem_cgroup_per_node_cache *nodep)
}
num += i;
+ atomic_add(i, &zc->nr_reapered);
atomic_sub(i, &zc->nr_pages);
}
--
2.45.2