Message-Id: <20190307230033.31975-5-guro@fb.com>
Date: Thu, 7 Mar 2019 15:00:32 -0800
From: Roman Gushchin <guroan@...il.com>
To: linux-mm@...ck.org, kernel-team@...com
Cc: linux-kernel@...r.kernel.org, Tejun Heo <tj@...nel.org>,
Rik van Riel <riel@...riel.com>,
Johannes Weiner <hannes@...xchg.org>,
Michal Hocko <mhocko@...nel.org>, Roman Gushchin <guro@...com>
Subject: [PATCH 4/5] mm: release per-node memcg percpu data prematurely

As with memcg-level statistics, per-node data isn't expected
to be hot after cgroup removal. Switching over to atomics and
prematurely releasing the percpu data helps to reduce the memory
footprint of dying cgroups.

Signed-off-by: Roman Gushchin <guro@...com>
---
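For context, here is a minimal self-contained sketch (illustrative only, not
part of the patch) of the pattern being applied: an RCU-protected percpu
pointer is detached into an *_offlined field at offline time, concurrent
updaters fall back to an atomic counter, and the percpu area is freed from an
RCU callback once a grace period has passed. All names (demo_counter,
demo_counter_offline, ...) are hypothetical, and the sparse __rcu/__percpu
annotation games played by the real patch are omitted for brevity:

#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>

struct demo_counter {
	long __percpu *pcpu;		/* live percpu storage (RCU-managed) */
	long __percpu *pcpu_offlined;	/* detached copy, freed after a grace period */
	atomic_long_t value;		/* atomic fallback once pcpu is gone */
	struct rcu_head rcu;
};

/* Hot path: use percpu storage while it exists, atomics afterwards. */
static void demo_counter_add(struct demo_counter *c, long delta)
{
	long __percpu *pcpu;

	rcu_read_lock();
	pcpu = rcu_dereference(c->pcpu);
	if (pcpu)
		this_cpu_add(*pcpu, delta);
	else
		atomic_long_add(delta, &c->value);
	rcu_read_unlock();
}

/* RCU callback: no updater can still see the old pointer, free it now. */
static void demo_counter_free_rcu(struct rcu_head *rcu)
{
	struct demo_counter *c = container_of(rcu, struct demo_counter, rcu);

	/* Flushing the leftover percpu deltas into ->value is omitted here. */
	free_percpu(c->pcpu_offlined);
	WARN_ON_ONCE(rcu_access_pointer(c->pcpu) != NULL);
}

/* Offline: detach the percpu pointer and defer the free past a grace period. */
static void demo_counter_offline(struct demo_counter *c)
{
	c->pcpu_offlined = rcu_dereference_protected(c->pcpu, 1);
	rcu_assign_pointer(c->pcpu, NULL);
	call_rcu(&c->rcu, demo_counter_free_rcu);
}

In the patch below the same detach/defer dance is performed both for
memcg->vmstats_percpu (previous patch in the series) and for each node's
pn->lruvec_stat_cpu.
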
 include/linux/memcontrol.h |  1 +
 mm/memcontrol.c            | 24 +++++++++++++++++++++++-
 2 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 569337514230..f296693d102b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -127,6 +127,7 @@ struct mem_cgroup_per_node {
struct lruvec lruvec;
struct lruvec_stat __rcu /* __percpu */ *lruvec_stat_cpu;
+ struct lruvec_stat __percpu *lruvec_stat_cpu_offlined;
atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8c55954e6f23..18e863890392 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4459,7 +4459,7 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
if (!pn)
return;
- free_percpu(pn->lruvec_stat_cpu);
+ WARN_ON_ONCE(pn->lruvec_stat_cpu != NULL);
kfree(pn);
}
@@ -4615,7 +4615,17 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
static void mem_cgroup_free_percpu(struct rcu_head *rcu)
{
struct mem_cgroup *memcg = container_of(rcu, struct mem_cgroup, rcu);
+ int node;
+
+ for_each_node(node) {
+ struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
+ if (!pn)
+ continue;
+
+ free_percpu(pn->lruvec_stat_cpu_offlined);
+ WARN_ON_ONCE(pn->lruvec_stat_cpu != NULL);
+ }
free_percpu(memcg->vmstats_percpu_offlined);
WARN_ON_ONCE(memcg->vmstats_percpu);
@@ -4624,6 +4634,18 @@ static void mem_cgroup_free_percpu(struct rcu_head *rcu)
static void mem_cgroup_offline_percpu(struct mem_cgroup *memcg)
{
+ int node;
+
+ for_each_node(node) {
+ struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
+
+ if (!pn)
+ continue;
+
+ pn->lruvec_stat_cpu_offlined = (struct lruvec_stat __percpu *)
+ rcu_dereference(pn->lruvec_stat_cpu);
+ rcu_assign_pointer(pn->lruvec_stat_cpu, NULL);
+ }
memcg->vmstats_percpu_offlined = (struct memcg_vmstats_percpu __percpu*)
rcu_dereference(memcg->vmstats_percpu);
rcu_assign_pointer(memcg->vmstats_percpu, NULL);
--
2.20.1