From c40bbd69cbb655b6389c2398ce89abb06e64910d Mon Sep 17 00:00:00 2001
From: Glauber Costa
Date: Wed, 4 Apr 2012 21:08:38 +0400
Subject: [PATCH] decrement static keys on real destroy time

We call the destroy function when a cgroup starts to be removed,
such as by an rmdir event.

However, because of our reference counters, some objects are still
in flight. Right now we are decrementing the static_keys at destroy()
time, meaning that if we drop the last static_key reference, some
objects will still hold charges, but the code to properly uncharge
them will never run.

This becomes a problem especially if the limit is ever enabled again,
because new charges will then be added on top of the stale ones,
making the accounting pretty much impossible to get right.

Signed-off-by: Glauber Costa
---
 include/net/tcp_memcontrol.h |    2 ++
 mm/memcontrol.c              |   15 +++++++++++++++
 net/ipv4/tcp_memcontrol.c    |   10 ++++------
 3 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h
index 7df18bc..5a2b915 100644
--- a/include/net/tcp_memcontrol.h
+++ b/include/net/tcp_memcontrol.h
@@ -9,6 +9,8 @@ struct tcp_memcontrol {
 	/* those two are read-mostly, leave them at the end */
 	long tcp_prot_mem[3];
 	int tcp_memory_pressure;
+	/* if this cgroup was ever limited, having static_keys activated */
+	bool limited;
 };
 
 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 64a1bcd..74b757b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -442,6 +442,15 @@ void sock_release_memcg(struct sock *sk)
 	}
 }
 
+static void disarm_static_keys(struct mem_cgroup *memcg)
+{
+#ifdef CONFIG_INET
+	if (memcg->tcp_mem.limited)
+		static_key_slow_dec(&memcg_socket_limit_enabled);
+#endif
+}
+
+
 #ifdef CONFIG_INET
 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
 {
@@ -452,6 +461,11 @@ struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
 }
 EXPORT_SYMBOL(tcp_proto_cgroup);
 #endif /* CONFIG_INET */
+#else
+static inline void disarm_static_keys(struct mem_cgroup *memcg)
+{
+}
+
 #endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
 
 static void drain_all_stock_async(struct mem_cgroup *memcg);
@@ -4883,6 +4897,7 @@ static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
 {
 	if (atomic_sub_and_test(count, &memcg->refcnt)) {
 		struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+		disarm_static_keys(memcg);
 		__mem_cgroup_free(memcg);
 		if (parent)
 			mem_cgroup_put(parent);
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 1517037..93555ab 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -41,6 +41,7 @@ int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 	tcp->tcp_prot_mem[1] = net->ipv4.sysctl_tcp_mem[1];
 	tcp->tcp_prot_mem[2] = net->ipv4.sysctl_tcp_mem[2];
 	tcp->tcp_memory_pressure = 0;
+	tcp->limited = false;
 
 	parent_cg = tcp_prot.proto_cgroup(parent);
 	if (parent_cg)
@@ -74,9 +75,6 @@ void tcp_destroy_cgroup(struct mem_cgroup *memcg)
 	percpu_counter_destroy(&tcp->tcp_sockets_allocated);
 
 	val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
-
-	if (val != RESOURCE_MAX)
-		static_key_slow_dec(&memcg_socket_limit_enabled);
 }
 EXPORT_SYMBOL(tcp_destroy_cgroup);
 
@@ -107,10 +105,10 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
 		tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
 					     net->ipv4.sysctl_tcp_mem[i]);
 
-	if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX)
-		static_key_slow_dec(&memcg_socket_limit_enabled);
-	else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX)
+	if (old_lim == RESOURCE_MAX && !tcp->limited) {
 		static_key_slow_inc(&memcg_socket_limit_enabled);
+		tcp->limited = true;
+	}
 
 	return 0;
 }
-- 
1.7.7.6
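
Not part of the patch: below is a minimal userspace sketch of the lifetime
rule the change enforces. Every name in it (fake_key, fake_memcg, fake_put
and friends) is a made-up stand-in for the kernel objects; the point is only
that the key is armed at most once per group and is disarmed at the last
put, not at destroy/rmdir time.

#include <stdbool.h>
#include <stdio.h>

static int fake_key;			/* stands in for memcg_socket_limit_enabled */

struct fake_memcg {
	int refcnt;			/* stands in for memcg->refcnt */
	bool limited;			/* stands in for tcp_mem.limited */
};

static void fake_set_limit(struct fake_memcg *m)
{
	if (!m->limited) {		/* arm the key at most once per group */
		fake_key++;
		m->limited = true;
	}
}

static void fake_destroy(struct fake_memcg *m)
{
	/* rmdir-time teardown: charged objects may still be in flight,
	 * so the key must not be disarmed here. */
	(void)m;
}

static void fake_put(struct fake_memcg *m)
{
	if (--m->refcnt == 0 && m->limited)
		fake_key--;		/* real destroy time: now it is safe */
}

int main(void)
{
	struct fake_memcg m = { .refcnt = 2, .limited = false };

	fake_set_limit(&m);		/* key: 1 */
	fake_set_limit(&m);		/* still 1, no double arming */
	fake_destroy(&m);		/* rmdir: key stays armed */
	fake_put(&m);			/* an in-flight object still holds a ref */
	printf("after destroy: key = %d\n", fake_key);	/* prints 1 */
	fake_put(&m);			/* last reference dropped */
	printf("after last put: key = %d\n", fake_key);	/* prints 0 */
	return 0;
}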