Message-ID: <20151210125636.GJ19496@dhcp22.suse.cz>
Date: Thu, 10 Dec 2015 13:56:36 +0100
From: Michal Hocko <mhocko@...nel.org>
To: Johannes Weiner <hannes@...xchg.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Vladimir Davydov <vdavydov@...tuozzo.com>, linux-mm@...ck.org,
cgroups@...r.kernel.org, linux-kernel@...r.kernel.org,
kernel-team@...com
Subject: Re: [PATCH 4/8] mm: memcontrol: group kmem init and exit functions together
On Tue 08-12-15 13:34:21, Johannes Weiner wrote:
> Put all the related code to set up and tear down the kmem accounting
> state into the same location. No functional change intended.
>
> Signed-off-by: Johannes Weiner <hannes@...xchg.org>
Acked-by: Michal Hocko <mhocko@...e.com>
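
One side note for readers following along: the three helpers being grouped
here pair up with the css lifecycle (online/offline/free). The callers are
not part of this hunk, so the following is only my sketch of the expected
wiring, with the callback bodies reduced to the kmem calls (everything
except the three memcg_*_kmem helpers is an assumption, not taken from
the patch):

	/* sketch only: css callbacks reduced to the kmem calls,
	 * assuming the usual online/offline/free wiring */
	static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
	{
		return memcg_init_kmem(mem_cgroup_from_css(css));
	}

	static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
	{
		memcg_offline_kmem(mem_cgroup_from_css(css));
	}

	static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
	{
		memcg_free_kmem(mem_cgroup_from_css(css));
	}
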
> ---
> mm/memcontrol.c | 157 +++++++++++++++++++++++++++-----------------------------
> 1 file changed, 76 insertions(+), 81 deletions(-)
>
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 22b8c4f..5118618 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -2924,12 +2924,88 @@ static int memcg_propagate_kmem(struct mem_cgroup *memcg)
> mutex_unlock(&memcg_limit_mutex);
> return ret;
> }
> +
> +static int memcg_init_kmem(struct mem_cgroup *memcg)
> +{
> + int ret;
> +
> + ret = memcg_propagate_kmem(memcg);
> + if (ret)
> + return ret;
> +
> + return tcp_init_cgroup(memcg);
> +}
> +
> +static void memcg_offline_kmem(struct mem_cgroup *memcg)
> +{
> + struct cgroup_subsys_state *css;
> + struct mem_cgroup *parent, *child;
> + int kmemcg_id;
> +
> + if (memcg->kmem_state != KMEM_ONLINE)
> + return;
> + /*
> + * Clear the online state before clearing memcg_caches array
> + * entries. The slab_mutex in memcg_deactivate_kmem_caches()
> + * guarantees that no cache will be created for this cgroup
> + * after we are done (see memcg_create_kmem_cache()).
> + */
> + memcg->kmem_state = KMEM_ALLOCATED;
> +
> + memcg_deactivate_kmem_caches(memcg);
> +
> + kmemcg_id = memcg->kmemcg_id;
> + BUG_ON(kmemcg_id < 0);
> +
> + parent = parent_mem_cgroup(memcg);
> + if (!parent)
> + parent = root_mem_cgroup;
> +
> + /*
> + * Change kmemcg_id of this cgroup and all its descendants to the
> + * parent's id, and then move all entries from this cgroup's list_lrus
> + * to ones of the parent. After we have finished, all list_lrus
> + * corresponding to this cgroup are guaranteed to remain empty. The
> + * ordering is imposed by list_lru_node->lock taken by
> + * memcg_drain_all_list_lrus().
> + */
> + css_for_each_descendant_pre(css, &memcg->css) {
> + child = mem_cgroup_from_css(css);
> + BUG_ON(child->kmemcg_id != kmemcg_id);
> + child->kmemcg_id = parent->kmemcg_id;
> + if (!memcg->use_hierarchy)
> + break;
> + }
> + memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
> +
> + memcg_free_cache_id(kmemcg_id);
> +}
> +
> +static void memcg_free_kmem(struct mem_cgroup *memcg)
> +{
> + if (memcg->kmem_state == KMEM_ALLOCATED) {
> + memcg_destroy_kmem_caches(memcg);
> + static_branch_dec(&memcg_kmem_enabled_key);
> + WARN_ON(page_counter_read(&memcg->kmem));
> + }
> + tcp_destroy_cgroup(memcg);
> +}
> #else
> static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
> unsigned long limit)
> {
> return -EINVAL;
> }
> +static int memcg_init_kmem(struct mem_cgroup *memcg)
> +{
> + return 0;
> +}
> +static void memcg_offline_kmem(struct mem_cgroup *memcg)
> +{
> +}
> +static void memcg_free_kmem(struct mem_cgroup *memcg)
> +{
> +}
> #endif /* CONFIG_MEMCG_KMEM */
>
> /*
> @@ -3555,87 +3631,6 @@ static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
> return 0;
> }
>
> -#ifdef CONFIG_MEMCG_KMEM
> -static int memcg_init_kmem(struct mem_cgroup *memcg)
> -{
> - int ret;
> -
> - ret = memcg_propagate_kmem(memcg);
> - if (ret)
> - return ret;
> -
> - return tcp_init_cgroup(memcg);
> -}
> -
> -static void memcg_offline_kmem(struct mem_cgroup *memcg)
> -{
> - struct cgroup_subsys_state *css;
> - struct mem_cgroup *parent, *child;
> - int kmemcg_id;
> -
> - if (memcg->kmem_state != KMEM_ONLINE)
> - return;
> - /*
> - * Clear the online state before clearing memcg_caches array
> - * entries. The slab_mutex in memcg_deactivate_kmem_caches()
> - * guarantees that no cache will be created for this cgroup
> - * after we are done (see memcg_create_kmem_cache()).
> - */
> - memcg->kmem_state = KMEM_ALLOCATED;
> -
> - memcg_deactivate_kmem_caches(memcg);
> -
> - kmemcg_id = memcg->kmemcg_id;
> - BUG_ON(kmemcg_id < 0);
> -
> - parent = parent_mem_cgroup(memcg);
> - if (!parent)
> - parent = root_mem_cgroup;
> -
> - /*
> - * Change kmemcg_id of this cgroup and all its descendants to the
> - * parent's id, and then move all entries from this cgroup's list_lrus
> - * to ones of the parent. After we have finished, all list_lrus
> - * corresponding to this cgroup are guaranteed to remain empty. The
> - * ordering is imposed by list_lru_node->lock taken by
> - * memcg_drain_all_list_lrus().
> - */
> - css_for_each_descendant_pre(css, &memcg->css) {
> - child = mem_cgroup_from_css(css);
> - BUG_ON(child->kmemcg_id != kmemcg_id);
> - child->kmemcg_id = parent->kmemcg_id;
> - if (!memcg->use_hierarchy)
> - break;
> - }
> - memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
> -
> - memcg_free_cache_id(kmemcg_id);
> -}
> -
> -static void memcg_free_kmem(struct mem_cgroup *memcg)
> -{
> - if (memcg->kmem_state == KMEM_ALLOCATED) {
> - memcg_destroy_kmem_caches(memcg);
> - static_branch_dec(&memcg_kmem_enabled_key);
> - WARN_ON(page_counter_read(&memcg->kmem));
> - }
> - tcp_destroy_cgroup(memcg);
> -}
> -#else
> -static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
> -{
> - return 0;
> -}
> -
> -static void memcg_offline_kmem(struct mem_cgroup *memcg)
> -{
> -}
> -
> -static void memcg_free_kmem(struct mem_cgroup *memcg)
> -{
> -}
> -#endif
> -
> #ifdef CONFIG_CGROUP_WRITEBACK
>
> struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
> --
> 2.6.3
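
One more note on the reparenting walk above, since it tends to trip people
up: css_for_each_descendant_pre() visits memcg itself first, so in the
!use_hierarchy case the loop updates just this cgroup and breaks out. The
id switch also has to happen before memcg_drain_all_list_lrus() so that
entries filed under the old id end up on the parent's lists. A userspace
toy model of the hand-off (hypothetical simplified types, clearly not
kernel code):

	#include <stdio.h>

	/* toy stand-in for struct mem_cgroup: an id plus children */
	struct toy_memcg {
		int kmemcg_id;
		struct toy_memcg *child[2];
	};

	/* pre-order walk, like css_for_each_descendant_pre(): the
	 * starting group first, then each subtree below it */
	static void reparent(struct toy_memcg *m, int new_id)
	{
		m->kmemcg_id = new_id;	/* descendants shared the old id */
		for (int i = 0; i < 2 && m->child[i]; i++)
			reparent(m->child[i], new_id);
	}

	int main(void)
	{
		struct toy_memcg grandchild = { .kmemcg_id = 7 };
		struct toy_memcg dying = { .kmemcg_id = 7,
					   .child = { &grandchild } };
		int parent_id = 3;

		reparent(&dying, parent_id);
		/* now a drain can move entries keyed by 7 over to 3 */
		printf("%d %d\n", dying.kmemcg_id, grandchild.kmemcg_id);
		return 0;
	}
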
--
Michal Hocko
SUSE Labs