Message-ID: <20210415032730.mgw6yty22vdzhgjl@gabell>
Date: Wed, 14 Apr 2021 23:27:30 -0400
From: Masayoshi Mizuma <msys.mizuma@...il.com>
To: Waiman Long <longman@...hat.com>
Cc: Johannes Weiner <hannes@...xchg.org>,
Michal Hocko <mhocko@...nel.org>,
Vladimir Davydov <vdavydov.dev@...il.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Tejun Heo <tj@...nel.org>, Christoph Lameter <cl@...ux.com>,
Pekka Enberg <penberg@...nel.org>,
David Rientjes <rientjes@...gle.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
Vlastimil Babka <vbabka@...e.cz>, Roman Gushchin <guro@...com>,
linux-kernel@...r.kernel.org, cgroups@...r.kernel.org,
linux-mm@...ck.org, Shakeel Butt <shakeelb@...gle.com>,
Muchun Song <songmuchun@...edance.com>,
Alex Shi <alex.shi@...ux.alibaba.com>,
Chris Down <chris@...isdown.name>,
Yafang Shao <laoar.shao@...il.com>,
Wei Yang <richard.weiyang@...il.com>,
Xing Zhengjun <zhengjun.xing@...ux.intel.com>
Subject: Re: [PATCH v3 1/5] mm/memcg: Pass both memcg and lruvec to
mod_memcg_lruvec_state()
On Tue, Apr 13, 2021 at 09:20:23PM -0400, Waiman Long wrote:
> The caller of mod_memcg_lruvec_state() has both memcg and lruvec
> readily available, so both are now passed to mod_memcg_lruvec_state()
> and __mod_memcg_lruvec_state(). __mod_memcg_lruvec_state() is updated
> to allow either of the two parameters to be NULL. This makes
> mod_memcg_lruvec_state() equivalent to mod_memcg_state() when lruvec
> is NULL.
>
> The new __mod_memcg_lruvec_state() function will be used in the next
> patch as a replacement for mod_memcg_state() in mm/percpu.c, as part
> of consolidating the memory uncharge and vmstat update functions in
> the kmem_cache_free() path.
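
Just to confirm I'm reading the new convention correctly, the callers
end up looking roughly like this (my own sketch of the three call
patterns, using only the functions and parameters touched by this
patch, not code taken from the series):

	/* both at hand, e.g. mod_objcg_state(): update memcg and lruvec */
	__mod_memcg_lruvec_state(memcg, lruvec, idx, val);

	/* memcg looked up from lruvec, e.g. __mod_lruvec_state() */
	__mod_memcg_lruvec_state(NULL, lruvec, idx, val);

	/* lruvec == NULL: memcg-only update, same as __mod_memcg_state() */
	__mod_memcg_lruvec_state(memcg, NULL, idx, val);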
>
> Signed-off-by: Waiman Long <longman@...hat.com>
> Acked-by: Roman Gushchin <guro@...com>
> Reviewed-by: Shakeel Butt <shakeelb@...gle.com>
> ---
>  include/linux/memcontrol.h | 12 +++++++-----
>  mm/memcontrol.c            | 19 +++++++++++++------
>  mm/slab.h                  |  2 +-
>  3 files changed, 21 insertions(+), 12 deletions(-)
>
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 0c04d39a7967..95f12996e66c 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -955,8 +955,8 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
>  	return x;
>  }
> 
> -void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
> -			      int val);
> +void __mod_memcg_lruvec_state(struct mem_cgroup *memcg, struct lruvec *lruvec,
> +			      enum node_stat_item idx, int val);
>  void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
> 
>  static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
> @@ -969,13 +969,14 @@ static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
>  	local_irq_restore(flags);
>  }
> 
> -static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
> +static inline void mod_memcg_lruvec_state(struct mem_cgroup *memcg,
> +					  struct lruvec *lruvec,
>  					  enum node_stat_item idx, int val)
>  {
>  	unsigned long flags;
> 
>  	local_irq_save(flags);
> -	__mod_memcg_lruvec_state(lruvec, idx, val);
> +	__mod_memcg_lruvec_state(memcg, lruvec, idx, val);
>  	local_irq_restore(flags);
>  }
> 
> @@ -1369,7 +1370,8 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
>  	return node_page_state(lruvec_pgdat(lruvec), idx);
>  }
> 
> -static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
> +static inline void __mod_memcg_lruvec_state(struct mem_cgroup *memcg,
> +					    struct lruvec *lruvec,
>  					    enum node_stat_item idx, int val)
>  {
>  }
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index e064ac0d850a..d66e1e38f8ac 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -799,20 +799,27 @@ parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
>  	return mem_cgroup_nodeinfo(parent, nid);
>  }
> 
> -void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
> -			      int val)
> +/*
> + * Either one of memcg or lruvec can be NULL, but not both.
> + */
> +void __mod_memcg_lruvec_state(struct mem_cgroup *memcg, struct lruvec *lruvec,
> +			      enum node_stat_item idx, int val)
>  {
>  	struct mem_cgroup_per_node *pn;
> -	struct mem_cgroup *memcg;
>  	long x, threshold = MEMCG_CHARGE_BATCH;
> 
> +	/* Update lruvec */
>  	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
> -	memcg = pn->memcg;
> +
> +	if (!memcg)
> +		memcg = pn->memcg;
> 
>  	/* Update memcg */
>  	__mod_memcg_state(memcg, idx, val);
> 
> -	/* Update lruvec */
> +	if (!lruvec)
> +		return;
> +
>  	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);
> 
>  	if (vmstat_item_in_bytes(idx))
> @@ -848,7 +855,7 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
> 
>  	/* Update memcg and lruvec */
>  	if (!mem_cgroup_disabled())
> -		__mod_memcg_lruvec_state(lruvec, idx, val);
> +		__mod_memcg_lruvec_state(NULL, lruvec, idx, val);
>  }
> 
>  void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
> diff --git a/mm/slab.h b/mm/slab.h
> index 076582f58f68..bc6c7545e487 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -293,7 +293,7 @@ static inline void mod_objcg_state(struct obj_cgroup *objcg,
>  	rcu_read_lock();
>  	memcg = obj_cgroup_memcg(objcg);
>  	lruvec = mem_cgroup_lruvec(memcg, pgdat);
> -	mod_memcg_lruvec_state(lruvec, idx, nr);
> +	mod_memcg_lruvec_state(memcg, lruvec, idx, nr);
>  	rcu_read_unlock();
>  }
>
> --
> 2.18.1
>
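
I'm guessing the mm/percpu.c conversion in the next patch boils down to
the lruvec == NULL case, i.e. something like the hypothetical line below
(the actual stat item and value are of course up to that patch):

	/* was: mod_memcg_state(memcg, idx, nr); */
	__mod_memcg_lruvec_state(memcg, NULL, idx, nr);
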
Please feel free to add:
Tested-by: Masayoshi Mizuma <m.mizuma@...fujitsu.com>
Thanks!
Masa