Message-ID: <CAHbLzkrg8OYqbKevdV_6qJ5L9P-_8ui=HAgm-0o69yKLtMg8tQ@mail.gmail.com>
Date: Fri, 29 Jan 2021 10:04:38 -0800
From: Yang Shi <shy828301@...il.com>
To: Vlastimil Babka <vbabka@...e.cz>
Cc: Roman Gushchin <guro@...com>, Kirill Tkhai <ktkhai@...tuozzo.com>,
Shakeel Butt <shakeelb@...gle.com>,
Dave Chinner <david@...morbit.com>,
Johannes Weiner <hannes@...xchg.org>,
Michal Hocko <mhocko@...e.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Linux MM <linux-mm@...ck.org>,
Linux FS-devel Mailing List <linux-fsdevel@...r.kernel.org>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>
Subject: Re: [v5 PATCH 07/11] mm: vmscan: add per memcg shrinker nr_deferred
On Fri, Jan 29, 2021 at 9:20 AM Yang Shi <shy828301@...il.com> wrote:
>
> On Fri, Jan 29, 2021 at 5:00 AM Vlastimil Babka <vbabka@...e.cz> wrote:
> >
> > On 1/28/21 12:33 AM, Yang Shi wrote:
> > > Currently the number of deferred objects is per shrinker, but some slabs,
> > > for example, the vfs inode/dentry cache, are per memcg. This results in
> > > poor isolation among memcgs.
> > >
> > > The deferred objects are typically generated by __GFP_NOFS allocations.
> > > One memcg with excessive __GFP_NOFS allocations may blow up the deferred
> > > objects, and then other innocent memcgs may suffer from over-shrinking,
> > > excessive reclaim latency, etc.
> > >
> > > For example, two workloads run in memcgA and memcgB respectively; the
> > > workload in B is a vfs-heavy workload. The workload in A generates
> > > excessive deferred objects, so B's vfs cache might be hit heavily (half
> > > of the caches dropped) by B's limit reclaim or global reclaim.
> > >
> > > We observed this hit in our production environment, which was running a
> > > vfs-heavy workload, as shown in the tracing log below:
> > >
> > > <...>-409454 [016] .... 28286961.747146: mm_shrink_slab_start: super_cache_scan+0x0/0x1a0 ffff9a83046f3458:
> > > nid: 1 objects to shrink 3641681686040 gfp_flags GFP_HIGHUSER_MOVABLE|__GFP_ZERO pgs_scanned 1 lru_pgs 15721
> > > cache items 246404277 delta 31345 total_scan 123202138
> > > <...>-409454 [022] .... 28287105.928018: mm_shrink_slab_end: super_cache_scan+0x0/0x1a0 ffff9a83046f3458:
> > > nid: 1 unused scan count 3641681686040 new scan count 3641798379189 total_scan 602
> > > last shrinker return val 123186855
> > >
> > > The vfs cache to page cache ratio was 10:1 on this machine, and half of
> > > the caches were dropped. This also resulted in a significant amount of
> > > page cache being dropped due to inode eviction.
> > >
> > > Making nr_deferred per memcg for memcg-aware shrinkers would solve the
> > > unfairness and bring better isolation.
> > >
> > > When memcg is not enabled (!CONFIG_MEMCG or memcg disabled), the
> > > shrinker's nr_deferred is used. Non-memcg-aware shrinkers use the
> > > shrinker's nr_deferred all the time.
> > >
> > > Signed-off-by: Yang Shi <shy828301@...il.com>
> > > ---
> > > include/linux/memcontrol.h | 7 +++---
> > > mm/vmscan.c | 48 +++++++++++++++++++++++++-------------
> > > 2 files changed, 36 insertions(+), 19 deletions(-)
> > >
> > > diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> > > index 62b888b88a5f..e0384367e07d 100644
> > > --- a/include/linux/memcontrol.h
> > > +++ b/include/linux/memcontrol.h
> > > @@ -93,12 +93,13 @@ struct lruvec_stat {
> > > };
> > >
> > > /*
> > > - * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
> > > - * which have elements charged to this memcg.
> > > + * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
> > > + * shrinkers, which have elements charged to this memcg.
> > > */
> > > struct shrinker_info {
> > > struct rcu_head rcu;
> > > - unsigned long map[];
> > > + unsigned long *map;
> > > + atomic_long_t *nr_deferred;
> > > };
> > >
> > > /*
> > > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > > index 256896d157d4..20be0db291fe 100644
> > > --- a/mm/vmscan.c
> > > +++ b/mm/vmscan.c
> > > @@ -187,16 +187,21 @@ static DECLARE_RWSEM(shrinker_rwsem);
> > > #ifdef CONFIG_MEMCG
> > > static int shrinker_nr_max;
> > >
> > > +#define NR_MAX_TO_SHR_MAP_SIZE(nr_max) \
> > > + ((nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long))
> >
> > Could have been part of patch 4 already. And yeah, use DIV_ROUND_UP() here;
> > now that the calculation is hidden in a macro, the "shorter statement"
> > benefit of open-coding it disappears anyway :)
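If we go with DIV_ROUND_UP(), the macro would look something like the
below (untested sketch, assuming shrinker_nr_max counts the shrinker
ids, i.e. new_id + 1 as in expand_shrinker_info()):

#define NR_MAX_TO_SHR_MAP_SIZE(nr_max) \
	(DIV_ROUND_UP(nr_max, BITS_PER_LONG) * sizeof(unsigned long))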
> >
> > > +
> > > static void free_shrinker_info_rcu(struct rcu_head *head)
> > > {
> > > kvfree(container_of(head, struct shrinker_info, rcu));
> > > }
> > >
> > > static int expand_one_shrinker_info(struct mem_cgroup *memcg,
> > > - int size, int old_size)
> > > + int m_size, int d_size,
> > > + int old_m_size, int old_d_size)
> > > {
> > > struct shrinker_info *new, *old;
> > > int nid;
> > > + int size = m_size + d_size;
> > >
> > > for_each_node(nid) {
> > > old = rcu_dereference_protected(
> > > @@ -209,9 +214,15 @@ static int expand_one_shrinker_info(struct mem_cgroup *memcg,
> > > if (!new)
> > > return -ENOMEM;
> > >
> > > - /* Set all old bits, clear all new bits */
> > > - memset(new->map, (int)0xff, old_size);
> > > - memset((void *)new->map + old_size, 0, size - old_size);
> > > + new->map = (unsigned long *)(new + 1);
> > > + new->nr_deferred = (void *)new->map + m_size;
> >
> > This better be aligned to sizeof(atomic_long_t). Can we be sure about that?
>
> Good point. No, not if unsigned long is 32-bit on some 64-bit machines.
I think we could just change map to "u64" and guarantee that struct
shrinker_info is 64-bit aligned.
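Roughly like the below (untested sketch of the idea; the size macro and
the bitmap accessors would have to be adjusted to match):

	struct shrinker_info {
		struct rcu_head rcu;
		/* stored as u64 words so the map size is always a
		 * multiple of 8 bytes and the nr_deferred array that
		 * follows it in the same allocation stays naturally
		 * aligned for atomic_long_t
		 */
		u64 *map;
		atomic_long_t *nr_deferred;
	};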
>
> > Also it's all quite ugly and complex. Is it worth it? What about just leaving
> > map as it is and allocating a nr_deferred array separately, i.e.:
> >
> > struct shrinker_info {
> > struct rcu_head rcu;
> > atomic_long_t *nr_deferred; // allocated separately
> > unsigned long map[];
> > };
>
> So, you mean we allocate shrinker_info with the map array in the first
> step, then allocate nr_deferred separately? That is ok, but I'm afraid
> the error handling may not make the code as clean as you expect, since
> we have to call kvmalloc() twice. And we still need to do all the
> initialization and copy work, so eventually we just trade the pointer
> assignments for error handling. I'm not quite sure it is worth it; the
> nested error handling might be more error prone.
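To make that concrete, the loop in alloc_shrinker_info() would turn into
something like the below (untested sketch, only to show the extra unwind
path; expand_one_shrinker_info() and free_shrinker_info() would need the
matching two-step handling too):

	for_each_node(nid) {
		/* first allocation: struct + map[] flexible array */
		info = kvzalloc_node(sizeof(*info) + m_size, GFP_KERNEL, nid);
		if (!info) {
			ret = -ENOMEM;
			break;
		}
		/* second allocation: the per-node nr_deferred array */
		info->nr_deferred = kvzalloc_node(d_size, GFP_KERNEL, nid);
		if (!info->nr_deferred) {
			/* the nested unwinding I'm worried about */
			kvfree(info);
			ret = -ENOMEM;
			break;
		}
		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
	}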
>
> >
> > > + /* map: set all old bits, clear all new bits */
> > > + memset(new->map, (int)0xff, old_m_size);
> > > + memset((void *)new->map + old_m_size, 0, m_size - old_m_size);
> > > + /* nr_deferred: copy old values, clear all new values */
> > > + memcpy(new->nr_deferred, old->nr_deferred, old_d_size);
> > > + memset((void *)new->nr_deferred + old_d_size, 0, d_size - old_d_size);
> > >
> > > rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, new);
> > > call_rcu(&old->rcu, free_shrinker_info_rcu);
> > > @@ -226,9 +237,6 @@ void free_shrinker_info(struct mem_cgroup *memcg)
> > > struct shrinker_info *info;
> > > int nid;
> > >
> > > - if (mem_cgroup_is_root(memcg))
> > > - return;
> > > -
> > > for_each_node(nid) {
> > > pn = mem_cgroup_nodeinfo(memcg, nid);
> > > info = rcu_dereference_protected(pn->shrinker_info, true);
> > > @@ -242,12 +250,13 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
> > > {
> > > struct shrinker_info *info;
> > > int nid, size, ret = 0;
> > > -
> > > - if (mem_cgroup_is_root(memcg))
> > > - return 0;
> > > + int m_size, d_size = 0;
> > >
> > > down_write(&shrinker_rwsem);
> > > - size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> > > + m_size = NR_MAX_TO_SHR_MAP_SIZE(shrinker_nr_max);
> > > + d_size = shrinker_nr_max * sizeof(atomic_long_t);
> > > + size = m_size + d_size;
> > > +
> > > for_each_node(nid) {
> > > info = kvzalloc_node(sizeof(*info) + size, GFP_KERNEL, nid);
> > > if (!info) {
> > > @@ -255,6 +264,8 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
> > > ret = -ENOMEM;
> > > break;
> > > }
> > > + info->map = (unsigned long *)(info + 1);
> > > + info->nr_deferred = (void *)info->map + m_size;
> > > rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
> > > }
> > > up_write(&shrinker_rwsem);
> > > @@ -266,10 +277,16 @@ static int expand_shrinker_info(int new_id)
> > > {
> > > int size, old_size, ret = 0;
> > > int new_nr_max = new_id + 1;
> > > + int m_size, d_size = 0;
> > > + int old_m_size, old_d_size = 0;
> > > struct mem_cgroup *memcg;
> > >
> > > - size = (new_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> > > - old_size = (shrinker_nr_max / BITS_PER_LONG + 1) * sizeof(unsigned long);
> > > + m_size = NR_MAX_TO_SHR_MAP_SIZE(new_nr_max);
> > > + d_size = new_nr_max * sizeof(atomic_long_t);
> > > + size = m_size + d_size;
> > > + old_m_size = NR_MAX_TO_SHR_MAP_SIZE(shrinker_nr_max);
> > > + old_d_size = shrinker_nr_max * sizeof(atomic_long_t);
> > > + old_size = old_m_size + old_d_size;
> > > if (size <= old_size)
> > > goto out;
> > >
> > > @@ -278,9 +295,8 @@ static int expand_shrinker_info(int new_id)
> > >
> > > memcg = mem_cgroup_iter(NULL, NULL, NULL);
> > > do {
> > > - if (mem_cgroup_is_root(memcg))
> > > - continue;
> > > - ret = expand_one_shrinker_info(memcg, size, old_size);
> > > + ret = expand_one_shrinker_info(memcg, m_size, d_size,
> > > + old_m_size, old_d_size);
> > > if (ret) {
> > > mem_cgroup_iter_break(NULL, memcg);
> > > goto out;
> > >
> >