Message-ID: <CALvZod6R=cEwK+AND+E3CG3LrWViNjWDd0dy-Brz1MhJWyJ+ZQ@mail.gmail.com>
Date: Mon, 8 Mar 2021 11:12:03 -0800
From: Shakeel Butt <shakeelb@...gle.com>
To: Yang Shi <shy828301@...il.com>
Cc: Roman Gushchin <guro@...com>, Kirill Tkhai <ktkhai@...tuozzo.com>,
Vlastimil Babka <vbabka@...e.cz>,
Dave Chinner <david@...morbit.com>,
Johannes Weiner <hannes@...xchg.org>,
Michal Hocko <mhocko@...e.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Linux MM <linux-mm@...ck.org>,
linux-fsdevel <linux-fsdevel@...r.kernel.org>,
LKML <linux-kernel@...r.kernel.org>
Subject: Re: [v8 PATCH 09/13] mm: vmscan: add per memcg shrinker nr_deferred
On Tue, Feb 16, 2021 at 4:13 PM Yang Shi <shy828301@...il.com> wrote:
>
> Currently the number of deferred objects is per shrinker, but some slabs, for example the
> vfs inode/dentry cache, are per memcg. This results in poor isolation among memcgs.
>
> Deferred objects are typically generated by __GFP_NOFS allocations. One memcg with
> excessive __GFP_NOFS allocations can blow up the deferred count, and other innocent memcgs
> may then suffer from over-shrinking, excessive reclaim latency, etc.
>
> For example, two workloads run in memcg A and memcg B respectively, and the workload in B
> is vfs heavy. If the workload in A generates excessive deferred objects, B's vfs cache may
> be hit heavily (half of the caches dropped) by B's limit reclaim or by global reclaim.
>
> We observed this in our production environment, which was running a vfs heavy workload,
> as shown in the tracing log below:
>
> <...>-409454 [016] .... 28286961.747146: mm_shrink_slab_start: super_cache_scan+0x0/0x1a0 ffff9a83046f3458:
> nid: 1 objects to shrink 3641681686040 gfp_flags GFP_HIGHUSER_MOVABLE|__GFP_ZERO pgs_scanned 1 lru_pgs 15721
> cache items 246404277 delta 31345 total_scan 123202138
> <...>-409454 [022] .... 28287105.928018: mm_shrink_slab_end: super_cache_scan+0x0/0x1a0 ffff9a83046f3458:
> nid: 1 unused scan count 3641681686040 new scan count 3641798379189 total_scan 602
> last shrinker return val 123186855
>
> The vfs cache to page cache ratio was 10:1 on this machine, and half of the caches were
> dropped. This also resulted in a significant amount of page cache being dropped due to
> inode eviction.
>
> Making nr_deferred per memcg for memcg aware shrinkers solves the unfairness and brings
> better isolation.
>
> When memcg is not enabled (!CONFIG_MEMCG or memcg disabled), the shrinker's own nr_deferred
> is used. Non memcg aware shrinkers use the shrinker's nr_deferred all the time.
>
> Signed-off-by: Yang Shi <shy828301@...il.com>
> ---
> include/linux/memcontrol.h | 7 +++--
> mm/vmscan.c | 60 ++++++++++++++++++++++++++------------
> 2 files changed, 46 insertions(+), 21 deletions(-)
>
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 4c9253896e25..c457fc7bc631 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -93,12 +93,13 @@ struct lruvec_stat {
> };
>
> /*
> - * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
> - * which have elements charged to this memcg.
> + * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
> + * shrinkers, which have elements charged to this memcg.
> */
> struct shrinker_info {
> struct rcu_head rcu;
> - unsigned long map[];
> + atomic_long_t *nr_deferred;
> + unsigned long *map;
> };
>
> /*
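
(Aside for readers of the thread: below is a minimal userspace sketch of the
single-allocation layout this patch introduces, i.e. the struct followed by the
nr_deferred array and then the bitmap; the size helpers appear later in the
vmscan.c hunks. The macro mirrors and the nr_items value are illustrative
assumptions, not kernel code.)

/*
 * Userspace sketch (assumption, not the kernel implementation) of the
 * combined allocation: struct shrinker_info, then the per-shrinker
 * deferred counters, then the shrinker id bitmap.
 */
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
/* mirrors of the kernel helpers, assumed semantics */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ROUND_UP(n, d)		(DIV_ROUND_UP(n, d) * (d))

struct shrinker_info_sketch {
	long *nr_deferred;		/* stands in for atomic_long_t * */
	unsigned long *map;
};

int main(void)
{
	int nr_items = 100;		/* hypothetical shrinker_nr_max */
	size_t defer_size = ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(long);
	size_t map_size = DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned long);
	struct shrinker_info_sketch *info;

	info = calloc(1, sizeof(*info) + defer_size + map_size);
	if (!info)
		return 1;

	/* same pointer arithmetic as the patch: both arrays live right after the struct */
	info->nr_deferred = (long *)(info + 1);
	info->map = (unsigned long *)((char *)info->nr_deferred + defer_size);

	/* with 64-bit longs this prints defer_size=1024 map_size=16 */
	printf("defer_size=%zu map_size=%zu\n", defer_size, map_size);
	free(info);
	return 0;
}
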
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index a1047ea60ecf..fcb399e18fc3 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -187,11 +187,17 @@ static DECLARE_RWSEM(shrinker_rwsem);
> #ifdef CONFIG_MEMCG
> static int shrinker_nr_max;
>
> +/* The shrinker_info is expanded in a batch of BITS_PER_LONG */
> static inline int shrinker_map_size(int nr_items)
> {
> return (DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned long));
> }
>
> +static inline int shrinker_defer_size(int nr_items)
> +{
> + return (round_up(nr_items, BITS_PER_LONG) * sizeof(atomic_long_t));
> +}
> +
> static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
> int nid)
> {
> @@ -200,10 +206,12 @@ static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
> }
>
> static int expand_one_shrinker_info(struct mem_cgroup *memcg,
> - int size, int old_size)
> + int map_size, int defer_size,
> + int old_map_size, int old_defer_size)
> {
> struct shrinker_info *new, *old;
> int nid;
> + int size = map_size + defer_size;
>
> for_each_node(nid) {
> old = shrinker_info_protected(memcg, nid);
> @@ -215,9 +223,16 @@ static int expand_one_shrinker_info(struct mem_cgroup *memcg,
> if (!new)
> return -ENOMEM;
>
> - /* Set all old bits, clear all new bits */
> - memset(new->map, (int)0xff, old_size);
> - memset((void *)new->map + old_size, 0, size - old_size);
> + new->nr_deferred = (atomic_long_t *)(new + 1);
> + new->map = (void *)new->nr_deferred + defer_size;
> +
> + /* map: set all old bits, clear all new bits */
> + memset(new->map, (int)0xff, old_map_size);
> + memset((void *)new->map + old_map_size, 0, map_size - old_map_size);
> + /* nr_deferred: copy old values, clear all new values */
> + memcpy(new->nr_deferred, old->nr_deferred, old_defer_size);
> + memset((void *)new->nr_deferred + old_defer_size, 0,
> + defer_size - old_defer_size);
>
> rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, new);
> kvfree_rcu(old);
> @@ -232,9 +247,6 @@ void free_shrinker_info(struct mem_cgroup *memcg)
> struct shrinker_info *info;
> int nid;
>
> - if (mem_cgroup_is_root(memcg))
> - return;
> -
> for_each_node(nid) {
> pn = mem_cgroup_nodeinfo(memcg, nid);
> info = shrinker_info_protected(memcg, nid);
> @@ -247,12 +259,12 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
> {
> struct shrinker_info *info;
> int nid, size, ret = 0;
> -
> - if (mem_cgroup_is_root(memcg))
> - return 0;
Can you please comment on the consequences of allowing shrinker_info to be
allocated for the root memcg? Why didn't we do that before, and why is it
fine (or maybe required) now? Please add the explanation to the commit
message.
> + int map_size, defer_size = 0;
>
> down_write(&shrinker_rwsem);
> - size = shrinker_map_size(shrinker_nr_max);
> + map_size = shrinker_map_size(shrinker_nr_max);
> + defer_size = shrinker_defer_size(shrinker_nr_max);
> + size = map_size + defer_size;
> for_each_node(nid) {
> info = kvzalloc_node(sizeof(*info) + size, GFP_KERNEL, nid);
> if (!info) {
> @@ -260,6 +272,8 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
> ret = -ENOMEM;
> break;
> }
> + info->nr_deferred = (atomic_long_t *)(info + 1);
> + info->map = (void *)info->nr_deferred + defer_size;
> rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
> }
> up_write(&shrinker_rwsem);
> @@ -267,15 +281,21 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
> return ret;
> }
>
> +static inline bool need_expand(int nr_max)
> +{
> + return round_up(nr_max, BITS_PER_LONG) >
> + round_up(shrinker_nr_max, BITS_PER_LONG);
> +}
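
(Worked example of my reading of need_expand, assuming 64-bit longs: expansion
only happens when a new shrinker id crosses a BITS_PER_LONG boundary. With
shrinker_nr_max = 64, new_id = 64 gives new_nr_max = 65, round_up(65, 64) = 128
> 64, so we expand; new_id = 20 rounds both sides to 64 and the expansion is
skipped.)
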
> +
> static int expand_shrinker_info(int new_id)
> {
> - int size, old_size, ret = 0;
> + int ret = 0;
> int new_nr_max = new_id + 1;
> + int map_size, defer_size = 0;
> + int old_map_size, old_defer_size = 0;
> struct mem_cgroup *memcg;
>
> - size = shrinker_map_size(new_nr_max);
> - old_size = shrinker_map_size(shrinker_nr_max);
> - if (size <= old_size)
> + if (!need_expand(new_nr_max))
> goto out;
>
> if (!root_mem_cgroup)
> @@ -283,11 +303,15 @@ static int expand_shrinker_info(int new_id)
>
> lockdep_assert_held(&shrinker_rwsem);
>
> + map_size = shrinker_map_size(new_nr_max);
> + defer_size = shrinker_defer_size(new_nr_max);
> + old_map_size = shrinker_map_size(shrinker_nr_max);
> + old_defer_size = shrinker_defer_size(shrinker_nr_max);
> +
> memcg = mem_cgroup_iter(NULL, NULL, NULL);
> do {
> - if (mem_cgroup_is_root(memcg))
> - continue;
> - ret = expand_one_shrinker_info(memcg, size, old_size);
> + ret = expand_one_shrinker_info(memcg, map_size, defer_size,
> + old_map_size, old_defer_size);
> if (ret) {
> mem_cgroup_iter_break(NULL, memcg);
> goto out;
> --
> 2.26.2
>