Message-ID: <CAHbLzkqkv4Z01G0NmNbJyF-dDnavHtAwC0U0YnpL_N=xhQ9kJQ@mail.gmail.com>
Date: Fri, 29 Jan 2021 09:22:04 -0800
From: Yang Shi <shy828301@...il.com>
To: Kirill Tkhai <ktkhai@...tuozzo.com>
Cc: Roman Gushchin <guro@...com>, Shakeel Butt <shakeelb@...gle.com>,
Dave Chinner <david@...morbit.com>,
Johannes Weiner <hannes@...xchg.org>,
Michal Hocko <mhocko@...e.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Linux MM <linux-mm@...ck.org>,
Linux FS-devel Mailing List <linux-fsdevel@...r.kernel.org>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>
Subject: Re: [v5 PATCH 08/11] mm: vmscan: use per memcg nr_deferred of shrinker
On Fri, Jan 29, 2021 at 6:59 AM Kirill Tkhai <ktkhai@...tuozzo.com> wrote:
>
> On 29.01.2021 17:55, Kirill Tkhai wrote:
> > On 28.01.2021 02:33, Yang Shi wrote:
> >> Use the per-memcg nr_deferred for memcg-aware shrinkers. The shrinker's own
> >> nr_deferred will still be used in the following cases:
> >> 1. Non-memcg-aware shrinkers
> >> 2. !CONFIG_MEMCG
> >> 3. memcg is disabled by a boot parameter
> >>
> >> Signed-off-by: Yang Shi <shy828301@...il.com>
> >> ---
> >> mm/vmscan.c | 87 ++++++++++++++++++++++++++++++++++++++++++++---------
> >> 1 file changed, 73 insertions(+), 14 deletions(-)
> >>
> >> diff --git a/mm/vmscan.c b/mm/vmscan.c
> >> index 20be0db291fe..e1f8960f5cf6 100644
> >> --- a/mm/vmscan.c
> >> +++ b/mm/vmscan.c
> >> @@ -205,7 +205,8 @@ static int expand_one_shrinker_info(struct mem_cgroup *memcg,
> >>
> >>          for_each_node(nid) {
> >>                  old = rcu_dereference_protected(
> >> -                        mem_cgroup_nodeinfo(memcg, nid)->shrinker_info, true);
> >> +                        mem_cgroup_nodeinfo(memcg, nid)->shrinker_info,
> >> +                        lockdep_is_held(&shrinker_rwsem));
> >
> > Wouldn't it be better to pack this repeating pattern into a helper function, e.g.:
> >
> > static struct shrinker_info *memcg_shrinker_info(struct mem_cgroup *memcg, int nid)
> > {
> >         return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
> >                                          lockdep_is_held(&shrinker_rwsem));
> > }
> >
> > ?
> >
> > Even shrink_slab_memcg() may want to use it.
>
> Hm, I see you already introduced a helper in [10/11], but it is used in only one place.
> Then we should use it in all these places (i.e. introduce the helper earlier in the series).
Yes, good point. Will fix in v6.
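
Something along these lines (just a sketch; the helper name below is a
placeholder, the final name in v6 may differ):

static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
                                                     int nid)
{
        /* Safe: all callers hold shrinker_rwsem, so the pointer is stable */
        return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
                                         lockdep_is_held(&shrinker_rwsem));
}

Then expand_one_shrinker_info(), free_shrinker_info(), count_nr_deferred_memcg()
and set_nr_deferred_memcg() (and shrink_slab_memcg(), per your suggestion) can
all call it instead of open-coding the rcu_dereference_protected() +
lockdep_is_held() pair, e.g.:

static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
                                    struct mem_cgroup *memcg)
{
        struct shrinker_info *info = shrinker_info_protected(memcg, nid);

        return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
}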
>
> >>                  /* Not yet online memcg */
> >>                  if (!old)
> >>                          return 0;
> >> @@ -239,7 +240,8 @@ void free_shrinker_info(struct mem_cgroup *memcg)
> >>
> >>          for_each_node(nid) {
> >>                  pn = mem_cgroup_nodeinfo(memcg, nid);
> >> -                info = rcu_dereference_protected(pn->shrinker_info, true);
> >> +                info = rcu_dereference_protected(pn->shrinker_info,
> >> +                                                 lockdep_is_held(&shrinker_rwsem));
> >>                  if (info)
> >>                          kvfree(info);
> >>                  rcu_assign_pointer(pn->shrinker_info, NULL);
> >> @@ -360,6 +362,27 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
> >>          up_write(&shrinker_rwsem);
> >> }
> >>
> >> +static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
> >> +                                    struct mem_cgroup *memcg)
> >> +{
> >> +        struct shrinker_info *info;
> >> +
> >> +        info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
> >> +                                         lockdep_is_held(&shrinker_rwsem));
> >> +        return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
> >> +}
> >> +
> >> +static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
> >> +                                  struct mem_cgroup *memcg)
> >> +{
> >> +        struct shrinker_info *info;
> >> +
> >> +        info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
> >> +                                         lockdep_is_held(&shrinker_rwsem));
> >> +
> >> +        return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
> >> +}
> >> +
> >>  static bool cgroup_reclaim(struct scan_control *sc)
> >>  {
> >>          return sc->target_mem_cgroup;
> >> @@ -398,6 +421,18 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
> >> {
> >> }
> >>
> >> +static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
> >> +                                    struct mem_cgroup *memcg)
> >> +{
> >> +        return 0;
> >> +}
> >> +
> >> +static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
> >> +                                  struct mem_cgroup *memcg)
> >> +{
> >> +        return 0;
> >> +}
> >> +
> >>  static bool cgroup_reclaim(struct scan_control *sc)
> >>  {
> >>          return false;
> >> @@ -409,6 +444,39 @@ static bool writeback_throttling_sane(struct scan_control *sc)
> >> }
> >> #endif
> >>
> >> +static long count_nr_deferred(struct shrinker *shrinker,
> >> +                              struct shrink_control *sc)
> >> +{
> >> +        int nid = sc->nid;
> >> +
> >> +        if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> >> +                nid = 0;
> >> +
> >> +        if (sc->memcg &&
> >> +            (shrinker->flags & SHRINKER_MEMCG_AWARE))
> >> +                return count_nr_deferred_memcg(nid, shrinker,
> >> +                                               sc->memcg);
> >> +
> >> +        return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
> >> +}
> >> +
> >> +
> >> +static long set_nr_deferred(long nr, struct shrinker *shrinker,
> >> +                            struct shrink_control *sc)
> >> +{
> >> +        int nid = sc->nid;
> >> +
> >> +        if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> >> +                nid = 0;
> >> +
> >> +        if (sc->memcg &&
> >> +            (shrinker->flags & SHRINKER_MEMCG_AWARE))
> >> +                return set_nr_deferred_memcg(nr, nid, shrinker,
> >> +                                             sc->memcg);
> >> +
> >> +        return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
> >> +}
> >> +
> >>  /*
> >>   * This misses isolated pages which are not accounted for to save counters.
> >>   * As the data only determines if reclaim or compaction continues, it is
> >> @@ -545,14 +613,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
> >>          long freeable;
> >>          long nr;
> >>          long new_nr;
> >> -        int nid = shrinkctl->nid;
> >>          long batch_size = shrinker->batch ? shrinker->batch
> >>                                            : SHRINK_BATCH;
> >>          long scanned = 0, next_deferred;
> >>
> >> -        if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> >> -                nid = 0;
> >> -
> >>          freeable = shrinker->count_objects(shrinker, shrinkctl);
> >>          if (freeable == 0 || freeable == SHRINK_EMPTY)
> >>                  return freeable;
> >> @@ -562,7 +626,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
> >>           * and zero it so that other concurrent shrinker invocations
> >>           * don't also do this scanning work.
> >>           */
> >> -        nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
> >> +        nr = count_nr_deferred(shrinker, shrinkctl);
> >>
> >>          total_scan = nr;
> >>          if (shrinker->seeks) {
> >> @@ -653,14 +717,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
> >>                  next_deferred = 0;
> >>          /*
> >>           * move the unused scan count back into the shrinker in a
> >> -         * manner that handles concurrent updates. If we exhausted the
> >> -         * scan, there is no need to do an update.
> >> +         * manner that handles concurrent updates.
> >>           */
> >> -        if (next_deferred > 0)
> >> -                new_nr = atomic_long_add_return(next_deferred,
> >> -                                                &shrinker->nr_deferred[nid]);
> >> -        else
> >> -                new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
> >> +        new_nr = set_nr_deferred(next_deferred, shrinker, shrinkctl);
> >>
> >>          trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
> >>          return freed;
> >>
> >
>
>