[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20180520070846.jfgfl23ionghevlw@esperanza>
Date: Sun, 20 May 2018 10:08:46 +0300
From: Vladimir Davydov <vdavydov.dev@...il.com>
To: Kirill Tkhai <ktkhai@...tuozzo.com>
Cc: akpm@...ux-foundation.org, shakeelb@...gle.com,
viro@...iv.linux.org.uk, hannes@...xchg.org, mhocko@...nel.org,
tglx@...utronix.de, pombredanne@...b.com, stummala@...eaurora.org,
gregkh@...uxfoundation.org, sfr@...b.auug.org.au, guro@...com,
mka@...omium.org, penguin-kernel@...ove.SAKURA.ne.jp,
chris@...is-wilson.co.uk, longman@...hat.com, minchan@...nel.org,
ying.huang@...el.com, mgorman@...hsingularity.net, jbacik@...com,
linux@...ck-us.net, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, willy@...radead.org, lirongqing@...du.com,
aryabinin@...tuozzo.com
Subject: Re: [PATCH v6 03/17] mm: Assign id to every memcg-aware shrinker
Hello Kirill,
Generally, all patches in the series look OK to me, but I'm going to do
some nitpicking before I ack them. See below.
On Fri, May 18, 2018 at 11:42:08AM +0300, Kirill Tkhai wrote:
> The patch introduces shrinker::id number, which is used to enumerate
> memcg-aware shrinkers. The numbers start from 0, and the code tries
> to maintain it as small as possible.
>
> This will be used to represent memcg-aware shrinkers in the memcg
> shrinkers map.
>
> Since all memcg-aware shrinkers are based on list_lru, which is per-memcg
> in case of CONFIG_MEMCG_KMEM only, the new functionality will be under
> this config option.
>
> Signed-off-by: Kirill Tkhai <ktkhai@...tuozzo.com>
> ---
> include/linux/shrinker.h | 4 +++
> mm/vmscan.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 64 insertions(+)
>
> diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
> index 6794490f25b2..7ca9c18cf130 100644
> --- a/include/linux/shrinker.h
> +++ b/include/linux/shrinker.h
> @@ -66,6 +66,10 @@ struct shrinker {
>
> /* These are for internal use */
> struct list_head list;
> +#ifdef CONFIG_MEMCG_KMEM
> + /* ID in shrinker_idr */
> + int id;
> +#endif
> /* objs pending delete, per node */
> atomic_long_t *nr_deferred;
> };
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 50055d72f294..3de12a9bdf85 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -169,6 +169,48 @@ unsigned long vm_total_pages;
> static LIST_HEAD(shrinker_list);
> static DECLARE_RWSEM(shrinker_rwsem);
>
> +#ifdef CONFIG_MEMCG_KMEM
> +static DEFINE_IDR(shrinker_idr);
> +
> +static int prealloc_memcg_shrinker(struct shrinker *shrinker)
> +{
> + int id, ret;
> +
> + shrinker->id = -1;
> + down_write(&shrinker_rwsem);
> + ret = id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
> + if (ret < 0)
> + goto unlock;
> + shrinker->id = id;
> + ret = 0;
> +unlock:
> + up_write(&shrinker_rwsem);
> + return ret;
> +}
> +
> +static void unregister_memcg_shrinker(struct shrinker *shrinker)
> +{
> + int id = shrinker->id;
> +
> + if (id < 0)
> + return;
Nit: Please replace this early return with BUG_ON(id < 0) - this function
can only be called for a memcg-aware shrinker that has been fully
initialized (prealloc_shrinker() sets nr_deferred after id;
unregister_shrinker() returns immediately if nr_deferred is NULL), so a
negative id here would indicate a bug rather than a condition to tolerate.
> +
> + down_write(&shrinker_rwsem);
> + idr_remove(&shrinker_idr, id);
> + up_write(&shrinker_rwsem);
> + shrinker->id = -1;
Nit: I'd move this assignment before up_write(), so that the id is
cleared while shrinker_rwsem is still held, consistent with it being set
under the rwsem in prealloc_memcg_shrinker().
> +}
> +#else /* CONFIG_MEMCG_KMEM */
> +static int prealloc_memcg_shrinker(struct shrinker *shrinker)
> +{
> + return 0;
> +}
> +
> +static void unregister_memcg_shrinker(struct shrinker *shrinker)
> +{
> +}
> +#endif /* CONFIG_MEMCG_KMEM */
> +
> #ifdef CONFIG_MEMCG
> static bool global_reclaim(struct scan_control *sc)
> {
> @@ -306,6 +348,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone
> int prealloc_shrinker(struct shrinker *shrinker)
> {
> size_t size = sizeof(*shrinker->nr_deferred);
> + int ret;
>
> if (shrinker->flags & SHRINKER_NUMA_AWARE)
> size *= nr_node_ids;
> @@ -313,11 +356,26 @@ int prealloc_shrinker(struct shrinker *shrinker)
> shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
> if (!shrinker->nr_deferred)
> return -ENOMEM;
> +
> + if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
> + ret = prealloc_memcg_shrinker(shrinker);
> + if (ret)
> + goto free_deferred;
Nit: 'ret' is not really needed here.
> + }
> +
> return 0;
> +
> +free_deferred:
> + kfree(shrinker->nr_deferred);
> + shrinker->nr_deferred = NULL;
> + return -ENOMEM;
> }
>
> void free_prealloced_shrinker(struct shrinker *shrinker)
> {
> + if (shrinker->flags & SHRINKER_MEMCG_AWARE)
> + unregister_memcg_shrinker(shrinker);
> +
> kfree(shrinker->nr_deferred);
> shrinker->nr_deferred = NULL;
> }
> @@ -347,6 +405,8 @@ void unregister_shrinker(struct shrinker *shrinker)
> {
> if (!shrinker->nr_deferred)
> return;
> + if (shrinker->flags & SHRINKER_MEMCG_AWARE)
> + unregister_memcg_shrinker(shrinker);
> down_write(&shrinker_rwsem);
> list_del(&shrinker->list);
> up_write(&shrinker_rwsem);
>
Powered by blists - more mailing lists