Message-ID: <e7204276-9de5-17eb-90ae-e51657d73ef4@linux.dev>
Date: Wed, 26 Jul 2023 14:49:05 +0800
From: Muchun Song <muchun.song@...ux.dev>
To: Qi Zheng <zhengqi.arch@...edance.com>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org, x86@...nel.org,
kvm@...r.kernel.org, xen-devel@...ts.xenproject.org,
linux-erofs@...ts.ozlabs.org, linux-f2fs-devel@...ts.sourceforge.net,
cluster-devel@...hat.com, linux-nfs@...r.kernel.org,
linux-mtd@...ts.infradead.org, rcu@...r.kernel.org, netdev@...r.kernel.org,
dri-devel@...ts.freedesktop.org, linux-arm-msm@...r.kernel.org,
dm-devel@...hat.com, linux-raid@...r.kernel.org,
linux-bcache@...r.kernel.org, virtualization@...ts.linux-foundation.org,
linux-fsdevel@...r.kernel.org, linux-ext4@...r.kernel.org,
linux-xfs@...r.kernel.org, linux-btrfs@...r.kernel.org,
akpm@...ux-foundation.org, david@...morbit.com, tkhai@...ru, vbabka@...e.cz,
roman.gushchin@...ux.dev, djwong@...nel.org, brauner@...nel.org,
paulmck@...nel.org, tytso@....edu, steven.price@....com, cel@...nel.org,
senozhatsky@...omium.org, yujie.liu@...el.com, gregkh@...uxfoundation.org
Subject: Re: [PATCH v2 11/47] gfs2: dynamically allocate the gfs2-qd shrinker
On 2023/7/24 17:43, Qi Zheng wrote:
> Use new APIs to dynamically allocate the gfs2-qd shrinker.
>
> Signed-off-by: Qi Zheng <zhengqi.arch@...edance.com>
> ---
> fs/gfs2/main.c | 6 +++---
> fs/gfs2/quota.c | 26 ++++++++++++++++++++------
> fs/gfs2/quota.h | 3 ++-
> 3 files changed, 25 insertions(+), 10 deletions(-)
>
> diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
> index afcb32854f14..e47b1cc79f59 100644
> --- a/fs/gfs2/main.c
> +++ b/fs/gfs2/main.c
> @@ -147,7 +147,7 @@ static int __init init_gfs2_fs(void)
> if (!gfs2_trans_cachep)
> goto fail_cachep8;
>
> - error = register_shrinker(&gfs2_qd_shrinker, "gfs2-qd");
> + error = gfs2_qd_shrinker_init();
> if (error)
> goto fail_shrinker;
>
> @@ -196,7 +196,7 @@ static int __init init_gfs2_fs(void)
> fail_wq2:
> destroy_workqueue(gfs_recovery_wq);
> fail_wq1:
> - unregister_shrinker(&gfs2_qd_shrinker);
> + gfs2_qd_shrinker_exit();
> fail_shrinker:
> kmem_cache_destroy(gfs2_trans_cachep);
> fail_cachep8:
> @@ -229,7 +229,7 @@ static int __init init_gfs2_fs(void)
>
> static void __exit exit_gfs2_fs(void)
> {
> - unregister_shrinker(&gfs2_qd_shrinker);
> + gfs2_qd_shrinker_exit();
> gfs2_glock_exit();
> gfs2_unregister_debugfs();
> unregister_filesystem(&gfs2_fs_type);
> diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
> index 704192b73605..bc9883cea847 100644
> --- a/fs/gfs2/quota.c
> +++ b/fs/gfs2/quota.c
> @@ -186,13 +186,27 @@ static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
> return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
> }
>
> -struct shrinker gfs2_qd_shrinker = {
> - .count_objects = gfs2_qd_shrink_count,
> - .scan_objects = gfs2_qd_shrink_scan,
> - .seeks = DEFAULT_SEEKS,
> - .flags = SHRINKER_NUMA_AWARE,
> -};
> +static struct shrinker *gfs2_qd_shrinker;
> +
> +int gfs2_qd_shrinker_init(void)
It's better to declare this as __init, since it is only called from init_gfs2_fs(), which is itself __init.
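
I.e. something like the following (an untested sketch, only meant to illustrate the annotation; gfs2_qd_shrinker_exit() presumably cannot get a similar __exit annotation because it is also used in the init_gfs2_fs() error path):

int __init gfs2_qd_shrinker_init(void)
{
	/*
	 * Only reachable from init_gfs2_fs(), so this code can be
	 * discarded once initialization is done.
	 */
	gfs2_qd_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "gfs2-qd");
	if (!gfs2_qd_shrinker)
		return -ENOMEM;

	gfs2_qd_shrinker->count_objects = gfs2_qd_shrink_count;
	gfs2_qd_shrinker->scan_objects = gfs2_qd_shrink_scan;
	gfs2_qd_shrinker->seeks = DEFAULT_SEEKS;

	shrinker_register(gfs2_qd_shrinker);

	return 0;
}
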
> +{
> + gfs2_qd_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "gfs2-qd");
> + if (!gfs2_qd_shrinker)
> + return -ENOMEM;
> +
> + gfs2_qd_shrinker->count_objects = gfs2_qd_shrink_count;
> + gfs2_qd_shrinker->scan_objects = gfs2_qd_shrink_scan;
> + gfs2_qd_shrinker->seeks = DEFAULT_SEEKS;
> +
> + shrinker_register(gfs2_qd_shrinker);
>
> + return 0;
> +}
> +
> +void gfs2_qd_shrinker_exit(void)
> +{
> + shrinker_unregister(gfs2_qd_shrinker);
> +}
>
> static u64 qd2index(struct gfs2_quota_data *qd)
> {
> diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
> index 21ada332d555..f9cb863373f7 100644
> --- a/fs/gfs2/quota.h
> +++ b/fs/gfs2/quota.h
> @@ -59,7 +59,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
> }
>
> extern const struct quotactl_ops gfs2_quotactl_ops;
> -extern struct shrinker gfs2_qd_shrinker;
> +int gfs2_qd_shrinker_init(void);
> +void gfs2_qd_shrinker_exit(void);
> extern struct list_lru gfs2_qd_lru;
> extern void __init gfs2_quota_hash_init(void);
>