Message-Id: <20230622085335.77010-21-zhengqi.arch@bytedance.com>
Date: Thu, 22 Jun 2023 16:53:26 +0800
From: Qi Zheng <zhengqi.arch@...edance.com>
To: akpm@...ux-foundation.org, david@...morbit.com, tkhai@...ru,
vbabka@...e.cz, roman.gushchin@...ux.dev, djwong@...nel.org,
brauner@...nel.org, paulmck@...nel.org, tytso@....edu
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
intel-gfx@...ts.freedesktop.org, dri-devel@...ts.freedesktop.org,
linux-arm-msm@...r.kernel.org, dm-devel@...hat.com,
linux-raid@...r.kernel.org, linux-bcache@...r.kernel.org,
virtualization@...ts.linux-foundation.org,
linux-fsdevel@...r.kernel.org, linux-ext4@...r.kernel.org,
linux-nfs@...r.kernel.org, linux-xfs@...r.kernel.org,
linux-btrfs@...r.kernel.org, Qi Zheng <zhengqi.arch@...edance.com>
Subject: [PATCH 20/29] zsmalloc: dynamically allocate the mm-zspool shrinker
In preparation for implementing lockless slab shrink, we need to dynamically
allocate the mm-zspool shrinker, so that it can be freed asynchronously using
kfree_rcu(). Then we don't need to wait for an RCU read-side critical section
to complete when releasing the struct zs_pool.
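For reference, below is a condensed sketch of the allocate/register/free
pattern this patch switches to, assuming the shrinker_alloc_and_init(),
shrinker_free() and unregister_and_free_shrinker() helpers introduced earlier
in this series; struct my_pool and the my_* names are placeholders for
illustration only, not part of zsmalloc:

	#include <linux/shrinker.h>

	/* placeholder pool, stands in for struct zs_pool */
	struct my_pool {
		struct shrinker *shrinker;
		unsigned long nr_freeable;
		const char *name;
	};

	/* callbacks read the pool from ->private_data, no container_of() */
	static unsigned long my_count(struct shrinker *shrinker,
				      struct shrink_control *sc)
	{
		struct my_pool *pool = shrinker->private_data;

		return pool->nr_freeable;
	}

	static unsigned long my_scan(struct shrinker *shrinker,
				     struct shrink_control *sc)
	{
		/* a real scan callback would reclaim and return pages freed */
		return SHRINK_STOP;
	}

	static int my_register_shrinker(struct my_pool *pool)
	{
		int ret;

		/* count, scan, batch, seeks, flags, private_data */
		pool->shrinker = shrinker_alloc_and_init(my_count, my_scan,
							 0, DEFAULT_SEEKS,
							 0, pool);
		if (!pool->shrinker)
			return -ENOMEM;

		ret = register_shrinker(pool->shrinker, "my-pool:%s",
					pool->name);
		if (ret)
			shrinker_free(pool->shrinker); /* never registered */

		return ret;
	}

	static void my_unregister_shrinker(struct my_pool *pool)
	{
		/* unregister, then free via kfree_rcu(): no explicit RCU wait */
		unregister_and_free_shrinker(pool->shrinker);
	}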
Signed-off-by: Qi Zheng <zhengqi.arch@...edance.com>
---
mm/zsmalloc.c | 28 ++++++++++++++++------------
1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 3f057970504e..c03b34ae637e 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -229,7 +229,7 @@ struct zs_pool {
struct zs_pool_stats stats;
/* Compact classes */
- struct shrinker shrinker;
+ struct shrinker *shrinker;
#ifdef CONFIG_ZSMALLOC_STAT
struct dentry *stat_dentry;
@@ -2107,8 +2107,7 @@ static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
struct shrink_control *sc)
{
unsigned long pages_freed;
- struct zs_pool *pool = container_of(shrinker, struct zs_pool,
- shrinker);
+ struct zs_pool *pool = shrinker->private_data;
/*
* Compact classes and calculate compaction delta.
@@ -2126,8 +2125,7 @@ static unsigned long zs_shrinker_count(struct shrinker *shrinker,
int i;
struct size_class *class;
unsigned long pages_to_free = 0;
- struct zs_pool *pool = container_of(shrinker, struct zs_pool,
- shrinker);
+ struct zs_pool *pool = shrinker->private_data;
for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
class = pool->size_class[i];
@@ -2142,18 +2140,24 @@ static unsigned long zs_shrinker_count(struct shrinker *shrinker,
static void zs_unregister_shrinker(struct zs_pool *pool)
{
- unregister_shrinker(&pool->shrinker);
+ unregister_and_free_shrinker(pool->shrinker);
}
static int zs_register_shrinker(struct zs_pool *pool)
{
- pool->shrinker.scan_objects = zs_shrinker_scan;
- pool->shrinker.count_objects = zs_shrinker_count;
- pool->shrinker.batch = 0;
- pool->shrinker.seeks = DEFAULT_SEEKS;
+ int ret;
+
+ pool->shrinker = shrinker_alloc_and_init(zs_shrinker_count,
+ zs_shrinker_scan, 0,
+ DEFAULT_SEEKS, 0, pool);
+ if (!pool->shrinker)
+ return -ENOMEM;
- return register_shrinker(&pool->shrinker, "mm-zspool:%s",
- pool->name);
+ ret = register_shrinker(pool->shrinker, "mm-zspool:%s", pool->name);
+ if (ret)
+ shrinker_free(pool->shrinker);
+
+ return ret;
}
static int calculate_zspage_chain_size(int class_size)
--
2.30.2