Message-Id: <20230807110936.21819-33-zhengqi.arch@bytedance.com>
Date: Mon, 7 Aug 2023 19:09:20 +0800
From: Qi Zheng <zhengqi.arch@...edance.com>
To: akpm@...ux-foundation.org,
david@...morbit.com,
tkhai@...ru,
vbabka@...e.cz,
roman.gushchin@...ux.dev,
djwong@...nel.org,
brauner@...nel.org,
paulmck@...nel.org,
tytso@....edu,
steven.price@....com,
cel@...nel.org,
senozhatsky@...omium.org,
yujie.liu@...el.com,
gregkh@...uxfoundation.org,
muchun.song@...ux.dev,
simon.horman@...igine.com,
dlemoal@...nel.org
Cc: linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
x86@...nel.org,
kvm@...r.kernel.org,
xen-devel@...ts.xenproject.org,
linux-erofs@...ts.ozlabs.org,
linux-f2fs-devel@...ts.sourceforge.net,
cluster-devel@...hat.com,
linux-nfs@...r.kernel.org,
linux-mtd@...ts.infradead.org,
rcu@...r.kernel.org,
netdev@...r.kernel.org,
dri-devel@...ts.freedesktop.org,
linux-arm-msm@...r.kernel.org,
dm-devel@...hat.com,
linux-raid@...r.kernel.org,
linux-bcache@...r.kernel.org,
virtualization@...ts.linux-foundation.org,
linux-fsdevel@...r.kernel.org,
linux-ext4@...r.kernel.org,
linux-xfs@...r.kernel.org,
linux-btrfs@...r.kernel.org,
Qi Zheng <zhengqi.arch@...edance.com>,
Muchun Song <songmuchun@...edance.com>
Subject: [PATCH v4 32/48] mbcache: dynamically allocate the mbcache shrinker

In preparation for implementing lockless slab shrink, use the new APIs
to dynamically allocate the mbcache shrinker, so that it can be freed
asynchronously via kfree_rcu(). This way, we don't need to wait for an
RCU read-side critical section when releasing the struct mb_cache.
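
For reference, the conversion follows the same pattern as the rest of
this series. Below is a minimal sketch of that pattern, assuming the
shrinker_alloc()/shrinker_register()/shrinker_free() APIs introduced
earlier in the series; the my_cache structure and my_*() callbacks are
hypothetical names used only for illustration:

#include <linux/shrinker.h>
#include <linux/slab.h>

/* Hypothetical cache used only to illustrate the new shrinker APIs. */
struct my_cache {
	struct shrinker *shrink;	/* now a pointer, allocated at runtime */
	unsigned long nr_objects;
};

static unsigned long my_count_objects(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	/* ->private_data replaces the old container_of() lookup. */
	struct my_cache *cache = shrink->private_data;

	return cache->nr_objects;
}

static unsigned long my_scan_objects(struct shrinker *shrink,
				     struct shrink_control *sc)
{
	/* A real implementation would free up to sc->nr_to_scan objects. */
	return SHRINK_STOP;
}

static int my_cache_init(struct my_cache *cache)
{
	cache->shrink = shrinker_alloc(0, "my-cache-shrinker");
	if (!cache->shrink)
		return -ENOMEM;

	cache->shrink->count_objects = my_count_objects;
	cache->shrink->scan_objects = my_scan_objects;
	cache->shrink->seeks = DEFAULT_SEEKS;
	cache->shrink->private_data = cache;

	/* Make the shrinker visible to reclaim only once it is set up. */
	shrinker_register(cache->shrink);
	return 0;
}

static void my_cache_exit(struct my_cache *cache)
{
	/*
	 * shrinker_free() unregisters the shrinker and hands it to
	 * kfree_rcu(), so @cache can be released right away without
	 * waiting for an RCU grace period here.
	 */
	shrinker_free(cache->shrink);
	kfree(cache);	/* safe immediately after shrinker_free() */
}

Because shrinker_free() defers the actual freeing to kfree_rcu(), the
structure embedding the shrinker pointer can be released as soon as
shrinker_free() returns, with no explicit synchronize_rcu() needed.
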
Signed-off-by: Qi Zheng <zhengqi.arch@...edance.com>
Reviewed-by: Muchun Song <songmuchun@...edance.com>
---
fs/mbcache.c | 23 +++++++++++++----------
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 2a4b8b549e93..0d1e24e9a5e3 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -37,7 +37,7 @@ struct mb_cache {
struct list_head c_list;
/* Number of entries in cache */
unsigned long c_entry_count;
- struct shrinker c_shrink;
+ struct shrinker *c_shrink;
/* Work for shrinking when the cache has too many entries */
struct work_struct c_shrink_work;
};
@@ -293,8 +293,7 @@ EXPORT_SYMBOL(mb_cache_entry_touch);
static unsigned long mb_cache_count(struct shrinker *shrink,
struct shrink_control *sc)
{
- struct mb_cache *cache = container_of(shrink, struct mb_cache,
- c_shrink);
+ struct mb_cache *cache = shrink->private_data;
return cache->c_entry_count;
}
@@ -333,8 +332,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
static unsigned long mb_cache_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
- struct mb_cache *cache = container_of(shrink, struct mb_cache,
- c_shrink);
+ struct mb_cache *cache = shrink->private_data;
return mb_cache_shrink(cache, sc->nr_to_scan);
}
@@ -377,15 +375,20 @@ struct mb_cache *mb_cache_create(int bucket_bits)
for (i = 0; i < bucket_count; i++)
INIT_HLIST_BL_HEAD(&cache->c_hash[i]);
- cache->c_shrink.count_objects = mb_cache_count;
- cache->c_shrink.scan_objects = mb_cache_scan;
- cache->c_shrink.seeks = DEFAULT_SEEKS;
- if (register_shrinker(&cache->c_shrink, "mbcache-shrinker")) {
+ cache->c_shrink = shrinker_alloc(0, "mbcache-shrinker");
+ if (!cache->c_shrink) {
kfree(cache->c_hash);
kfree(cache);
goto err_out;
}
+ cache->c_shrink->count_objects = mb_cache_count;
+ cache->c_shrink->scan_objects = mb_cache_scan;
+ cache->c_shrink->seeks = DEFAULT_SEEKS;
+ cache->c_shrink->private_data = cache;
+
+ shrinker_register(cache->c_shrink);
+
INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);
return cache;
@@ -406,7 +409,7 @@ void mb_cache_destroy(struct mb_cache *cache)
{
struct mb_cache_entry *entry, *next;
- unregister_shrinker(&cache->c_shrink);
+ shrinker_free(cache->c_shrink);
/*
* We don't bother with any locking. Cache must not be used at this
--
2.30.2