Message-Id: <20230824033539.34570-5-zhengqi.arch@bytedance.com>
Date: Thu, 24 Aug 2023 11:35:39 +0800
From: Qi Zheng <zhengqi.arch@...edance.com>
To: akpm@...ux-foundation.org, david@...morbit.com, tkhai@...ru,
vbabka@...e.cz, roman.gushchin@...ux.dev, djwong@...nel.org,
brauner@...nel.org, paulmck@...nel.org, tytso@....edu,
steven.price@....com, cel@...nel.org, senozhatsky@...omium.org,
yujie.liu@...el.com, gregkh@...uxfoundation.org,
muchun.song@...ux.dev, joel@...lfernandes.org,
christian.koenig@....com, daniel@...ll.ch
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
dri-devel@...ts.freedesktop.org, linux-fsdevel@...r.kernel.org,
Qi Zheng <zhengqi.arch@...edance.com>,
Muchun Song <songmuchun@...edance.com>,
Daniel Vetter <daniel.vetter@...ll.ch>
Subject: [PATCH v3 4/4] drm/ttm: introduce pool_shrink_rwsem
Currently, synchronize_shrinkers() is only used by the TTM pool. It only
requires that no shrinkers run in parallel.

Once we use the RCU + refcount method to implement lockless slab shrinking,
we can no longer rely on shrinker_rwsem or synchronize_rcu() to guarantee
that all shrinker invocations have seen an update before memory is freed.

So introduce a new pool_shrink_rwsem and use it to implement a private
ttm_pool_synchronize_shrinkers(), which serves the same purpose.
Signed-off-by: Qi Zheng <zhengqi.arch@...edance.com>
Reviewed-by: Muchun Song <songmuchun@...edance.com>
Reviewed-by: Christian König <christian.koenig@....com>
Acked-by: Daniel Vetter <daniel.vetter@...ll.ch>
---
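As an aside (not part of the patch): the rwsem-as-barrier pattern used below
can be illustrated with a small userspace analogy, where pthread_rwlock_t
stands in for the kernel rw_semaphore and the function names are made up for
the example. Readers hold the lock across one shrink pass, and an empty
write-lock/unlock returns only once every pass already in flight has finished.

/*
 * Userspace sketch of the pool_shrink_rwsem pattern (illustrative only).
 * Build with: gcc barrier.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t pool_shrink_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for ttm_pool_shrink(): runs with the lock held for reading. */
static void shrink_one_pool(void)
{
	pthread_rwlock_rdlock(&pool_shrink_lock);
	/* ... free pages from one pool type ... */
	pthread_rwlock_unlock(&pool_shrink_lock);
}

/* Stand-in for ttm_pool_synchronize_shrinkers(): wait for all readers. */
static void synchronize_shrink_passes(void)
{
	pthread_rwlock_wrlock(&pool_shrink_lock);
	pthread_rwlock_unlock(&pool_shrink_lock);
}

int main(void)
{
	shrink_one_pool();
	synchronize_shrink_passes();	/* returns once no shrink pass runs */
	printf("all in-flight shrink passes have completed\n");
	return 0;
}
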
drivers/gpu/drm/ttm/ttm_pool.c | 17 ++++++++++++++++-
include/linux/shrinker.h | 1 -
mm/shrinker.c | 15 ---------------
3 files changed, 16 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index cddb9151d20f..648ca70403a7 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -74,6 +74,7 @@ static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];
static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;
+static DECLARE_RWSEM(pool_shrink_rwsem);
/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
@@ -317,6 +318,7 @@ static unsigned int ttm_pool_shrink(void)
unsigned int num_pages;
struct page *p;
+ down_read(&pool_shrink_rwsem);
spin_lock(&shrinker_lock);
pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
list_move_tail(&pt->shrinker_list, &shrinker_list);
@@ -329,6 +331,7 @@ static unsigned int ttm_pool_shrink(void)
} else {
num_pages = 0;
}
+ up_read(&pool_shrink_rwsem);
return num_pages;
}
@@ -572,6 +575,18 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
}
EXPORT_SYMBOL(ttm_pool_init);
+/**
+ * ttm_pool_synchronize_shrinkers - Wait for all running shrinkers to complete.
+ *
+ * This is useful to guarantee that all shrinker invocations have seen an
+ * update, before freeing memory, similar to rcu.
+ */
+static void ttm_pool_synchronize_shrinkers(void)
+{
+ down_write(&pool_shrink_rwsem);
+ up_write(&pool_shrink_rwsem);
+}
+
/**
* ttm_pool_fini - Cleanup a pool
*
@@ -593,7 +608,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
/* We removed the pool types from the LRU, but we need to also make sure
* that no shrinker is concurrently freeing pages from the pool.
*/
- synchronize_shrinkers();
+ ttm_pool_synchronize_shrinkers();
}
EXPORT_SYMBOL(ttm_pool_fini);
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 8dc15aa37410..6b5843c3b827 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -103,7 +103,6 @@ extern int __printf(2, 3) register_shrinker(struct shrinker *shrinker,
const char *fmt, ...);
extern void unregister_shrinker(struct shrinker *shrinker);
extern void free_prealloced_shrinker(struct shrinker *shrinker);
-extern void synchronize_shrinkers(void);
#ifdef CONFIG_SHRINKER_DEBUG
extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
diff --git a/mm/shrinker.c b/mm/shrinker.c
index 043c87ccfab4..a16cd448b924 100644
--- a/mm/shrinker.c
+++ b/mm/shrinker.c
@@ -692,18 +692,3 @@ void unregister_shrinker(struct shrinker *shrinker)
shrinker->nr_deferred = NULL;
}
EXPORT_SYMBOL(unregister_shrinker);
-
-/**
- * synchronize_shrinkers - Wait for all running shrinkers to complete.
- *
- * This is equivalent to calling unregister_shrink() and register_shrinker(),
- * but atomically and with less overhead. This is useful to guarantee that all
- * shrinker invocations have seen an update, before freeing memory, similar to
- * rcu.
- */
-void synchronize_shrinkers(void)
-{
- down_write(&shrinker_rwsem);
- up_write(&shrinker_rwsem);
-}
-EXPORT_SYMBOL(synchronize_shrinkers);
--
2.30.2