Message-ID: <20250129064853.2210753-2-senozhatsky@chromium.org>
Date: Wed, 29 Jan 2025 15:43:47 +0900
From: Sergey Senozhatsky <senozhatsky@...omium.org>
To: Andrew Morton <akpm@...ux-foundation.org>,
Minchan Kim <minchan@...nel.org>,
Johannes Weiner <hannes@...xchg.org>,
Yosry Ahmed <yosry.ahmed@...ux.dev>,
Nhat Pham <nphamcs@...il.com>
Cc: linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
Sergey Senozhatsky <senozhatsky@...omium.org>
Subject: [PATCHv1 1/6] zsmalloc: factor out pool locking helpers
We currently have a mix of migrate_{read,write}_lock() helpers
that lock zspages, while zs_pool's ->migrate_lock is accessed via
open-coded read_lock()/write_lock() calls. Factor out pool migrate
locking into dedicated helpers; the zspage migration locking API
will be renamed later in the series to reduce confusion.
Signed-off-by: Sergey Senozhatsky <senozhatsky@...omium.org>
---
mm/zsmalloc.c | 56 +++++++++++++++++++++++++++++++++++++--------------
1 file changed, 41 insertions(+), 15 deletions(-)
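
For quick review, this is the shape of the conversion at the call
sites (an illustrative sketch derived from the diff below;
pool_read_lock()/pool_read_unlock() are the helpers this patch
introduces, wrapping pool->migrate_lock):

	/* before: open-coded access to pool->migrate_lock */
	read_lock(&pool->migrate_lock);
	obj = handle_to_obj(handle);
	read_unlock(&pool->migrate_lock);

	/* after: the same critical section via the new pool helpers */
	pool_read_lock(pool);
	obj = handle_to_obj(handle);
	pool_read_unlock(pool);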
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 817626a351f8..2f8a2b139919 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -204,7 +204,8 @@ struct link_free {
};
struct zs_pool {
- const char *name;
+ /* protect page/zspage migration */
+ rwlock_t migrate_lock;
struct size_class *size_class[ZS_SIZE_CLASSES];
struct kmem_cache *handle_cachep;
@@ -213,6 +214,7 @@ struct zs_pool {
atomic_long_t pages_allocated;
struct zs_pool_stats stats;
+ atomic_t compaction_in_progress;
/* Compact classes */
struct shrinker *shrinker;
@@ -223,11 +225,35 @@ struct zs_pool {
#ifdef CONFIG_COMPACTION
struct work_struct free_work;
#endif
- /* protect page/zspage migration */
- rwlock_t migrate_lock;
- atomic_t compaction_in_progress;
+
+ const char *name;
};
+static void pool_write_unlock(struct zs_pool *pool)
+{
+ write_unlock(&pool->migrate_lock);
+}
+
+static void pool_write_lock(struct zs_pool *pool)
+{
+ write_lock(&pool->migrate_lock);
+}
+
+static void pool_read_unlock(struct zs_pool *pool)
+{
+ read_unlock(&pool->migrate_lock);
+}
+
+static void pool_read_lock(struct zs_pool *pool)
+{
+ read_lock(&pool->migrate_lock);
+}
+
+static bool pool_lock_is_contended(struct zs_pool *pool)
+{
+ return rwlock_is_contended(&pool->migrate_lock);
+}
+
static inline void zpdesc_set_first(struct zpdesc *zpdesc)
{
SetPagePrivate(zpdesc_page(zpdesc));
@@ -1206,7 +1232,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
BUG_ON(in_interrupt());
/* It guarantees it can get zspage from handle safely */
- read_lock(&pool->migrate_lock);
+ pool_read_lock(pool);
obj = handle_to_obj(handle);
obj_to_location(obj, &zpdesc, &obj_idx);
zspage = get_zspage(zpdesc);
@@ -1218,7 +1244,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
* which is smaller granularity.
*/
migrate_read_lock(zspage);
- read_unlock(&pool->migrate_lock);
+ pool_read_unlock(pool);
class = zspage_class(pool, zspage);
off = offset_in_page(class->size * obj_idx);
@@ -1453,13 +1479,13 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
* The pool->migrate_lock protects the race with zpage's migration
* so it's safe to get the page from handle.
*/
- read_lock(&pool->migrate_lock);
+ pool_read_lock(pool);
obj = handle_to_obj(handle);
obj_to_zpdesc(obj, &f_zpdesc);
zspage = get_zspage(f_zpdesc);
class = zspage_class(pool, zspage);
spin_lock(&class->lock);
- read_unlock(&pool->migrate_lock);
+ pool_read_unlock(pool);
class_stat_sub(class, ZS_OBJS_INUSE, 1);
obj_free(class->size, obj);
@@ -1796,7 +1822,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
* The pool migrate_lock protects the race between zpage migration
* and zs_free.
*/
- write_lock(&pool->migrate_lock);
+ pool_write_lock(pool);
class = zspage_class(pool, zspage);
/*
@@ -1833,7 +1859,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
* Since we complete the data copy and set up new zspage structure,
* it's okay to release migration_lock.
*/
- write_unlock(&pool->migrate_lock);
+ pool_write_unlock(pool);
spin_unlock(&class->lock);
migrate_write_unlock(zspage);
@@ -1956,7 +1982,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
* protect the race between zpage migration and zs_free
* as well as zpage allocation/free
*/
- write_lock(&pool->migrate_lock);
+ pool_write_lock(pool);
spin_lock(&class->lock);
while (zs_can_compact(class)) {
int fg;
@@ -1983,14 +2009,14 @@ static unsigned long __zs_compact(struct zs_pool *pool,
src_zspage = NULL;
if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
- || rwlock_is_contended(&pool->migrate_lock)) {
+ || pool_lock_is_contended(pool)) {
putback_zspage(class, dst_zspage);
dst_zspage = NULL;
spin_unlock(&class->lock);
- write_unlock(&pool->migrate_lock);
+ pool_write_unlock(pool);
cond_resched();
- write_lock(&pool->migrate_lock);
+ pool_write_lock(pool);
spin_lock(&class->lock);
}
}
@@ -2002,7 +2028,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
putback_zspage(class, dst_zspage);
spin_unlock(&class->lock);
- write_unlock(&pool->migrate_lock);
+ pool_write_unlock(pool);
return pages_freed;
}
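
A side note on the __zs_compact() hunk above: pool_lock_is_contended()
preserves the existing backoff behaviour, which roughly follows the
sketch below (simplified; the class->lock handling and fullness checks
from the real code are omitted, and work_to_do()/do_one_step() are
placeholders, not functions in zsmalloc):

	/* simplified backoff loop mirroring __zs_compact() above */
	pool_write_lock(pool);
	while (work_to_do(pool)) {
		do_one_step(pool);

		if (pool_lock_is_contended(pool)) {
			/* drop the pool lock so other paths can make progress */
			pool_write_unlock(pool);
			cond_resched();
			pool_write_lock(pool);
		}
	}
	pool_write_unlock(pool);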
--
2.48.1.262.g85cc9f2d1e-goog