Message-Id: <20240219-b4-szmalloc-migrate-v1-2-34cd49c6545b@bytedance.com>
Date: Mon, 19 Feb 2024 13:33:52 +0000
From: Chengming Zhou <zhouchengming@...edance.com>
To: nphamcs@...il.com, yosryahmed@...gle.com, Sergey Senozhatsky <senozhatsky@...omium.org>,
Minchan Kim <minchan@...nel.org>, Andrew Morton <akpm@...ux-foundation.org>, hannes@...xchg.org
Cc: linux-mm@...ck.org, Chengming Zhou <zhouchengming@...edance.com>, linux-kernel@...r.kernel.org
Subject: [PATCH 2/3] mm/zsmalloc: remove migrate_write_lock_nested()

The migrate write lock protects against the race between zspage migration
and users mapping the zspage's objects.

We only need to lock out the map users of the src zspage, not the dst
zspage: the dst zspage can safely be mapped by users concurrently, since
migration only does obj_malloc() from the dst zspage. So we can remove the
migrate_write_lock_nested() use case.

While we are at it, clean up __zs_compact() by moving putback_zspage()
outside of the migrate write lock section: since we hold the pool lock,
no malloc or free users can come in.
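
To illustrate, here is a condensed sketch of the __zs_compact() loop after
this patch (drawn from the diff below; the dst_zspage fullness check and
the loop-exit putback paths are elided):

	while (zs_can_compact(class)) {
		if (!dst_zspage) {
			dst_zspage = isolate_dst_zspage(class);
			if (!dst_zspage)
				break;
		}

		src_zspage = isolate_src_zspage(class);
		if (!src_zspage)
			break;

		/* only the src zspage needs the migrate write lock */
		migrate_write_lock(src_zspage);
		migrate_zspage(pool, src_zspage, dst_zspage);
		migrate_write_unlock(src_zspage);

		/* pool lock is held, so no malloc/free users can race here */
		fg = putback_zspage(class, src_zspage);
		if (fg == ZS_INUSE_RATIO_0) {
			free_zspage(pool, class, src_zspage);
			pages_freed += class->pages_per_zspage;
		}
		...
	}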
Signed-off-by: Chengming Zhou <zhouchengming@...edance.com>
---
mm/zsmalloc.c | 22 +++++-----------------
1 file changed, 5 insertions(+), 17 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 64d5533fa5d8..f2ae7d4c6f21 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -279,7 +279,6 @@ static void migrate_lock_init(struct zspage *zspage);
static void migrate_read_lock(struct zspage *zspage);
static void migrate_read_unlock(struct zspage *zspage);
static void migrate_write_lock(struct zspage *zspage);
-static void migrate_write_lock_nested(struct zspage *zspage);
static void migrate_write_unlock(struct zspage *zspage);
#ifdef CONFIG_COMPACTION
@@ -1727,11 +1726,6 @@ static void migrate_write_lock(struct zspage *zspage)
write_lock(&zspage->lock);
}
-static void migrate_write_lock_nested(struct zspage *zspage)
-{
- write_lock_nested(&zspage->lock, SINGLE_DEPTH_NESTING);
-}
-
static void migrate_write_unlock(struct zspage *zspage)
{
write_unlock(&zspage->lock);
@@ -2003,19 +1997,17 @@ static unsigned long __zs_compact(struct zs_pool *pool,
dst_zspage = isolate_dst_zspage(class);
if (!dst_zspage)
break;
- migrate_write_lock(dst_zspage);
}
src_zspage = isolate_src_zspage(class);
if (!src_zspage)
break;
- migrate_write_lock_nested(src_zspage);
-
+ migrate_write_lock(src_zspage);
migrate_zspage(pool, src_zspage, dst_zspage);
- fg = putback_zspage(class, src_zspage);
migrate_write_unlock(src_zspage);
+ fg = putback_zspage(class, src_zspage);
if (fg == ZS_INUSE_RATIO_0) {
free_zspage(pool, class, src_zspage);
pages_freed += class->pages_per_zspage;
@@ -2025,7 +2017,6 @@ static unsigned long __zs_compact(struct zs_pool *pool,
if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
|| spin_is_contended(&pool->lock)) {
putback_zspage(class, dst_zspage);
- migrate_write_unlock(dst_zspage);
dst_zspage = NULL;
spin_unlock(&pool->lock);
@@ -2034,15 +2025,12 @@ static unsigned long __zs_compact(struct zs_pool *pool,
}
}
- if (src_zspage) {
+ if (src_zspage)
putback_zspage(class, src_zspage);
- migrate_write_unlock(src_zspage);
- }
- if (dst_zspage) {
+ if (dst_zspage)
putback_zspage(class, dst_zspage);
- migrate_write_unlock(dst_zspage);
- }
+
spin_unlock(&pool->lock);
return pages_freed;
--
b4 0.10.1