Message-Id: <20240219-b4-szmalloc-migrate-v1-1-34cd49c6545b@bytedance.com>
Date: Mon, 19 Feb 2024 13:33:51 +0000
From: Chengming Zhou <zhouchengming@...edance.com>
To: nphamcs@...il.com, yosryahmed@...gle.com, Sergey Senozhatsky <senozhatsky@...omium.org>,
Minchan Kim <minchan@...nel.org>, Andrew Morton <akpm@...ux-foundation.org>, hannes@...xchg.org
Cc: linux-mm@...ck.org, Chengming Zhou <zhouchengming@...edance.com>, linux-kernel@...r.kernel.org
Subject: [PATCH 1/3] mm/zsmalloc: fix migrate_write_lock() when !CONFIG_COMPACTION
migrate_write_lock() is an empty function when !CONFIG_COMPACTION, but
zs_compact() can still be triggered in that case from shrinker reclaim
context. (Maybe it's better to rename it to zs_shrink()?)

And users that map a zspage object rely on migrate_read_lock() to
guarantee the object won't be migrated elsewhere; with the write-side
lock compiled out to an empty stub, that guarantee no longer holds.
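
To make the race concrete, below is a minimal userspace sketch (my own
illustration, not zsmalloc code; the LOCKING_COMPILED_OUT macro and the
thread names are made up) that models the per-zspage rwlock with a
pthread rwlock. When the write-side helpers are empty stubs, the
reader's lock excludes nothing and the mapper can observe the object
mid-move:

/*
 * Illustrative sketch only: a "mapper" takes the read lock while
 * using the object (like zs_map_object()); a "migrator" must take
 * the write lock before moving it (like compaction/migration).
 * Building with -DLOCKING_COMPILED_OUT mimics the old empty
 * !CONFIG_COMPACTION stubs and reintroduces the race.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t zspage_lock = PTHREAD_RWLOCK_INITIALIZER;
static int object = 42;

#ifdef LOCKING_COMPILED_OUT
static void migrate_write_lock(void)   {}
static void migrate_write_unlock(void) {}
#else
static void migrate_write_lock(void)   { pthread_rwlock_wrlock(&zspage_lock); }
static void migrate_write_unlock(void) { pthread_rwlock_unlock(&zspage_lock); }
#endif

static void *mapper(void *arg)
{
	(void)arg;
	/* hold the read lock while the object is mapped/used */
	pthread_rwlock_rdlock(&zspage_lock);
	printf("mapped object = %d\n", object);
	pthread_rwlock_unlock(&zspage_lock);
	return NULL;
}

static void *migrator(void *arg)
{
	(void)arg;
	/* must exclude readers while the object is being moved */
	migrate_write_lock();
	object = -1;		/* object "in flight" */
	object = 42;		/* move finished */
	migrate_write_unlock();
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, mapper, NULL);
	pthread_create(&b, NULL, migrator, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Compile with "gcc -pthread"; adding -DLOCKING_COMPILED_OUT lets the
mapper race with the migrator, which is the situation this patch closes
in zsmalloc.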
Fix it by always implementing the migrate_write_lock()-related functions.
Signed-off-by: Chengming Zhou <zhouchengming@...edance.com>
---
mm/zsmalloc.c | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c937635e0ad1..64d5533fa5d8 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -278,18 +278,15 @@ static bool ZsHugePage(struct zspage *zspage)
static void migrate_lock_init(struct zspage *zspage);
static void migrate_read_lock(struct zspage *zspage);
static void migrate_read_unlock(struct zspage *zspage);
-
-#ifdef CONFIG_COMPACTION
static void migrate_write_lock(struct zspage *zspage);
static void migrate_write_lock_nested(struct zspage *zspage);
static void migrate_write_unlock(struct zspage *zspage);
+
+#ifdef CONFIG_COMPACTION
static void kick_deferred_free(struct zs_pool *pool);
static void init_deferred_free(struct zs_pool *pool);
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
#else
-static void migrate_write_lock(struct zspage *zspage) {}
-static void migrate_write_lock_nested(struct zspage *zspage) {}
-static void migrate_write_unlock(struct zspage *zspage) {}
static void kick_deferred_free(struct zs_pool *pool) {}
static void init_deferred_free(struct zs_pool *pool) {}
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
@@ -1725,7 +1722,6 @@ static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
read_unlock(&zspage->lock);
}
-#ifdef CONFIG_COMPACTION
static void migrate_write_lock(struct zspage *zspage)
{
write_lock(&zspage->lock);
@@ -1741,6 +1737,7 @@ static void migrate_write_unlock(struct zspage *zspage)
write_unlock(&zspage->lock);
}
+#ifdef CONFIG_COMPACTION
/* Number of isolated subpage for *page migration* in this zspage */
static void inc_zspage_isolation(struct zspage *zspage)
{
--
b4 0.10.1