Message-ID: <kmku2oyevj5vbt6na73iyksyod4sjcvik5zx43h4mw55m744oo@ddiynkvadmni>
Date: Wed, 21 Jan 2026 12:41:39 +0900
From: Sergey Senozhatsky <senozhatsky@...omium.org>
To: Nhat Pham <nphamcs@...il.com>
Cc: Sergey Senozhatsky <senozhatsky@...omium.org>,
Andrew Morton <akpm@...ux-foundation.org>, Yosry Ahmed <yosry.ahmed@...ux.dev>,
Minchan Kim <minchan@...nel.org>, Johannes Weiner <hannes@...xchg.org>,
Brian Geffon <bgeffon@...gle.com>, linux-kernel@...r.kernel.org, linux-mm@...ck.org
Subject: Re: [RFC PATCH] zsmalloc: make common caches global

On (26/01/19 13:44), Nhat Pham wrote:
> On Thu, Jan 15, 2026 at 9:53 PM Sergey Senozhatsky
> <senozhatsky@...omium.org> wrote:
> >
> > On (26/01/16 13:48), Sergey Senozhatsky wrote:
> > > Currently, zsmalloc creates kmem_cache of handles and zspages
> > > for each pool, which may be suboptimal from the memory usage
> > > point of view (extra internal fragmentation per pool). Systems
> > > that create multiple zsmalloc pools may benefit from shared
> > > common zsmalloc caches.
> >
> > This is step 1.
> >
> > Step 2 is to look into possibility of sharing zsmalloc pools.
> > E.g. if there are N zram devices in the system, do we really need
> > N zsmalloc pools? Can we just share a single pool between them?
>
> Ditto for zswap (although here, we almost always only have a single zswap pool).

COMPLETELY UNTESTED (current linux-next doesn't boot for me, I'm hitting
an "Oops: stack guard page: 0000" early during boot).

So I'm thinking of something like the below: basically, add a Kconfig
option that switches zsmalloc into a singleton-pool mode, transparently
to zsmalloc users.
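
Just to illustrate the "transparently" part (a sketch, not part of the
patch; example_dev and its init/exit helpers are made up, loosely
mirroring how zram sets up its per-device pool): callers keep using the
exact same API, and only the Kconfig option decides what they get back:

#include <linux/errno.h>
#include <linux/zsmalloc.h>

struct example_dev {
	const char *name;
	struct zs_pool *mem_pool;
};

static int example_dev_init(struct example_dev *dev)
{
	/*
	 * With ZSMALLOC_SINGLETON_POOL=y this hands back the shared
	 * global pool; otherwise it creates a private pool as before.
	 * Either way the caller can't tell the difference.
	 */
	dev->mem_pool = zs_create_pool(dev->name);
	if (!dev->mem_pool)
		return -ENOMEM;
	return 0;
}

static void example_dev_exit(struct example_dev *dev)
{
	/* A nop in singleton mode; destroys the private pool otherwise. */
	zs_destroy_pool(dev->mem_pool);
}
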
---
mm/Kconfig | 11 ++++++++
mm/zsmalloc.c | 73 ++++++++++++++++++++++++++++++++++++++++++---------
2 files changed, 72 insertions(+), 12 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 4fc1a171dffa..ff6855e74c3d 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -132,6 +132,17 @@ menu "Zsmalloc allocator options"
comment "Zsmalloc is a common backend allocator for zswap & zram"
+config ZSMALLOC_SINGLETON_POOL
+ bool "Use a singleton zsmalloc pool"
+ default n
+ help
+ This option enables the use of a single global zsmalloc pool
+ instance for all users of zsmalloc (e.g., zswap, zram). This
+ reduces memory overhead and fragmentation by sharing size class
+ configurations and memory between different users.
+
+ If unsure, say N.
+
config ZSMALLOC_STAT
bool "Export zsmalloc statistics"
select DEBUG_FS
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 8df45aa1b5c8..acd14b001342 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -224,6 +224,10 @@ struct zs_pool {
atomic_t compaction_in_progress;
};
+#ifdef CONFIG_ZSMALLOC_SINGLETON_POOL
+static struct zs_pool *zs_singleton_pool;
+#endif
+
static inline void zpdesc_set_first(struct zpdesc *zpdesc)
{
SetPagePrivate(zpdesc_page(zpdesc));
@@ -2051,17 +2055,7 @@ static int calculate_zspage_chain_size(int class_size)
return chain_size;
}
-/**
- * zs_create_pool - Creates an allocation pool to work from.
- * @name: pool name to be created
- *
- * This function must be called before anything when using
- * the zsmalloc allocator.
- *
- * On success, a pointer to the newly created pool is returned,
- * otherwise NULL.
- */
-struct zs_pool *zs_create_pool(const char *name)
+static struct zs_pool *__zs_create_pool(const char *name)
{
int i;
struct zs_pool *pool;
@@ -2170,9 +2164,29 @@ struct zs_pool *zs_create_pool(const char *name)
zs_destroy_pool(pool);
return NULL;
}
+
+/**
+ * zs_create_pool - Creates an allocation pool to work from.
+ * @name: pool name to be created
+ *
+ * This function must be called before anything when using
+ * the zsmalloc allocator.
+ *
+ * On success, a pointer to the newly created pool is returned,
+ * otherwise NULL.
+ */
+struct zs_pool *zs_create_pool(const char *name)
+{
+#ifdef CONFIG_ZSMALLOC_SINGLETON_POOL
+ return zs_singleton_pool;
+#else
+ return __zs_create_pool(name);
+#endif
+
+}
EXPORT_SYMBOL_GPL(zs_create_pool);
-void zs_destroy_pool(struct zs_pool *pool)
+static void __zs_destroy_pool(struct zs_pool *pool)
{
int i;
@@ -2203,8 +2217,35 @@ void zs_destroy_pool(struct zs_pool *pool)
kfree(pool->name);
kfree(pool);
}
+
+void zs_destroy_pool(struct zs_pool *pool __maybe_unused)
+{
+#ifndef CONFIG_ZSMALLOC_SINGLETON_POOL
+ __zs_destroy_pool(pool);
+#endif
+}
EXPORT_SYMBOL_GPL(zs_destroy_pool);
+static void zs_destroy_singleton_pool(void)
+{
+#ifdef CONFIG_ZSMALLOC_SINGLETON_POOL
+ if (zs_singleton_pool) {
+ __zs_destroy_pool(zs_singleton_pool);
+ zs_singleton_pool = NULL;
+ }
+#endif
+}
+
+static int zs_create_singleton_pool(void)
+{
+#ifdef CONFIG_ZSMALLOC_SINGLETON_POOL
+ zs_singleton_pool = __zs_create_pool("zsmalloc");
+ if (!zs_singleton_pool)
+ return -ENOMEM;
+#endif
+ return 0;
+}
+
static void zs_destroy_caches(void)
{
kmem_cache_destroy(handle_cachep);
@@ -2235,9 +2276,16 @@ static int __init zs_init(void)
if (rc)
return rc;
+ rc = zs_create_singleton_pool();
+ if (rc) {
+ zs_destroy_caches();
+ return rc;
+ }
+
#ifdef CONFIG_COMPACTION
rc = set_movable_ops(&zsmalloc_mops, PGTY_zsmalloc);
if (rc) {
+ zs_destroy_singleton_pool();
zs_destroy_caches();
return rc;
}
@@ -2252,6 +2300,7 @@ static void __exit zs_exit(void)
set_movable_ops(NULL, PGTY_zsmalloc);
#endif
zs_stat_exit();
+ zs_destroy_singleton_pool();
zs_destroy_caches();
}
--
2.52.0.457.g6b5491de43-goog