[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260110040217.1927971-3-viro@zeniv.linux.org.uk>
Date: Sat, 10 Jan 2026 04:02:04 +0000
From: Al Viro <viro@...iv.linux.org.uk>
To: linux-mm@...ck.org
Cc: Vlastimil Babka <vbabka@...e.cz>,
Harry Yoo <harry.yoo@...cle.com>,
linux-fsdevel@...r.kernel.org,
Linus Torvalds <torvalds@...ux-foundation.org>,
Christian Brauner <brauner@...nel.org>,
Jan Kara <jack@...e.cz>,
Mateusz Guzik <mguzik@...il.com>,
linux-kernel@...r.kernel.org
Subject: [RFC PATCH 02/15] allow static-duration kmem_cache in modules
We need to make sure that an instance in a module will get to
slab_kmem_cache_release() before the module data gets freed. That's only
a problem on sysfs setups - otherwise it'll definitely be finished before
kmem_cache_destroy() returns.
Note that modules themselves have sysfs-exposed attributes,
so a similar problem already exists there. That's dealt with by
having mod_sysfs_teardown() wait for refcount of module->mkobj.kobj
reaching zero. Let's make use of that - have static-duration-in-module
kmem_cache instances grab a reference to that kobject upon setup and
drop it at the end of slab_kmem_cache_release().
Let setup helpers store the kobject to be pinned in
kmem_cache_args->owner (for preallocated; if somebody manually sets it
for the non-preallocated case, it'll be ignored). That would be
&THIS_MODULE->mkobj.kobj for a module and NULL for built-in code.
If sysfs is enabled and we are dealing with preallocated instance,
let create_cache() grab and stash that reference in kmem_cache->owner
and let slab_kmem_cache_release() drop it instead of freeing kmem_cache
instance.
Signed-off-by: Al Viro <viro@...iv.linux.org.uk>
---
include/linux/slab-static.h | 12 ++++++++----
include/linux/slab.h | 4 ++++
mm/slab.h | 1 +
mm/slab_common.c | 16 ++++++++++++++--
4 files changed, 27 insertions(+), 6 deletions(-)
diff --git a/include/linux/slab-static.h b/include/linux/slab-static.h
index 47b2220b4988..16d1564b4a4b 100644
--- a/include/linux/slab-static.h
+++ b/include/linux/slab-static.h
@@ -2,10 +2,7 @@
#ifndef _LINUX_SLAB_STATIC_H
#define _LINUX_SLAB_STATIC_H
-#ifdef MODULE
-#error "can't use that in modules"
-#endif
-
+#include <linux/init.h>
#include <generated/kmem_cache_size.h>
/* same size and alignment as struct kmem_cache: */
@@ -13,9 +10,16 @@ struct kmem_cache_opaque {
unsigned char opaque[KMEM_CACHE_SIZE];
} __aligned(KMEM_CACHE_ALIGN);
+#ifdef MODULE
+#define THIS_MODULE_KOBJ &THIS_MODULE->mkobj.kobj
+#else
+#define THIS_MODULE_KOBJ NULL
+#endif
+
#define __KMEM_CACHE_SETUP(cache, name, size, flags, ...) \
__kmem_cache_create_args((name), (size), \
&(struct kmem_cache_args) { \
+ .owner = THIS_MODULE_KOBJ, \
.preallocated = (cache), \
__VA_ARGS__}, (flags))
diff --git a/include/linux/slab.h b/include/linux/slab.h
index f16c784148b4..dc1aeb14a12b 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -60,6 +60,7 @@ enum _slab_flag_bits {
#ifdef CONFIG_SLAB_OBJ_EXT
_SLAB_NO_OBJ_EXT,
#endif
+ _SLAB_PREALLOCATED,
_SLAB_FLAGS_LAST_BIT
};
@@ -244,6 +245,8 @@ enum _slab_flag_bits {
#define SLAB_NO_OBJ_EXT __SLAB_FLAG_UNUSED
#endif
+#define SLAB_PREALLOCATED __SLAB_FLAG_BIT(_SLAB_PREALLOCATED)
+
/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
*
@@ -373,6 +376,7 @@ struct kmem_cache_args {
*/
unsigned int sheaf_capacity;
struct kmem_cache *preallocated;
+ struct kobject *owner;
};
struct kmem_cache *__kmem_cache_create_args(const char *name,
diff --git a/mm/slab.h b/mm/slab.h
index e767aa7e91b0..9ff9a0a3b164 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -249,6 +249,7 @@ struct kmem_cache {
struct list_head list; /* List of slab caches */
#ifdef CONFIG_SYSFS
struct kobject kobj; /* For sysfs */
+ struct kobject *owner; /* keep that pinned while alive */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
unsigned long random;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 81a413b44afb..a854e6872acd 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -245,6 +245,12 @@ static struct kmem_cache *create_cache(const char *name,
kmem_cache_free(kmem_cache, s);
return ERR_PTR(err);
}
+#ifdef CONFIG_SYSFS
+ if (flags & SLAB_PREALLOCATED) {
+ s->owner = args->owner;
+ kobject_get(s->owner);
+ }
+#endif
s->refcount = 1;
list_add(&s->list, &slab_caches);
return s;
@@ -322,7 +328,7 @@ struct kmem_cache *__kmem_cache_create_args(const char *name,
args->usersize = args->useroffset = 0;
if (args->preallocated)
- flags |= SLAB_NO_MERGE;
+ flags |= SLAB_NO_MERGE | SLAB_PREALLOCATED;
if (!args->usersize && !args->sheaf_capacity)
s = __kmem_cache_alias(name, object_size, args->align, flags,
@@ -481,7 +487,13 @@ void slab_kmem_cache_release(struct kmem_cache *s)
{
__kmem_cache_release(s);
kfree_const(s->name);
- kmem_cache_free(kmem_cache, s);
+ if (!(s->flags & SLAB_PREALLOCATED)) {
+ kmem_cache_free(kmem_cache, s);
+ return;
+ }
+#ifdef CONFIG_SYSFS
+ kobject_put(s->owner);
+#endif
}
void kmem_cache_destroy(struct kmem_cache *s)
--
2.47.3
Powered by blists - more mailing lists