[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20240715-b4-slab-kfree_rcu-destroy-v1-4-46b2984c2205@suse.cz>
Date: Mon, 15 Jul 2024 22:29:30 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: "Paul E. McKenney" <paulmck@...nel.org>,
Joel Fernandes <joel@...lfernandes.org>,
Josh Triplett <josh@...htriplett.org>, Boqun Feng <boqun.feng@...il.com>,
Christoph Lameter <cl@...ux.com>, David Rientjes <rientjes@...gle.com>
Cc: Steven Rostedt <rostedt@...dmis.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Lai Jiangshan <jiangshanlai@...il.com>, Zqiang <qiang.zhang1211@...il.com>,
Julia Lawall <Julia.Lawall@...ia.fr>, Jakub Kicinski <kuba@...nel.org>,
"Jason A. Donenfeld" <Jason@...c4.com>,
"Uladzislau Rezki (Sony)" <urezki@...il.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Hyeonggon Yoo <42.hyeyoo@...il.com>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, rcu@...r.kernel.org,
Vlastimil Babka <vbabka@...e.cz>
Subject: [PATCH RFC 4/6] mm, slab: simplify kmem_cache_release()
kfence_shutdown_cache() is now always called just before
kmem_cache_release(), so move the call there.
Also replace the two #ifdef variants of the function with a single one
using __is_defined(SLAB_SUPPORTS_SYSFS).
Signed-off-by: Vlastimil Babka <vbabka@...e.cz>
---
mm/slab_common.c | 18 ++++--------------
1 file changed, 4 insertions(+), 14 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 2eef5ad37fa7..57962e1a5a86 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -486,7 +486,6 @@ kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
}
EXPORT_SYMBOL(kmem_buckets_create);
-#ifdef SLAB_SUPPORTS_SYSFS
/*
* For a given kmem_cache, kmem_cache_destroy() should only be called
* once or there will be a use-after-free problem. The actual deletion
@@ -495,18 +494,12 @@ EXPORT_SYMBOL(kmem_buckets_create);
*/
static void kmem_cache_release(struct kmem_cache *s)
{
- if (slab_state >= FULL) {
+ kfence_shutdown_cache(s);
+ if (__is_defined(SLAB_SUPPORTS_SYSFS) && slab_state >= FULL)
sysfs_slab_release(s);
- } else {
+ else
slab_kmem_cache_release(s);
- }
-}
-#else
-static void kmem_cache_release(struct kmem_cache *s)
-{
- slab_kmem_cache_release(s);
}
-#endif
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
@@ -531,10 +524,8 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
rcu_barrier();
- list_for_each_entry_safe(s, s2, &to_destroy, list) {
- kfence_shutdown_cache(s);
+ list_for_each_entry_safe(s, s2, &to_destroy, list)
kmem_cache_release(s);
- }
}
void slab_kmem_cache_release(struct kmem_cache *s)
@@ -591,7 +582,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
schedule_work(&slab_caches_to_rcu_destroy_work);
mutex_unlock(&slab_mutex);
} else {
- kfence_shutdown_cache(s);
kmem_cache_release(s);
}
}
--
2.45.2
Powered by blists - more mailing lists