Message-Id: <20240715-b4-slab-kfree_rcu-destroy-v1-2-46b2984c2205@suse.cz>
Date: Mon, 15 Jul 2024 22:29:28 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: "Paul E. McKenney" <paulmck@...nel.org>, 
 Joel Fernandes <joel@...lfernandes.org>, 
 Josh Triplett <josh@...htriplett.org>, Boqun Feng <boqun.feng@...il.com>, 
 Christoph Lameter <cl@...ux.com>, David Rientjes <rientjes@...gle.com>
Cc: Steven Rostedt <rostedt@...dmis.org>, 
 Mathieu Desnoyers <mathieu.desnoyers@...icios.com>, 
 Lai Jiangshan <jiangshanlai@...il.com>, Zqiang <qiang.zhang1211@...il.com>, 
 Julia Lawall <Julia.Lawall@...ia.fr>, Jakub Kicinski <kuba@...nel.org>, 
 "Jason A. Donenfeld" <Jason@...c4.com>, 
 "Uladzislau Rezki (Sony)" <urezki@...il.com>, 
 Andrew Morton <akpm@...ux-foundation.org>, 
 Roman Gushchin <roman.gushchin@...ux.dev>, 
 Hyeonggon Yoo <42.hyeyoo@...il.com>, linux-mm@...ck.org, 
 linux-kernel@...r.kernel.org, rcu@...r.kernel.org, 
 Vlastimil Babka <vbabka@...e.cz>
Subject: [PATCH RFC 2/6] mm, slab: always maintain per-node slab and object
 count

Currently SLUB counts per-node slabs and total objects only with
CONFIG_SLUB_DEBUG, in order to minimize overhead. However, the check in
__kmem_cache_shutdown() for outstanding objects relies on the per-node
slab count (node_nr_slabs()), so it may be unreliable without
CONFIG_SLUB_DEBUG. Thus we might fail to warn about such situations and
instead destroy a cache while leaving its slab(s) around (due to a buggy
slab user creating such a scenario, not in normal operation).
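
For reference, the shutdown-time check in question boils down to the
following. This is only a simplified sketch of the existing mm/slub.c
code (free_partial() and flush_all_cpus_locked() are the existing
helpers), not part of this patch:

	int __kmem_cache_shutdown(struct kmem_cache *s)
	{
		int node;
		struct kmem_cache_node *n;

		flush_all_cpus_locked(s);	/* drain per-cpu slabs */
		for_each_kmem_cache_node(s, node, n) {
			free_partial(s, n);	/* discard empty partial slabs */
			/* anything still counted here is an outstanding object */
			if (n->nr_partial || node_nr_slabs(n))
				return 1;
		}
		return 0;
	}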

We will also need node_nr_slabs() to be reliable in the following work
to gracefully handle kmem_cache_destroy() with kfree_rcu() objects in
flight. Thus make the counting of per-node slabs and objects
unconditional.

Note that CONFIG_SLUB_DEBUG is the default anyway, and the counting is
done only when allocating or freeing a slab page, so even in
!CONFIG_SLUB_DEBUG configs the overhead should be negligible.
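
Concretely, and again only as an illustrative sketch of the existing
code rather than part of this patch, the counters are updated from the
slab page alloc/free paths along these lines:

	/* on slab page allocation, e.g. at the end of allocate_slab(): */
	inc_slabs_node(s, slab_nid(slab), slab->objects);

	/* on slab page freeing, e.g. in discard_slab(): */
	dec_slabs_node(s, slab_nid(slab), slab->objects);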

Signed-off-by: Vlastimil Babka <vbabka@...e.cz>
---
 mm/slub.c | 49 +++++++++++++++++++++----------------------------
 1 file changed, 21 insertions(+), 28 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 829a1f08e8a2..aa4d80109c49 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -426,9 +426,9 @@ struct kmem_cache_node {
 	spinlock_t list_lock;
 	unsigned long nr_partial;
 	struct list_head partial;
-#ifdef CONFIG_SLUB_DEBUG
 	atomic_long_t nr_slabs;
 	atomic_long_t total_objects;
+#ifdef CONFIG_SLUB_DEBUG
 	struct list_head full;
 #endif
 };
@@ -438,6 +438,26 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 	return s->node[node];
 }
 
+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+{
+	return atomic_long_read(&n->nr_slabs);
+}
+
+static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
+{
+	struct kmem_cache_node *n = get_node(s, node);
+
+	atomic_long_inc(&n->nr_slabs);
+	atomic_long_add(objects, &n->total_objects);
+}
+static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
+{
+	struct kmem_cache_node *n = get_node(s, node);
+
+	atomic_long_dec(&n->nr_slabs);
+	atomic_long_sub(objects, &n->total_objects);
+}
+
 /*
  * Iterator over all nodes. The body will be executed for each node that has
  * a kmem_cache_node structure allocated (which is true for all online nodes)
@@ -1511,26 +1531,6 @@ static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct
 	list_del(&slab->slab_list);
 }
 
-static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
-{
-	return atomic_long_read(&n->nr_slabs);
-}
-
-static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
-{
-	struct kmem_cache_node *n = get_node(s, node);
-
-	atomic_long_inc(&n->nr_slabs);
-	atomic_long_add(objects, &n->total_objects);
-}
-static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
-{
-	struct kmem_cache_node *n = get_node(s, node);
-
-	atomic_long_dec(&n->nr_slabs);
-	atomic_long_sub(objects, &n->total_objects);
-}
-
 /* Object debug checks for alloc/free paths */
 static void setup_object_debug(struct kmem_cache *s, void *object)
 {
@@ -1871,13 +1871,6 @@ slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
 
 #define disable_higher_order_debug 0
 
-static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
-							{ return 0; }
-static inline void inc_slabs_node(struct kmem_cache *s, int node,
-							int objects) {}
-static inline void dec_slabs_node(struct kmem_cache *s, int node,
-							int objects) {}
-
 #ifndef CONFIG_SLUB_TINY
 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
 			       void **freelist, void *nextfree)

-- 
2.45.2
