Message-Id: <20251022-fix-slab-accounting-v1-1-27870ec363ce@suse.cz>
Date: Wed, 22 Oct 2025 19:23:11 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: Andrew Morton <akpm@...ux-foundation.org>,
Christoph Lameter <cl@...two.org>, David Rientjes <rientjes@...gle.com>,
Roman Gushchin <roman.gushchin@...ux.dev>, Harry Yoo <harry.yoo@...cle.com>,
Alexei Starovoitov <ast@...nel.org>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Vlastimil Babka <vbabka@...e.cz>
Subject: [PATCH] slab: perform inc_slabs_node() as part of new_slab()

Since commit af92793e52c3 ("slab: Introduce kmalloc_nolock() and
kfree_nolock().") there's a possibility in alloc_single_from_new_slab()
that we discard the newly allocated slab if we can't spin and we fail to
trylock. As a result we don't perform inc_slabs_node() later in the
function. Instead we perform a deferred deactivate_slab(), which can
either put the unaccounted slab on the partial list, or discard it
immediately while performing dec_slabs_node(). Either way causes an
accounting imbalance.
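
A condensed sketch of the problematic path (not a verbatim quote of
mm/slub.c; details trimmed): alloc_single_from_new_slab() bails out
before reaching its inc_slabs_node() call:

	if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) {
		/*
		 * Discard the newly allocated slab. It has not been
		 * counted via inc_slabs_node() yet ...
		 */
		slab->frozen = 1;
		defer_deactivate_slab(slab, NULL);
		/*
		 * ... but the deferred deactivate_slab() will either
		 * put it on the partial list, or discard it while
		 * performing dec_slabs_node(), so the per-node
		 * counters drift.
		 */
		return NULL;
	}
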
Fix this and also make the code more robust by performing
inc_slabs_node() in new_slab() itself, and removing it from its callers.

As a side effect, in the theoretical case where the new slab is
immediately leaked due to a debugging consistency check failure, it will
be accounted (as full) in /proc/slabinfo anyway, which is not wrong.
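
For context, these are the per-node counters that the slab stats are
derived from; inc_slabs_node() is roughly the following (a sketch,
modulo the CONFIG_SLUB_DEBUG guards in the actual tree):

	static inline void inc_slabs_node(struct kmem_cache *s, int node,
					  int objects)
	{
		struct kmem_cache_node *n = get_node(s, node);

		/*
		 * n may be NULL early during boot, before the per-node
		 * structure itself exists; see the note on
		 * early_kmem_cache_node_alloc() below.
		 */
		if (likely(n)) {
			atomic_long_inc(&n->nr_slabs);
			atomic_long_add(objects, &n->total_objects);
		}
	}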

The exceptional caller is early_kmem_cache_node_alloc(), where the node
for stats is not yet initialized. We can handle it by using
allocate_slab() there, as the gfp flags are known and fixed and we don't
need new_slab()'s fixups.
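
A condensed sketch of the early boot path with this change applied
(heavily trimmed; the real function also handles the freelist setup,
red zoning and KASAN):

	static void early_kmem_cache_node_alloc(int node)
	{
		struct slab *slab;
		struct kmem_cache_node *n;

		/*
		 * GFP_NOWAIT is fixed and valid here, so new_slab()'s
		 * flag fixups are unnecessary; calling allocate_slab()
		 * directly also avoids its inc_slabs_node(), which
		 * could not account anything yet as the per-node
		 * structure does not exist at this point.
		 */
		slab = allocate_slab(kmem_cache_node, GFP_NOWAIT, node);
		BUG_ON(!slab);

		n = slab->freelist;
		/* ... carve the kmem_cache_node out of the slab ... */
		init_kmem_cache_node(n);
		/* the explicit accounting done here is unchanged */
		inc_slabs_node(kmem_cache_node, node, slab->objects);
	}
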
Fixes: af92793e52c3 ("slab: Introduce kmalloc_nolock() and kfree_nolock().")
Signed-off-by: Vlastimil Babka <vbabka@...e.cz>
---
mm/slub.c | 20 ++++++++++++--------
1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 23d8f54e9486..dd4c85ea1038 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3268,13 +3268,21 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
+ struct slab *slab;
+
if (unlikely(flags & GFP_SLAB_BUG_MASK))
flags = kmalloc_fix_flags(flags);
WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
- return allocate_slab(s,
- flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
+ flags &= GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK;
+
+ slab = allocate_slab(s, flags, node);
+
+ if (likely(slab))
+ inc_slabs_node(s, slab_nid(slab), slab->objects);
+
+ return slab;
}
static void __free_slab(struct kmem_cache *s, struct slab *slab)
@@ -3415,8 +3423,7 @@ static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab,
int orig_size, gfp_t gfpflags)
{
bool allow_spin = gfpflags_allow_spinning(gfpflags);
- int nid = slab_nid(slab);
- struct kmem_cache_node *n = get_node(s, nid);
+ struct kmem_cache_node *n = get_node(s, slab_nid(slab));
unsigned long flags;
void *object;
@@ -3451,7 +3458,6 @@ static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab,
else
add_partial(n, slab, DEACTIVATE_TO_HEAD);
- inc_slabs_node(s, nid, slab->objects);
spin_unlock_irqrestore(&n->list_lock, flags);
return object;
@@ -4680,8 +4686,6 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
slab->inuse = slab->objects;
slab->frozen = 1;
- inc_slabs_node(s, slab_nid(slab), slab->objects);
-
if (unlikely(!pfmemalloc_match(slab, gfpflags) && allow_spin)) {
/*
* For !pfmemalloc_match() case we don't load freelist so that
@@ -7697,7 +7701,7 @@ static void early_kmem_cache_node_alloc(int node)
BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
- slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
+ slab = allocate_slab(kmem_cache_node, GFP_NOWAIT, node);
BUG_ON(!slab);
if (slab_nid(slab) != node) {
---
base-commit: 6ed8bfd24ce1cb31742b09a3eb557cd008533eec
change-id: 20251022-fix-slab-accounting-f0abbda8a6ff
Best regards,
--
Vlastimil Babka <vbabka@...e.cz>