Message-ID: <aW3SJBR1BcDor-ya@hyeyoo>
Date: Mon, 19 Jan 2026 15:41:40 +0900
From: Harry Yoo <harry.yoo@...cle.com>
To: Vlastimil Babka <vbabka@...e.cz>
Cc: Petr Tesarik <ptesarik@...e.com>, Christoph Lameter <cl@...two.org>,
David Rientjes <rientjes@...gle.com>,
Roman Gushchin <roman.gushchin@...ux.dev>, Hao Li <hao.li@...ux.dev>,
Andrew Morton <akpm@...ux-foundation.org>,
Uladzislau Rezki <urezki@...il.com>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Suren Baghdasaryan <surenb@...gle.com>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Alexei Starovoitov <ast@...nel.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, linux-rt-devel@...ts.linux.dev,
bpf@...r.kernel.org, kasan-dev@...glegroups.com
Subject: Re: [PATCH v3 09/21] slab: add optimized sheaf refill from partial
list
On Fri, Jan 16, 2026 at 03:40:29PM +0100, Vlastimil Babka wrote:
> At this point we have sheaves enabled for all caches, but their refill
> is done via __kmem_cache_alloc_bulk() which relies on cpu (partial)
> slabs - now a redundant caching layer that we are about to remove.
>
> The refill will thus be done from slabs on the node partial list.
> Introduce new functions that can do that in an optimized way as it's
> easier than modifying the __kmem_cache_alloc_bulk() call chain.
>
> Extend struct partial_context so it can return a list of slabs from the
> partial list with the sum of free objects in them within the requested
> min and max.
>
> Introduce get_partial_node_bulk() that removes the slabs from the
> partial list and returns them in the list.
>
> Introduce get_freelist_nofreeze() which grabs the freelist without
> freezing the slab.
>
> Introduce alloc_from_new_slab() which can allocate multiple objects from
> a newly allocated slab where we don't need to synchronize with freeing.
> In some aspects it's similar to alloc_single_from_new_slab() but assumes
> the cache is a non-debug one so it can avoid some actions.
>
> Introduce __refill_objects() that uses the functions above to fill an
> array of objects. It has to handle the possibility that the slabs will
> contain more objects than were requested, due to concurrent freeing of
> objects to those slabs. When no more slabs on partial lists are
> available, it will allocate new slabs. It is intended to be used only
> in contexts where spinning is allowed, so add a WARN_ON_ONCE check there.
>
> Finally, switch refill_sheaf() to use __refill_objects(). Sheaves are
> only refilled from contexts that allow spinning, or even blocking.
>
> Signed-off-by: Vlastimil Babka <vbabka@...e.cz>
> ---
> mm/slub.c | 284 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----
> 1 file changed, 264 insertions(+), 20 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index 9bea8a65e510..dce80463f92c 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -3522,6 +3525,63 @@ static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab,
> #endif
> static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
>
> +static bool get_partial_node_bulk(struct kmem_cache *s,
> + struct kmem_cache_node *n,
> + struct partial_context *pc)
> +{
> + struct slab *slab, *slab2;
> + unsigned int total_free = 0;
> + unsigned long flags;
> +
> + /* Racy check to avoid taking the lock unnecessarily. */
> + if (!n || data_race(!n->nr_partial))
> + return false;
> +
> + INIT_LIST_HEAD(&pc->slabs);
> +
> + spin_lock_irqsave(&n->list_lock, flags);
> +
> + list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
> + struct freelist_counters flc;
> + unsigned int slab_free;
> +
> + if (!pfmemalloc_match(slab, pc->flags))
> + continue;
> + /*
> + * determine the number of free objects in the slab racily
> + *
> + * due to atomic updates done by a racing free we should not
> + * read an inconsistent value here, but do a sanity check anyway
> + *
> + * slab_free is a lower bound due to subsequent concurrent
> + * freeing, the caller might get more objects than requested and
> + * must deal with it
> + */
> + flc.counters = data_race(READ_ONCE(slab->counters));
> + slab_free = flc.objects - flc.inuse;
> +
> + if (unlikely(slab_free > oo_objects(s->oo)))
> + continue;
When is this condition supposed to be true?
I guess it's when __update_freelist_slow() doesn't update
slab->counters atomically?
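FWIW, my understanding of the slow path, paraphrased from my reading of
current mm/slub.c (a sketch, not the exact code):

	static inline bool __update_freelist_slow(struct slab *slab,
			void *freelist_old, unsigned long counters_old,
			void *freelist_new, unsigned long counters_new)
	{
		bool ret = false;

		slab_lock(slab);
		if (slab->freelist == freelist_old &&
		    slab->counters == counters_old) {
			/* plain stores, not a single atomic cmpxchg */
			slab->freelist = freelist_new;
			slab->counters = counters_new;
			ret = true;
		}
		slab_unlock(slab);

		return ret;
	}

Since slab->counters is a single word updated under slab_lock(), I'd
expect a racing READ_ONCE() to see either the old or the new value, so
the sanity check looks like cheap insurance against a torn plain store.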
> +
> + /* we already have min and this would get us over the max */
> + if (total_free >= pc->min_objects
> + && total_free + slab_free > pc->max_objects)
> + break;
> +
> + remove_partial(n, slab);
> +
> + list_add(&slab->slab_list, &pc->slabs);
> +
> + total_free += slab_free;
> + if (total_free >= pc->max_objects)
> + break;
> + }
> +
> + spin_unlock_irqrestore(&n->list_lock, flags);
> + return total_free > 0;
> +}
> +
> /*
> * Try to allocate a partial slab from a specific node.
> */
> +static unsigned int alloc_from_new_slab(struct kmem_cache *s, struct slab *slab,
> + void **p, unsigned int count, bool allow_spin)
> +{
> + unsigned int allocated = 0;
> + struct kmem_cache_node *n;
> + unsigned long flags;
> + void *object;
> +
> + if (!allow_spin && (slab->objects - slab->inuse) > count) {
> +
> + n = get_node(s, slab_nid(slab));
> +
> + if (!spin_trylock_irqsave(&n->list_lock, flags)) {
> + /* Unlucky, discard newly allocated slab */
> + defer_deactivate_slab(slab, NULL);
> + return 0;
> + }
> + }
> +
> + object = slab->freelist;
> + while (object && allocated < count) {
> + p[allocated] = object;
> + object = get_freepointer(s, object);
> + maybe_wipe_obj_freeptr(s, p[allocated]);
> +
> + slab->inuse++;
> + allocated++;
> + }
> + slab->freelist = object;
> +
> + if (slab->freelist) {
> +
> + if (allow_spin) {
> + n = get_node(s, slab_nid(slab));
> + spin_lock_irqsave(&n->list_lock, flags);
> + }
> + add_partial(n, slab, DEACTIVATE_TO_HEAD);
> + spin_unlock_irqrestore(&n->list_lock, flags);
> + }
> +
> + inc_slabs_node(s, slab_nid(slab), slab->objects);
Maybe add a comment explaining why inc_slabs_node() doesn't need to be
called under n->list_lock?
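Something like this, perhaps (assuming the reason is simply that
inc_slabs_node() only updates atomic counters and doesn't touch the
partial list):

	/*
	 * No need for n->list_lock here: nr_slabs and total_objects
	 * are atomic counters and the partial list is not modified.
	 */
	inc_slabs_node(s, slab_nid(slab), slab->objects);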
> + return allocated;
> +}
> +
> /*
> * Slow path. The lockless freelist is empty or we need to perform
> * debugging duties.
> @@ -5388,6 +5519,9 @@ static int __prefill_sheaf_pfmemalloc(struct kmem_cache *s,
> return ret;
> }
>
> +static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
> + size_t size, void **p);
> +
> /*
> * returns a sheaf that has at least the requested size
> * when prefilling is needed, do so with given gfp flags
> @@ -7463,6 +7597,116 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
> }
> EXPORT_SYMBOL(kmem_cache_free_bulk);
>
> +static unsigned int
> +__refill_objects(struct kmem_cache *s, void **p, gfp_t gfp, unsigned int min,
> + unsigned int max)
> +{
> + struct slab *slab, *slab2;
> + struct partial_context pc;
> + unsigned int refilled = 0;
> + unsigned long flags;
> + void *object;
> + int node;
> +
> + pc.flags = gfp;
> + pc.min_objects = min;
> + pc.max_objects = max;
> +
> + node = numa_mem_id();
> +
> + if (WARN_ON_ONCE(!gfpflags_allow_spinning(gfp)))
> + return 0;
> +
> + /* TODO: consider also other nodes? */
> + if (!get_partial_node_bulk(s, get_node(s, node), &pc))
> + goto new_slab;
> +
> + list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
> +
> + list_del(&slab->slab_list);
When a slab is removed from the list,
> + object = get_freelist_nofreeze(s, slab);
> +
> + while (object && refilled < max) {
> + p[refilled] = object;
> + object = get_freepointer(s, object);
> + maybe_wipe_obj_freeptr(s, p[refilled]);
> +
> + refilled++;
> + }
> +
> + /*
> + * The freelist had more objects than we can accommodate; we need
> + * to free them back. We can treat it like a detached freelist, we
> + * just need to find the tail object.
> + */
> + if (unlikely(object)) {
and the freelist had more objects than requested,
> + void *head = object;
> + void *tail;
> + int cnt = 0;
> +
> + do {
> + tail = object;
> + cnt++;
> + object = get_freepointer(s, object);
> + } while (object);
> + do_slab_free(s, slab, head, tail, cnt, _RET_IP_);
the extra objects are freed back to the slab, but the slab may or may
not be added back to n->partial?
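To illustrate: IIUC whether the slab goes back to n->partial depends on
prior and the cpu partial config. Paraphrasing __slab_free() from my
reading of current mm/slub.c (a sketch, could be wrong):

	/* in the cmpxchg loop */
	if ((!new.inuse || !prior) && !was_frozen) {
		if (kmem_cache_has_cpu_partial(s) && !prior) {
			/* goes to put_cpu_partial(), not n->partial */
			new.frozen = 1;
		} else {
			n = get_node(s, slab_nid(slab));
			spin_lock_irqsave(&n->list_lock, flags);
		}
	}
	...
	/* prior == NULL here, as get_freelist_nofreeze() took everything */
	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
		add_partial(n, slab, DEACTIVATE_TO_TAIL);
		stat(s, FREE_ADD_PARTIAL);
	}

so until cpu partials are gone later in the series, these objects can
end up on a frozen slab instead?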
> + }
> +
> + if (refilled >= max)
> + break;
> + }
> +
> + if (unlikely(!list_empty(&pc.slabs))) {
> + struct kmem_cache_node *n = get_node(s, node);
> +
> + spin_lock_irqsave(&n->list_lock, flags);
> +
> + list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
> +
> + if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial))
> + continue;
> +
> + list_del(&slab->slab_list);
> + add_partial(n, slab, DEACTIVATE_TO_HEAD);
> + }
> +
> + spin_unlock_irqrestore(&n->list_lock, flags);
> +
> + /* any slabs left are completely free and for discard */
> + list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
> +
> + list_del(&slab->slab_list);
> + discard_slab(s, slab);
> + }
> + }
> +
> +
> + if (likely(refilled >= min))
> + goto out;
> +
> +new_slab:
> +
> + slab = new_slab(s, pc.flags, node);
> + if (!slab)
> + goto out;
> +
> + stat(s, ALLOC_SLAB);
> +
> + /*
> + * TODO: possible optimization - if we know we will consume the whole
> + * slab we might skip creating the freelist?
> + */
> + refilled += alloc_from_new_slab(s, slab, p + refilled, max - refilled,
> + /* allow_spin = */ true);
> +
> + if (refilled < min)
> + goto new_slab;
It should jump to the out: label when alloc_from_new_slab() returns
zero (trylock failed).
...Oh wait, no. I was confused; the trylock path can't be taken here.
Why does alloc_from_new_slab() handle the !allow_spin case at all, when
it is never called with allow_spin == false?
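If the !allow_spin handling is only for a later patch in the series,
maybe mention that in the changelog? Otherwise I'd drop the parameter
for now, e.g. (untested sketch):

	static unsigned int alloc_from_new_slab(struct kmem_cache *s,
						struct slab *slab,
						void **p, unsigned int count)

and reintroduce allow_spin together with its first user.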
> +out:
> +
> + return refilled;
> +}
--
Cheers,
Harry / Hyeonggon