Message-ID: <e81d914b-8718-4dbb-a2d8-d5298fe66d1a@bytedance.com>
Date: Fri, 19 Jan 2024 11:53:12 +0800
From: Chengming Zhou <zhouchengming@...edance.com>
To: "Christoph Lameter (Ampere)" <cl@...ux.com>
Cc: Hyeonggon Yoo <42.hyeyoo@...il.com>, Joonsoo Kim
<iamjoonsoo.kim@....com>, Vlastimil Babka <vbabka@...e.cz>,
Pekka Enberg <penberg@...nel.org>, Andrew Morton
<akpm@...ux-foundation.org>, Roman Gushchin <roman.gushchin@...ux.dev>,
David Rientjes <rientjes@...gle.com>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/3] mm/slub: directly load freelist from cpu partial slab
in the likely case

On 2024/1/19 06:14, Christoph Lameter (Ampere) wrote:
> On Thu, 18 Jan 2024, Chengming Zhou wrote:
>
>> So get_freelist() has two cases to handle: the cpu slab and a slab taken from the cpu partial list.
>> The latter is NOT frozen, so we need to remove the "VM_BUG_ON(!new.frozen)" check for it.
>
> Right, so keep the check if it is the former?
>

Ok, I get it. Maybe something like this:

diff --git a/mm/slub.c b/mm/slub.c
index 2ef88bbf56a3..7fa9dbc2e938 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3313,7 +3313,7 @@ __update_cpu_freelist_fast(struct kmem_cache *s,
  *
  * If this function returns NULL then the slab has been unfrozen.
  */
-static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
+static inline void *get_freelist(struct kmem_cache *s, struct slab *slab, int frozen)
 {
 	struct slab new;
 	unsigned long counters;
@@ -3326,7 +3326,7 @@ static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
 		counters = slab->counters;
 
 		new.counters = counters;
-		VM_BUG_ON(!new.frozen);
+		VM_BUG_ON(frozen && !new.frozen);
 
 		new.inuse = slab->objects;
 		new.frozen = freelist != NULL;
@@ -3440,7 +3440,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	if (freelist)
 		goto load_freelist;
 
-	freelist = get_freelist(s, slab);
+	freelist = get_freelist(s, slab, 1);
 
 	if (!freelist) {
 		c->slab = NULL;
@@ -3498,18 +3498,19 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 		slab = slub_percpu_partial(c);
 		slub_set_percpu_partial(c, slab);
-		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
-		stat(s, CPU_PARTIAL_ALLOC);
 
-		if (unlikely(!node_match(slab, node) ||
-			     !pfmemalloc_match(slab, gfpflags))) {
-			slab->next = NULL;
-			__put_partials(s, slab);
-			continue;
+		if (likely(node_match(slab, node) &&
+			   pfmemalloc_match(slab, gfpflags))) {
+			c->slab = slab;
+			freelist = get_freelist(s, slab, 0);
+			stat(s, CPU_PARTIAL_ALLOC);
+			goto load_freelist;
 		}
 
-		freelist = freeze_slab(s, slab);
-		goto retry_load_slab;
+		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
+
+		slab->next = NULL;
+		__put_partials(s, slab);
 	}
 #endif
 
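
To make the intent of the new parameter concrete, here is a tiny userspace
sketch (my own illustration only, not kernel code; "struct slab" and assert()
below are hypothetical stand-ins for the real slab machinery and VM_BUG_ON()):
the cpu slab path passes frozen=1 and keeps the old assertion, while the cpu
partial path passes frozen=0, since slabs on the percpu partial list are no
longer frozen.

/*
 * Standalone illustration of the relaxed debug check: "frozen" tells
 * the callee whether the caller expects the slab to be frozen, so the
 * assertion only fires on the cpu slab path.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct slab {
	bool frozen;	/* frozen == owned by a cpu as its active slab */
	void *freelist;	/* first free object, NULL if fully allocated */
};

static void *get_freelist_sketch(struct slab *slab, int frozen)
{
	/* mirrors VM_BUG_ON(frozen && !new.frozen) in the patch above */
	assert(!(frozen && !slab->frozen));
	return slab->freelist;
}

int main(void)
{
	int obj;
	struct slab cpu_slab = { .frozen = true,  .freelist = &obj };
	struct slab partial  = { .frozen = false, .freelist = &obj };

	get_freelist_sketch(&cpu_slab, 1);	/* cpu slab: check enforced */
	get_freelist_sketch(&partial, 0);	/* cpu partial: check relaxed */
	printf("both paths ok\n");
	return 0;
}

Passing the caller's expectation as an argument keeps a single cmpxchg loop
while still catching a stale frozen bit on the cpu slab path, without
asserting on partial slabs.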