Message-Id: <20231017154439.3036608-6-chengming.zhou@linux.dev>
Date: Tue, 17 Oct 2023 15:44:39 +0000
From: chengming.zhou@...ux.dev
To: cl@...ux.com, penberg@...nel.org
Cc: rientjes@...gle.com, iamjoonsoo.kim@....com,
akpm@...ux-foundation.org, vbabka@...e.cz,
roman.gushchin@...ux.dev, 42.hyeyoo@...il.com, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, chengming.zhou@...ux.dev,
Chengming Zhou <zhouchengming@...edance.com>
Subject: [RFC PATCH 5/5] slub: Introduce get_cpu_partial()
From: Chengming Zhou <zhouchengming@...edance.com>

Since the slabs on the cpu partial list are not frozen anymore, we
introduce get_cpu_partial() to get a frozen slab with its freelist off
the cpu partial list. It now works much like getting a frozen slab with
its freelist off the node partial list.
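
(Not part of the patch: a minimal userspace model of the freeze step,
with made-up toy_* names. The free objects are claimed and the frozen
bit set in one compare-and-swap over packed state, which is roughly what
the __slab_update_freelist() loop below does over the real
(freelist, counters) pair.)

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for struct slab: bit 63 of @state is "frozen",
 * the low bits count the free objects. */
struct toy_slab {
	_Atomic uint64_t state;
};

#define TOY_FROZEN	(1ULL << 63)

/* Claim all free objects and set the frozen bit in a single CAS,
 * retrying if the state changed under us. */
static bool toy_freeze(struct toy_slab *slab, uint32_t *nr_free)
{
	uint64_t old = atomic_load(&slab->state);

	do {
		if (old & TOY_FROZEN)	/* already cpu-owned */
			return false;
		*nr_free = (uint32_t)old;
	} while (!atomic_compare_exchange_weak(&slab->state, &old,
					       TOY_FROZEN));
	return true;
}

int main(void)
{
	struct toy_slab s = { .state = 5 };
	uint32_t nr;

	if (toy_freeze(&s, &nr))
		printf("froze slab with %u free objects\n", nr);
	return 0;
}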

Another change is in get_partial(): it can now return no frozen slab
when acquire_slab() failed on every slab it tried, while still having
put some unfrozen slabs on the cpu partial list. We need to check for
this rare case to avoid allocating a new slab unnecessarily.
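
(Again not part of the patch: a toy model of that rare case, with
made-up toy_* names. When the node path hands back no frozen slab but
has refilled the cpu partial list, retry the cpu partial list before
paying for a brand-new slab.)

#include <stdbool.h>
#include <stdio.h>

struct toy_cache {
	int nr_cpu_partial;	/* slabs parked on the cpu partial list */
};

/* Stand-in for get_cpu_partial(): pop one slab if any is parked. */
static bool toy_get_cpu_partial(struct toy_cache *c)
{
	if (c->nr_cpu_partial > 0) {
		c->nr_cpu_partial--;
		return true;
	}
	return false;
}

/* Stand-in for get_partial() hitting the rare case: every acquire
 * attempt failed, yet two unfrozen slabs still got parked on the
 * cpu partial list. */
static bool toy_get_node_partial(struct toy_cache *c)
{
	c->nr_cpu_partial += 2;
	return false;
}

static const char *toy_slab_alloc(struct toy_cache *c)
{
new_slab:
	if (toy_get_cpu_partial(c))
		return "slab from cpu partial list";

	if (toy_get_node_partial(c))
		return "frozen slab from node partial list";

	/* The rare case: retry the cpu partial list before
	 * allocating a brand-new slab. */
	if (c->nr_cpu_partial > 0)
		goto new_slab;

	return "newly allocated slab";
}

int main(void)
{
	struct toy_cache c = { .nr_cpu_partial = 0 };

	printf("%s\n", toy_slab_alloc(&c));
	return 0;
}
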
Signed-off-by: Chengming Zhou <zhouchengming@...edance.com>
---
 mm/slub.c | 87 +++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 68 insertions(+), 19 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 044235bd8a45..d58eaf8447fd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3064,6 +3064,68 @@ static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
 	return freelist;
 }
 
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+
+static void *get_cpu_partial(struct kmem_cache *s, struct kmem_cache_cpu *c,
+			     struct slab **slabptr, int node, gfp_t gfpflags)
+{
+	unsigned long flags;
+	struct slab *slab;
+	struct slab new;
+	unsigned long counters;
+	void *freelist;
+
+	while (slub_percpu_partial(c)) {
+		local_lock_irqsave(&s->cpu_slab->lock, flags);
+		if (unlikely(!slub_percpu_partial(c))) {
+			local_unlock_irqrestore(&s->cpu_slab->lock, flags);
+			/* we were preempted and partial list got empty */
+			return NULL;
+		}
+
+		slab = slub_percpu_partial(c);
+		slub_set_percpu_partial(c, slab);
+		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
+		stat(s, CPU_PARTIAL_ALLOC);
+
+		if (unlikely(!node_match(slab, node) ||
+			     !pfmemalloc_match(slab, gfpflags))) {
+			slab->next = NULL;
+			__unfreeze_partials(s, slab);
+			continue;
+		}
+
+		do {
+			freelist = slab->freelist;
+			counters = slab->counters;
+
+			new.counters = counters;
+			VM_BUG_ON(new.frozen);
+
+			new.inuse = slab->objects;
+			new.frozen = 1;
+		} while (!__slab_update_freelist(s, slab,
+						 freelist, counters,
+						 NULL, new.counters,
+						 "get_cpu_partial"));
+
+		*slabptr = slab;
+		return freelist;
+	}
+
+	return NULL;
+}
+
+#else /* CONFIG_SLUB_CPU_PARTIAL */
+
+static void *get_cpu_partial(struct kmem_cache *s, struct kmem_cache_cpu *c,
+			     struct slab **slabptr, int node, gfp_t gfpflags)
+{
+	return NULL;
+}
+
+#endif
+
 /*
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.
@@ -3106,7 +3168,6 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		node = NUMA_NO_NODE;
 		goto new_slab;
 	}
-redo:
 
 	if (unlikely(!node_match(slab, node))) {
 		/*
@@ -3182,24 +3243,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 new_slab:
 
-	if (slub_percpu_partial(c)) {
-		local_lock_irqsave(&s->cpu_slab->lock, flags);
-		if (unlikely(c->slab)) {
-			local_unlock_irqrestore(&s->cpu_slab->lock, flags);
-			goto reread_slab;
-		}
-		if (unlikely(!slub_percpu_partial(c))) {
-			local_unlock_irqrestore(&s->cpu_slab->lock, flags);
-			/* we were preempted and partial list got empty */
-			goto new_objects;
-		}
-
-		slab = c->slab = slub_percpu_partial(c);
-		slub_set_percpu_partial(c, slab);
-		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
-		stat(s, CPU_PARTIAL_ALLOC);
-		goto redo;
-	}
+	freelist = get_cpu_partial(s, c, &slab, node, gfpflags);
+	if (freelist)
+		goto retry_load_slab;
 
 new_objects:
 
@@ -3210,6 +3256,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	if (freelist)
 		goto check_new_slab;
 
+	if (slub_percpu_partial(c))
+		goto new_slab;
+
 	slub_put_cpu_ptr(s->cpu_slab);
 	slab = new_slab(s, gfpflags, node);
 	c = slub_get_cpu_ptr(s->cpu_slab);
--
2.40.1