Message-ID: <20230515080554.520976397@infradead.org>
Date: Mon, 15 May 2023 09:57:08 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: torvalds@...ux-foundation.org
Cc: corbet@....net, will@...nel.org, peterz@...radead.org,
boqun.feng@...il.com, mark.rutland@....com,
catalin.marinas@....com, dennis@...nel.org, tj@...nel.org,
cl@...ux.com, hca@...ux.ibm.com, gor@...ux.ibm.com,
agordeev@...ux.ibm.com, borntraeger@...ux.ibm.com,
svens@...ux.ibm.com, tglx@...utronix.de, mingo@...hat.com,
bp@...en8.de, dave.hansen@...ux.intel.com, x86@...nel.org,
hpa@...or.com, joro@...tes.org, suravee.suthikulpanit@....com,
robin.murphy@....com, dwmw2@...radead.org,
baolu.lu@...ux.intel.com, Arnd Bergmann <arnd@...db.de>,
Herbert Xu <herbert@...dor.apana.org.au>, davem@...emloft.net,
penberg@...nel.org, rientjes@...gle.com, iamjoonsoo.kim@....com,
Andrew Morton <akpm@...ux-foundation.org>, vbabka@...e.cz,
roman.gushchin@...ux.dev, 42.hyeyoo@...il.com,
linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, linux-s390@...r.kernel.org,
iommu@...ts.linux.dev, linux-arch@...r.kernel.org,
linux-crypto@...r.kernel.org
Subject: [PATCH v3 09/11] mm/slub: Fold slab_update_freelist()
The two functions slab_update_freelist() and __slab_update_freelist()
are nearly identical; fold them into one, add a boolean argument, and
rely on constant propagation to keep the two call-site variants cheap.
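
For illustration only (not part of the patch): a minimal sketch of the
constant-propagation idiom the changelog relies on, with hypothetical
names. Because the helper is __always_inline and every caller passes a
compile-time-constant bool, the compiler inlines the body and discards
the dead branch, so folding the two variants adds no runtime cost.

/*
 * Sketch, not SLUB code: an __always_inline helper with a bool
 * selector.  Callers pass a constant, so each call site compiles
 * down to exactly one of the two former function bodies.
 */
static __always_inline void do_update(int *p, int val, bool irq_save)
{
	unsigned long flags = 0;

	if (irq_save)			/* dropped entirely for irq_save == false callers */
		local_irq_save(flags);

	*p = val;

	if (irq_save)
		local_irq_restore(flags);
}

/* Constant arguments let the compiler specialise each caller. */
static void update_nolock(int *p, int val)  { do_update(p, val, false); }
static void update_irqsave(int *p, int val) { do_update(p, val, true);  }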
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
mm/slub.c | 80 +++++++++++++++++++++-----------------------------------------
1 file changed, 28 insertions(+), 52 deletions(-)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -559,53 +559,29 @@ __update_freelist_slow(struct slab *slab
* allocation/ free operation in hardirq context. Therefore nothing can
* interrupt the operation.
*/
-static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
- void *freelist_old, unsigned long counters_old,
- void *freelist_new, unsigned long counters_new,
- const char *n)
+static __always_inline
+bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
+ void *freelist_old, unsigned long counters_old,
+ void *freelist_new, unsigned long counters_new,
+ bool irq_save, const char *n)
{
bool ret;
- if (USE_LOCKLESS_FAST_PATH())
+ if (!irq_save && USE_LOCKLESS_FAST_PATH())
lockdep_assert_irqs_disabled();
if (s->flags & __CMPXCHG_DOUBLE) {
ret = __update_freelist_fast(slab, freelist_old, counters_old,
freelist_new, counters_new);
} else {
- ret = __update_freelist_slow(slab, freelist_old, counters_old,
- freelist_new, counters_new);
- }
- if (likely(ret))
- return true;
-
- cpu_relax();
- stat(s, CMPXCHG_DOUBLE_FAIL);
-
-#ifdef SLUB_DEBUG_CMPXCHG
- pr_info("%s %s: cmpxchg double redo ", n, s->name);
-#endif
-
- return false;
-}
-
-static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
- void *freelist_old, unsigned long counters_old,
- void *freelist_new, unsigned long counters_new,
- const char *n)
-{
- bool ret;
-
- if (s->flags & __CMPXCHG_DOUBLE) {
- ret = __update_freelist_fast(slab, freelist_old, counters_old,
- freelist_new, counters_new);
- } else {
unsigned long flags;
- local_irq_save(flags);
+ if (irq_save)
+ local_irq_save(flags);
ret = __update_freelist_slow(slab, freelist_old, counters_old,
freelist_new, counters_new);
- local_irq_restore(flags);
+ if (irq_save)
+ local_irq_restore(flags);
}
if (likely(ret))
return true;
@@ -2250,10 +2226,10 @@ static inline void *acquire_slab(struct
VM_BUG_ON(new.frozen);
new.frozen = 1;
- if (!__slab_update_freelist(s, slab,
- freelist, counters,
- new.freelist, new.counters,
- "acquire_slab"))
+ if (!slab_update_freelist(s, slab,
+ freelist, counters,
+ new.freelist, new.counters,
+ false, "acquire_slab"))
return NULL;
remove_partial(n, slab);
@@ -2577,9 +2553,9 @@ static void deactivate_slab(struct kmem_
if (!slab_update_freelist(s, slab,
- old.freelist, old.counters,
- new.freelist, new.counters,
- "unfreezing slab")) {
+ old.freelist, old.counters,
+ new.freelist, new.counters,
+ true, "unfreezing slab")) {
if (mode == M_PARTIAL)
spin_unlock_irqrestore(&n->list_lock, flags);
goto redo;
@@ -2633,10 +2609,10 @@ static void __unfreeze_partials(struct k
new.frozen = 0;
- } while (!__slab_update_freelist(s, slab,
- old.freelist, old.counters,
- new.freelist, new.counters,
- "unfreezing slab"));
+ } while (!slab_update_freelist(s, slab,
+ old.freelist, old.counters,
+ new.freelist, new.counters,
+ false, "unfreezing slab"));
if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
slab->next = slab_to_discard;
@@ -3072,10 +3048,10 @@ static inline void *get_freelist(struct
new.inuse = slab->objects;
new.frozen = freelist != NULL;
- } while (!__slab_update_freelist(s, slab,
- freelist, counters,
- NULL, new.counters,
- "get_freelist"));
+ } while (!slab_update_freelist(s, slab,
+ freelist, counters,
+ NULL, new.counters,
+ false, "get_freelist"));
return freelist;
}
@@ -3666,9 +3642,9 @@ static void __slab_free(struct kmem_cach
}
} while (!slab_update_freelist(s, slab,
- prior, counters,
- head, new.counters,
- "__slab_free"));
+ prior, counters,
+ head, new.counters,
+ true, "__slab_free"));
if (likely(!n)) {