Date:   Tue,  2 Feb 2021 03:05:14 -0500
From:   Abel Wu <abel.w@...oud.com>
To:     Christoph Lameter <cl@...ux.com>,
        Pekka Enberg <penberg@...nel.org>,
        David Rientjes <rientjes@...gle.com>,
        Joonsoo Kim <iamjoonsoo.kim@....com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Vlastimil Babka <vbabka@...e.cz>
Cc:     hewenliang4@...wei.com, wuyun.wu@...wei.com,
        Abel Wu <abel.w@...oud.com>, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org
Subject: [PATCH] mm/slub: embed __slab_alloc into its caller

Since slab_alloc_node() is the only caller of __slab_alloc(), embed
__slab_alloc() into its caller to save the function call overhead.
This also grows the caller's code size a bit, but hackbench runs on
both host and guest showed no measurable difference with or without
this patch (see the example invocation below).

Also rename ___slab_alloc() to __slab_alloc().

Signed-off-by: Abel Wu <abel.w@...oud.com>
---
 mm/slub.c | 46 ++++++++++++++++------------------------------
 1 file changed, 16 insertions(+), 30 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 7ecbbbe5bc0c..0f69d2d0471a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2654,10 +2654,9 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
  *
- * Version of __slab_alloc to use when we know that interrupts are
- * already disabled (which is the case for bulk allocation).
+ * Must be called with interrupts disabled.
  */
-static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			  unsigned long addr, struct kmem_cache_cpu *c)
 {
 	void *freelist;
@@ -2758,31 +2757,6 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	return freelist;
 }
 
-/*
- * Another one that disabled interrupt and compensates for possible
- * cpu changes by refetching the per cpu area pointer.
- */
-static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
-			  unsigned long addr, struct kmem_cache_cpu *c)
-{
-	void *p;
-	unsigned long flags;
-
-	local_irq_save(flags);
-#ifdef CONFIG_PREEMPTION
-	/*
-	 * We may have been preempted and rescheduled on a different
-	 * cpu before disabling interrupts. Need to reload cpu area
-	 * pointer.
-	 */
-	c = this_cpu_ptr(s->cpu_slab);
-#endif
-
-	p = ___slab_alloc(s, gfpflags, node, addr, c);
-	local_irq_restore(flags);
-	return p;
-}
-
 /*
  * If the object has been wiped upon free, make sure it's fully initialized by
  * zeroing out freelist pointer.
@@ -2854,7 +2828,19 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	object = c->freelist;
 	page = c->page;
 	if (unlikely(!object || !page || !node_match(page, node))) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+#ifdef CONFIG_PREEMPTION
+		/*
+		 * We may have been preempted and rescheduled on a different
+		 * cpu before disabling interrupts. Need to reload cpu area
+		 * pointer.
+		 */
+		c = this_cpu_ptr(s->cpu_slab);
+#endif
 		object = __slab_alloc(s, gfpflags, node, addr, c);
+		local_irq_restore(flags);
 	} else {
 		void *next_object = get_freepointer_safe(s, object);
 
@@ -3299,7 +3285,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			 * We may have removed an object from c->freelist using
 			 * the fastpath in the previous iteration; in that case,
 			 * c->tid has not been bumped yet.
-			 * Since ___slab_alloc() may reenable interrupts while
+			 * Since __slab_alloc() may reenable interrupts while
 			 * allocating memory, we should bump c->tid now.
 			 */
 			c->tid = next_tid(c->tid);
@@ -3308,7 +3294,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			 * Invoking slow path likely have side-effect
 			 * of re-populating per CPU c->freelist
 			 */
-			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
+			p[i] = __slab_alloc(s, flags, NUMA_NO_NODE,
 					    _RET_IP_, c);
 			if (unlikely(!p[i]))
 				goto error;
-- 
2.27.0
