Message-Id: <20210805152000.12817-15-vbabka@suse.cz>
Date: Thu, 5 Aug 2021 17:19:39 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: Andrew Morton <akpm@...ux-foundation.org>,
Christoph Lameter <cl@...ux.com>,
David Rientjes <rientjes@...gle.com>,
Pekka Enberg <penberg@...nel.org>,
Joonsoo Kim <iamjoonsoo.kim@....com>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Mike Galbraith <efault@....de>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Thomas Gleixner <tglx@...utronix.de>,
Mel Gorman <mgorman@...hsingularity.net>,
Jesper Dangaard Brouer <brouer@...hat.com>,
Jann Horn <jannh@...gle.com>, Vlastimil Babka <vbabka@...e.cz>
Subject: [PATCH v4 14/35] mm, slub: move disabling irqs closer to get_partial() in ___slab_alloc()
Continue reducing the irq disabled scope. Check for per-cpu partial slabs
first with irqs enabled and then recheck with irqs disabled before grabbing
the slab page. Mostly preparatory for the following patches.
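
For reference, the resulting flow around the per-cpu partial list looks
roughly like the sketch below. This is a condensed illustration of the
check/recheck pattern, not the literal post-patch code; see the hunks for
the exact context:

	/* speculative check done with irqs still enabled */
	if (slub_percpu_partial(c)) {
		local_irq_save(flags);
		if (unlikely(c->page)) {
			/* an IRQ handler installed a page meanwhile */
			local_irq_restore(flags);
			goto reread_page;
		}
		if (unlikely(!slub_percpu_partial(c)))
			/* partial list was stolen by an IRQ handler */
			goto new_objects;

		/* irqs are now disabled; safe to grab the slab page */
		page = c->page = slub_percpu_partial(c);
		slub_set_percpu_partial(c, page);
		local_irq_restore(flags);
		goto redo;
	}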
Signed-off-by: Vlastimil Babka <vbabka@...e.cz>
---
mm/slub.c | 34 +++++++++++++++++++++++++---------
1 file changed, 25 insertions(+), 9 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 7eb06fe9d7a0..868a782b6f62 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2676,11 +2676,6 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
if (unlikely(node != NUMA_NO_NODE &&
!node_isset(node, slab_nodes)))
node = NUMA_NO_NODE;
- local_irq_save(flags);
- if (unlikely(c->page)) {
- local_irq_restore(flags);
- goto reread_page;
- }
goto new_slab;
}
redo:
@@ -2721,6 +2716,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
if (!freelist) {
c->page = NULL;
+ local_irq_restore(flags);
stat(s, DEACTIVATE_BYPASS);
goto new_slab;
}
@@ -2750,12 +2746,19 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
goto reread_page;
}
deactivate_slab(s, page, c->freelist, c);
+ local_irq_restore(flags);
new_slab:
- lockdep_assert_irqs_disabled();
-
if (slub_percpu_partial(c)) {
+ local_irq_save(flags);
+ if (unlikely(c->page)) {
+ local_irq_restore(flags);
+ goto reread_page;
+ }
+ if (unlikely(!slub_percpu_partial(c)))
+ goto new_objects; /* stolen by an IRQ handler */
+
page = c->page = slub_percpu_partial(c);
slub_set_percpu_partial(c, page);
local_irq_restore(flags);
@@ -2763,6 +2766,16 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
goto redo;
}
+ local_irq_save(flags);
+ if (unlikely(c->page)) {
+ local_irq_restore(flags);
+ goto reread_page;
+ }
+
+new_objects:
+
+ lockdep_assert_irqs_disabled();
+
freelist = get_partial(s, gfpflags, node, &page);
if (freelist) {
c->page = page;
@@ -2795,15 +2808,18 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
check_new_page:
if (kmem_cache_debug(s)) {
- if (!alloc_debug_processing(s, page, freelist, addr))
+ if (!alloc_debug_processing(s, page, freelist, addr)) {
/* Slab failed checks. Next slab needed */
+ c->page = NULL;
+ local_irq_restore(flags);
goto new_slab;
- else
+ } else {
/*
* For debug case, we don't load freelist so that all
* allocations go through alloc_debug_processing()
*/
goto return_single;
+ }
}
if (unlikely(!pfmemalloc_match(page, gfpflags)))
--
2.32.0