Message-Id: <20210805152000.12817-19-vbabka@suse.cz>
Date: Thu, 5 Aug 2021 17:19:43 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: Andrew Morton <akpm@...ux-foundation.org>,
Christoph Lameter <cl@...ux.com>,
David Rientjes <rientjes@...gle.com>,
Pekka Enberg <penberg@...nel.org>,
Joonsoo Kim <iamjoonsoo.kim@....com>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Mike Galbraith <efault@....de>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Thomas Gleixner <tglx@...utronix.de>,
Mel Gorman <mgorman@...hsingularity.net>,
Jesper Dangaard Brouer <brouer@...hat.com>,
Jann Horn <jannh@...gle.com>, Vlastimil Babka <vbabka@...e.cz>
Subject: [PATCH v4 18/35] mm, slub: stop disabling irqs around get_partial()
The function get_partial() does not need to have irqs disabled as a whole. Only
the n->list_lock in get_partial_node() needs protection, so it's sufficient to
convert the spin_lock operations there to their irq saving/restoring versions.
As a result, it's now possible to reach the page allocator from the slab
allocator without disabling and re-enabling interrupts on the way.
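As an illustration (a minimal sketch of the conversion pattern, not one of the
patch hunks below), the lock section now manages the irq state itself instead
of relying on the caller having disabled irqs:

	unsigned long flags;

	spin_lock_irqsave(&n->list_lock, flags);	/* disable irqs, save prior state */
	/* ... scan n->partial for a usable slab ... */
	spin_unlock_irqrestore(&n->list_lock, flags);	/* restore the saved irq state */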
Signed-off-by: Vlastimil Babka <vbabka@...e.cz>
---
mm/slub.c | 22 ++++++++--------------
1 file changed, 8 insertions(+), 14 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index f7c6cebb524d..b4a62aa00ae2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1993,11 +1993,12 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
* Try to allocate a partial slab from a specific node.
*/
static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
- struct page **ret_page, gfp_t flags)
+ struct page **ret_page, gfp_t gfpflags)
{
struct page *page, *page2;
void *object = NULL;
unsigned int available = 0;
+ unsigned long flags;
int objects;
/*
@@ -2009,11 +2010,11 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
if (!n || !n->nr_partial)
return NULL;
- spin_lock(&n->list_lock);
+ spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
void *t;
- if (!pfmemalloc_match(page, flags))
+ if (!pfmemalloc_match(page, gfpflags))
continue;
t = acquire_slab(s, n, page, object == NULL, &objects);
@@ -2034,7 +2035,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
break;
}
- spin_unlock(&n->list_lock);
+ spin_unlock_irqrestore(&n->list_lock, flags);
return object;
}
@@ -2749,8 +2750,10 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
local_irq_restore(flags);
goto reread_page;
}
- if (unlikely(!slub_percpu_partial(c)))
+ if (unlikely(!slub_percpu_partial(c))) {
+ local_irq_restore(flags);
goto new_objects; /* stolen by an IRQ handler */
+ }
page = c->page = slub_percpu_partial(c);
slub_set_percpu_partial(c, page);
@@ -2759,18 +2762,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
goto redo;
}
- local_irq_save(flags);
- if (unlikely(c->page)) {
- local_irq_restore(flags);
- goto reread_page;
- }
-
new_objects:
- lockdep_assert_irqs_disabled();
-
freelist = get_partial(s, gfpflags, node, &page);
- local_irq_restore(flags);
if (freelist)
goto check_new_page;
--
2.32.0