Message-Id: <20210609113903.1421-20-vbabka@suse.cz>
Date: Wed, 9 Jun 2021 13:38:48 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Christoph Lameter <cl@...ux.com>,
David Rientjes <rientjes@...gle.com>,
Pekka Enberg <penberg@...nel.org>,
Joonsoo Kim <iamjoonsoo.kim@....com>
Cc: Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Thomas Gleixner <tglx@...utronix.de>,
Mel Gorman <mgorman@...hsingularity.net>,
Jesper Dangaard Brouer <brouer@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Jann Horn <jannh@...gle.com>, Vlastimil Babka <vbabka@...e.cz>
Subject: [RFC v2 19/34] mm, slub: move reset of c->page and freelist out of deactivate_slab()
deactivate_slab() removes the cpu slab by merging the cpu freelist with the
slab's freelist and putting the slab on the proper node's list. It also sets
the respective kmem_cache_cpu pointers to NULL.
By extracting the kmem_cache_cpu operations from the function, we can make it
independent of irqs being disabled.
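For illustration, the caller-side pattern this introduces looks as follows
(a simplified sketch extracted from the flush_slab() hunk below, not code
beyond what the patch itself adds):

	void *freelist = c->freelist;
	struct page *page = c->page;

	/* Detach the slab from kmem_cache_cpu; only this part still
	 * has to run with irqs disabled. */
	c->page = NULL;
	c->freelist = NULL;
	c->tid = next_tid(c->tid);

	/* Operates purely on page and freelist and never touches the
	 * per-cpu structure, so it no longer relies on disabled irqs. */
	deactivate_slab(s, page, freelist);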
Also, if we return a single free pointer from ___slab_alloc, we no longer have
to assign kmem_cache_cpu.page before deactivation, or care whether somebody
preempted us and assigned a different page to our kmem_cache_cpu in the
process.
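In the return_single path of ___slab_alloc() this reduces to the following
(simplified from the hunk below): the first object goes to the caller and the
rest of the freelist is handed straight back to the page, which was never
installed as c->page:

	local_irq_save(flags);
	/* The page was never assigned to c->page, so it is harmless if
	 * someone preempted us and installed a different cpu slab. */
	deactivate_slab(s, page, get_freepointer(s, freelist));
	local_irq_restore(flags);
	return freelist;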
Signed-off-by: Vlastimil Babka <vbabka@...e.cz>
---
mm/slub.c | 31 ++++++++++++++++++-------------
1 file changed, 18 insertions(+), 13 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 6f894d1e84d6..8b0b975bfa36 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2149,10 +2149,13 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
}
/*
- * Remove the cpu slab
+ * Finishes removing the cpu slab. Merges cpu's freelist with page's freelist,
+ * unfreezes the slab and puts it on the proper list.
+ * Assumes the slab has already been safely taken away from kmem_cache_cpu
+ * by the caller.
*/
static void deactivate_slab(struct kmem_cache *s, struct page *page,
- void *freelist, struct kmem_cache_cpu *c)
+ void *freelist)
{
enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -2281,9 +2284,6 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
discard_slab(s, page);
stat(s, FREE_SLAB);
}
-
- c->page = NULL;
- c->freelist = NULL;
}
/*
@@ -2408,10 +2408,16 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
- stat(s, CPUSLAB_FLUSH);
- deactivate_slab(s, c->page, c->freelist, c);
+ void *freelist = c->freelist;
+ struct page *page = c->page;
+ c->page = NULL;
+ c->freelist = NULL;
c->tid = next_tid(c->tid);
+
+ deactivate_slab(s, page, freelist);
+
+ stat(s, CPUSLAB_FLUSH);
}
/*
@@ -2696,7 +2702,10 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
local_irq_restore(flags);
goto reread_page;
}
- deactivate_slab(s, page, c->freelist, c);
+ freelist = c->freelist;
+ c->page = NULL;
+ c->freelist = NULL;
+ deactivate_slab(s, page, freelist);
local_irq_restore(flags);
new_slab:
@@ -2775,11 +2784,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
return_single:
local_irq_save(flags);
- if (unlikely(c->page))
- flush_slab(s, c);
- c->page = page;
-
- deactivate_slab(s, page, get_freepointer(s, freelist), c);
+ deactivate_slab(s, page, get_freepointer(s, freelist));
local_irq_restore(flags);
return freelist;
}
--
2.31.1