[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20230915105933.495735-6-matteorizzo@google.com>
Date: Fri, 15 Sep 2023 10:59:24 +0000
From: Matteo Rizzo <matteorizzo@...gle.com>
To: cl@...ux.com, penberg@...nel.org, rientjes@...gle.com,
iamjoonsoo.kim@....com, akpm@...ux-foundation.org, vbabka@...e.cz,
roman.gushchin@...ux.dev, 42.hyeyoo@...il.com,
keescook@...omium.org, linux-kernel@...r.kernel.org,
linux-doc@...r.kernel.org, linux-mm@...ck.org,
linux-hardening@...r.kernel.org, tglx@...utronix.de,
mingo@...hat.com, bp@...en8.de, dave.hansen@...ux.intel.com,
x86@...nel.org, hpa@...or.com, corbet@....net, luto@...nel.org,
peterz@...radead.org
Cc: jannh@...gle.com, matteorizzo@...gle.com, evn@...gle.com,
poprdi@...gle.com, jordyzomer@...gle.com
Subject: [RFC PATCH 05/14] mm/slub: create folio_set/clear_slab helpers
From: Jann Horn <jannh@...gle.com>
This is refactoring in preparation for SLAB_VIRTUAL. Extract this code
to separate functions so that it's not duplicated in the code that
allocates and frees pages with SLAB_VIRTUAL enabled.
Signed-off-by: Jann Horn <jannh@...gle.com>
Co-developed-by: Matteo Rizzo <matteorizzo@...gle.com>
Signed-off-by: Matteo Rizzo <matteorizzo@...gle.com>
---
mm/slub.c | 32 ++++++++++++++++++++++----------
1 file changed, 22 insertions(+), 10 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index ad33d9e1601d..9b87afade125 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1849,6 +1849,26 @@ static void *setup_object(struct kmem_cache *s, void *object)
/*
* Slab allocation and freeing
*/
+
+static void folio_set_slab(struct folio *folio, struct slab *slab)
+{
+ __folio_set_slab(folio);
+ /* Make the flag visible before any changes to folio->mapping */
+ smp_wmb();
+
+ if (folio_is_pfmemalloc(folio))
+ slab_set_pfmemalloc(slab);
+}
+
+static void folio_clear_slab(struct folio *folio, struct slab *slab)
+{
+ __slab_clear_pfmemalloc(slab);
+ folio->mapping = NULL;
+ /* Make the mapping reset visible before clearing the flag */
+ smp_wmb();
+ __folio_clear_slab(folio);
+}
+
static inline struct slab *alloc_slab_page(gfp_t flags, int node,
struct kmem_cache_order_objects oo)
{
@@ -1865,11 +1885,7 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
return NULL;
slab = folio_slab(folio);
- __folio_set_slab(folio);
- /* Make the flag visible before any changes to folio->mapping */
- smp_wmb();
- if (folio_is_pfmemalloc(folio))
- slab_set_pfmemalloc(slab);
+ folio_set_slab(folio, slab);
return slab;
}
@@ -2067,11 +2083,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
int order = folio_order(folio);
int pages = 1 << order;
- __slab_clear_pfmemalloc(slab);
- folio->mapping = NULL;
- /* Make the mapping reset visible before clearing the flag */
- smp_wmb();
- __folio_clear_slab(folio);
+ folio_clear_slab(folio, slab);
mm_account_reclaimed_pages(pages);
unaccount_slab(slab, order, s);
__free_pages(&folio->page, order);
--
2.42.0.459.ge4e396fd5e-goog
Powered by blists - more mailing lists