Message-ID: <20251027122847.320924-5-harry.yoo@oracle.com>
Date: Mon, 27 Oct 2025 21:28:44 +0900
From: Harry Yoo <harry.yoo@...cle.com>
To: akpm@...ux-foundation.org, vbabka@...e.cz
Cc: andreyknvl@...il.com, cl@...ux.com, dvyukov@...gle.com, glider@...gle.com,
hannes@...xchg.org, linux-mm@...ck.org, mhocko@...nel.org,
muchun.song@...ux.dev, rientjes@...gle.com, roman.gushchin@...ux.dev,
ryabinin.a.a@...il.com, shakeel.butt@...ux.dev, surenb@...gle.com,
vincenzo.frascino@....com, yeoreum.yun@....com, harry.yoo@...cle.com,
tytso@....edu, adilger.kernel@...ger.ca, linux-ext4@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [RFC PATCH V3 4/7] mm/slab: use stride to access slabobj_ext

Use a configurable stride when accessing slab object extension
metadata, instead of assuming a fixed stride of
sizeof(struct slabobj_ext).

Store the stride in the free bits of the slab->counters field. This
allows for flexibility in cases where the extension metadata is
embedded within slab objects.

These free bits exist only on 64-bit: on 32-bit, 'counters' is fully
occupied by the inuse, objects and frozen fields. Any future
optimization that needs a non-default stride therefore cannot be
enabled on 32-bit architectures.
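
For illustration only, here is a minimal userspace sketch of the
addressing scheme (all names and sizes below are hypothetical, not
part of this patch): with the default stride the lookup degenerates to
plain array indexing, while an embedded layout would use the slab
object size as the stride.

	#include <assert.h>
	#include <stddef.h>

	struct slabobj_ext { unsigned long ref; };

	/* Element i lives at base + i * stride. */
	static struct slabobj_ext *obj_ext(void *base, size_t stride,
					   unsigned int i)
	{
		return (struct slabobj_ext *)((char *)base + stride * i);
	}

	int main(void)
	{
		/* Hypothetical 64-byte "slab objects", each with the
		 * extension embedded at offset 0. */
		struct obj { struct slabobj_ext ext; char payload[56]; } objs[4];

		/* Stride is the whole object size, not sizeof(ext). */
		obj_ext(objs, sizeof(struct obj), 2)->ref = 42;
		assert(objs[2].ext.ref == 42);
		return 0;
	}
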
Suggested-by: Vlastimil Babka <vbabka@...e.cz>
Signed-off-by: Harry Yoo <harry.yoo@...cle.com>
---
 mm/slab.h | 37 +++++++++++++++++++++++++++++++++----
 mm/slub.c |  2 ++
 2 files changed, 35 insertions(+), 4 deletions(-)
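
As a reader aid (abridged; not part of the patch): the existing
'counters' word is 64 bits wide on CONFIG_64BIT, while the inuse,
objects and frozen bitfields consume exactly its low 32 bits, so a
16-bit stride fits in the free upper bits:

	union {
		unsigned long counters;		/* 64 bits on CONFIG_64BIT */
		struct {
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;	/* low 32 bits fully used */
	#ifdef CONFIG_64BIT
			unsigned short stride;	/* upper 32 bits are free */
	#endif
		};
	};
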
diff --git a/mm/slab.h b/mm/slab.h
index df2c987d950d..22ee28cb55e1 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -83,6 +83,14 @@ struct slab {
 							 * that the slab was corrupted
 							 */
 							unsigned frozen:1;
+#ifdef CONFIG_64BIT
+							/*
+							 * Some optimizations use free bits in the
+							 * 'counters' field to save memory; they are
+							 * disabled when ->stride is not available.
+							 */
+							unsigned short stride;
+#endif
 						};
 					};
 				};
@@ -550,6 +558,26 @@ static inline unsigned long slab_obj_exts(struct slab *slab)
 	return obj_exts & ~OBJEXTS_FLAGS_MASK;
 }
 
+#ifdef CONFIG_64BIT
+static inline void slab_set_stride(struct slab *slab, unsigned short stride)
+{
+	slab->stride = stride;
+}
+static inline unsigned short slab_get_stride(struct slab *slab)
+{
+	return slab->stride;
+}
+#else
+static inline void slab_set_stride(struct slab *slab, unsigned short stride)
+{
+	VM_WARN_ON_ONCE(stride != sizeof(struct slabobj_ext));
+}
+static inline unsigned short slab_get_stride(struct slab *slab)
+{
+	return sizeof(struct slabobj_ext);
+}
+#endif
+
 /*
  * slab_obj_ext - get the pointer to the slab object extension metadata
  * associated with an object in a slab.
@@ -563,13 +591,10 @@ static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
 					       unsigned long obj_exts,
 					       unsigned int index)
 {
-	struct slabobj_ext *obj_ext;
-
 	VM_WARN_ON_ONCE(!slab_obj_exts(slab));
 	VM_WARN_ON_ONCE(obj_exts != slab_obj_exts(slab));
 
-	obj_ext = (struct slabobj_ext *)obj_exts;
-	return &obj_ext[index];
+	return (struct slabobj_ext *)(obj_exts + slab_get_stride(slab) * index);
 }
 
 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
@@ -588,6 +613,10 @@ static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
 	return NULL;
 }
 
+static inline void slab_set_stride(struct slab *slab, unsigned short stride) { }
+static inline unsigned short slab_get_stride(struct slab *slab) { return 0; }
+
+
 #endif /* CONFIG_SLAB_OBJ_EXT */
 
 static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
diff --git a/mm/slub.c b/mm/slub.c
index ae73403f8c29..4383740a4d34 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2134,6 +2134,8 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 #endif
 	old_exts = READ_ONCE(slab->obj_exts);
 	handle_failed_objexts_alloc(old_exts, vec, objects);
+	slab_set_stride(slab, sizeof(struct slabobj_ext));
+
 	if (new_slab) {
 		/*
 		 * If the slab is brand new and nobody can yet access its
--
2.43.0