Message-ID: <20251222110843.980347-7-harry.yoo@oracle.com>
Date: Mon, 22 Dec 2025 20:08:41 +0900
From: Harry Yoo <harry.yoo@...cle.com>
To: akpm@...ux-foundation.org, vbabka@...e.cz
Cc: andreyknvl@...il.com, cl@...two.org, dvyukov@...gle.com, glider@...gle.com,
hannes@...xchg.org, linux-mm@...ck.org, mhocko@...nel.org,
muchun.song@...ux.dev, rientjes@...gle.com, roman.gushchin@...ux.dev,
ryabinin.a.a@...il.com, shakeel.butt@...ux.dev, surenb@...gle.com,
vincenzo.frascino@....com, yeoreum.yun@....com, harry.yoo@...cle.com,
tytso@....edu, adilger.kernel@...ger.ca, linux-ext4@...r.kernel.org,
linux-kernel@...r.kernel.org, cgroups@...r.kernel.org,
hao.li@...ux.dev
Subject: [PATCH V4 6/8] mm/memcontrol,alloc_tag: handle slabobj_ext access under KASAN poison

In the near future, slabobj_ext may reside outside the allocated slab
object range within a slab, so accesses to it could be reported as
out-of-bounds by KASAN.

As suggested by Andrey Konovalov [1], explicitly disable KASAN and KMSAN
checks when accessing slabobj_ext within the slab allocator, memory
profiling, and memory cgroup code. An alternative approach would be to
unpoison slabobj_ext, but keeping it poisoned catches the more common
case: out-of-bounds accesses that originate outside the slab allocator.
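
For reference, the helpers (moved to mm/slab.h by this patch) suppress
both sanitizers around the access:

  static inline void metadata_access_enable(void)
  {
          kasan_disable_current();
          kmsan_disable_current();
  }

  static inline void metadata_access_disable(void)
  {
          kmsan_enable_current();
          kasan_enable_current();
  }
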
Move the metadata_access_enable()/disable() helpers to mm/slab.h so that
they can be used outside mm/slub.c. However, as suggested by Suren
Baghdasaryan [2], instead of calling them directly from mm code (which is
more error-prone), change users to access slabobj_ext via get/put APIs:
- Users should call get_slab_obj_exts() before accessing slabobj_ext
  metadata. From now on, accessing the metadata outside a section
  covered by get_slab_obj_exts() ~ put_slab_obj_exts() is illegal.
  This ensures that accesses to slabobj_ext metadata won't be reported
  as access violations.
- If slab_obj_exts() returns zero, the caller must not call
  get_slab_obj_exts() or put_slab_obj_exts(). Otherwise, each
  get_slab_obj_exts() must be paired with put_slab_obj_exts()
  (see the sketch below).
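
A minimal sketch of the resulting calling pattern (mirroring the
kernel-doc example this patch adds to mm/slab.h):

  obj_exts = slab_obj_exts(slab);
  if (obj_exts) {
          get_slab_obj_exts(obj_exts);
          obj_ext = slab_obj_ext(slab, obj_exts,
                                 obj_to_index(s, slab, obj));
          /* read or update the object's extension here */
          put_slab_obj_exts(obj_exts);
  }
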
Call kasan_reset_tag() in slab_obj_ext() before returning the address to
prevent SW or HW tag-based KASAN from reporting false positives.
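
For illustration, the tail of slab_obj_ext() then becomes:

  obj_ext = (struct slabobj_ext *)(obj_exts +
                                   slab_get_stride(slab) * index);
  /* Strip the pointer tag so SW/HW tag-based KASAN accepts the access. */
  return kasan_reset_tag(obj_ext);
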
Suggested-by: Andrey Konovalov <andreyknvl@...il.com>
Suggested-by: Suren Baghdasaryan <surenb@...gle.com>
Link: https://lore.kernel.org/linux-mm/CA+fCnZezoWn40BaS3cgmCeLwjT+5AndzcQLc=wH3BjMCu6_YCw@mail.gmail.com [1]
Link: https://lore.kernel.org/linux-mm/CAJuCfpG=Lb4WhYuPkSpdNO4Ehtjm1YcEEK0OM=3g9i=LxmpHSQ@mail.gmail.com [2]
Signed-off-by: Harry Yoo <harry.yoo@...cle.com>
---
mm/memcontrol.c | 12 +++++++--
mm/slab.h | 54 +++++++++++++++++++++++++++++++++++---
mm/slub.c | 69 ++++++++++++++++++++++++-------------------------
3 files changed, 95 insertions(+), 40 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index fd9105a953b0..50ca00122571 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2604,10 +2604,16 @@ struct mem_cgroup *mem_cgroup_from_obj_slab(struct slab *slab, void *p)
if (!obj_exts)
return NULL;
+ get_slab_obj_exts(obj_exts);
off = obj_to_index(slab->slab_cache, slab, p);
obj_ext = slab_obj_ext(slab, obj_exts, off);
- if (obj_ext->objcg)
- return obj_cgroup_memcg(obj_ext->objcg);
+ if (obj_ext->objcg) {
+ struct obj_cgroup *objcg = obj_ext->objcg;
+
+ put_slab_obj_exts(obj_exts);
+ return obj_cgroup_memcg(objcg);
+ }
+ put_slab_obj_exts(obj_exts);
return NULL;
}
@@ -3219,10 +3225,12 @@ bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
return false;
obj_exts = slab_obj_exts(slab);
+ get_slab_obj_exts(obj_exts);
off = obj_to_index(s, slab, p[i]);
obj_ext = slab_obj_ext(slab, obj_exts, off);
obj_cgroup_get(objcg);
obj_ext->objcg = objcg;
+ put_slab_obj_exts(obj_exts);
}
return true;
diff --git a/mm/slab.h b/mm/slab.h
index 38967ec663d1..ba67d6059032 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -510,6 +510,24 @@ bool slab_in_kunit_test(void);
static inline bool slab_in_kunit_test(void) { return false; }
#endif
+/*
+ * slub is about to manipulate internal object metadata. This memory lies
+ * outside the range of the allocated object, so accessing it would normally
+ * be reported by kasan as a bounds error. metadata_access_enable() is used
+ * to tell kasan that these accesses are OK.
+ */
+static inline void metadata_access_enable(void)
+{
+ kasan_disable_current();
+ kmsan_disable_current();
+}
+
+static inline void metadata_access_disable(void)
+{
+ kmsan_enable_current();
+ kasan_enable_current();
+}
+
#ifdef CONFIG_SLAB_OBJ_EXT
/*
@@ -519,8 +537,22 @@ static inline bool slab_in_kunit_test(void) { return false; }
*
* Returns the address of the object extension vector associated with the slab,
* or zero if no such vector has been associated yet.
- * Do not dereference the return value directly; use slab_obj_ext() to access
- * its elements.
+ * Do not dereference the return value directly; use the get/put_slab_obj_exts()
+ * pair and slab_obj_ext() to access individual elements.
+ *
+ * Example usage:
+ *
+ * obj_exts = slab_obj_exts(slab);
+ * if (obj_exts) {
+ * get_slab_obj_exts(obj_exts);
+ * obj_ext = slab_obj_ext(slab, obj_exts, obj_to_index(s, slab, obj));
+ * // do something with obj_ext
+ * put_slab_obj_exts(obj_exts);
+ * }
+ *
+ * Note that the get/put semantics do not involve reference counting.
+ * Instead, they update the kasan/kmsan depth so that accesses to slabobj_ext
+ * won't be reported as access violations.
*/
static inline unsigned long slab_obj_exts(struct slab *slab)
{
@@ -539,6 +571,17 @@ static inline unsigned long slab_obj_exts(struct slab *slab)
return obj_exts & ~OBJEXTS_FLAGS_MASK;
}
+static inline void get_slab_obj_exts(unsigned long obj_exts)
+{
+ VM_WARN_ON_ONCE(!obj_exts);
+ metadata_access_enable();
+}
+
+static inline void put_slab_obj_exts(unsigned long obj_exts)
+{
+ metadata_access_disable();
+}
+
#ifdef CONFIG_64BIT
static inline void slab_set_stride(struct slab *slab, unsigned short stride)
{
@@ -567,15 +610,20 @@ static inline unsigned short slab_get_stride(struct slab *slab)
* @index: an index of the object
*
* Returns a pointer to the object extension associated with the object.
+ * Must be called within a section covered by get/put_slab_obj_exts().
*/
static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
unsigned long obj_exts,
unsigned int index)
{
+ struct slabobj_ext *obj_ext;
+
VM_WARN_ON_ONCE(!slab_obj_exts(slab));
VM_WARN_ON_ONCE(obj_exts != slab_obj_exts(slab));
- return (struct slabobj_ext *)(obj_exts + slab_get_stride(slab) * index);
+ obj_ext = (struct slabobj_ext *)(obj_exts +
+ slab_get_stride(slab) * index);
+ return kasan_reset_tag(obj_ext);
}
int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
diff --git a/mm/slub.c b/mm/slub.c
index 8ac60a17d988..39c381cc1b2c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -975,24 +975,6 @@ static slab_flags_t slub_debug;
static const char *slub_debug_string __ro_after_init;
static int disable_higher_order_debug;
-/*
- * slub is about to manipulate internal object metadata. This memory lies
- * outside the range of the allocated object, so accessing it would normally
- * be reported by kasan as a bounds error. metadata_access_enable() is used
- * to tell kasan that these accesses are OK.
- */
-static inline void metadata_access_enable(void)
-{
- kasan_disable_current();
- kmsan_disable_current();
-}
-
-static inline void metadata_access_disable(void)
-{
- kmsan_enable_current();
- kasan_enable_current();
-}
-
/*
* Object debugging
*/
@@ -2042,23 +2024,27 @@ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
{
- unsigned long slab_exts;
struct slab *obj_exts_slab;
+ unsigned long slab_exts;
obj_exts_slab = virt_to_slab(obj_exts);
slab_exts = slab_obj_exts(obj_exts_slab);
if (slab_exts) {
+ get_slab_obj_exts(slab_exts);
unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
obj_exts_slab, obj_exts);
struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
slab_exts, offs);
- if (unlikely(is_codetag_empty(ext->ref)))
+ if (unlikely(is_codetag_empty(ext->ref))) {
+ put_slab_obj_exts(slab_exts);
return;
+ }
/* codetag should be NULL here */
WARN_ON(ext->ref.ct);
set_codetag_empty(&ext->ref);
+ put_slab_obj_exts(slab_exts);
}
}
@@ -2228,30 +2214,28 @@ static inline void free_slab_obj_exts(struct slab *slab)
#ifdef CONFIG_MEM_ALLOC_PROFILING
-static inline struct slabobj_ext *
-prepare_slab_obj_ext_hook(struct kmem_cache *s, gfp_t flags, void *p)
+static inline unsigned long
+prepare_slab_obj_exts_hook(struct kmem_cache *s, struct slab *slab,
+ gfp_t flags, void *p)
{
- struct slab *slab;
- unsigned long obj_exts;
-
- slab = virt_to_slab(p);
- obj_exts = slab_obj_exts(slab);
- if (!obj_exts &&
+ if (!slab_obj_exts(slab) &&
alloc_slab_obj_exts(slab, s, flags, false)) {
pr_warn_once("%s, %s: Failed to create slab extension vector!\n",
__func__, s->name);
- return NULL;
+ return 0;
}
- obj_exts = slab_obj_exts(slab);
- return slab_obj_ext(slab, obj_exts, obj_to_index(s, slab, p));
+ return slab_obj_exts(slab);
}
+
/* Should be called only if mem_alloc_profiling_enabled() */
static noinline void
__alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
{
+ unsigned long obj_exts;
struct slabobj_ext *obj_ext;
+ struct slab *slab;
if (!object)
return;
@@ -2262,16 +2246,23 @@ __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
if (flags & __GFP_NO_OBJ_EXT)
return;
- obj_ext = prepare_slab_obj_ext_hook(s, flags, object);
+ slab = virt_to_slab(object);
+ obj_exts = prepare_slab_obj_exts_hook(s, slab, flags, object);
/*
* Currently obj_exts is used only for allocation profiling.
* If other users appear then mem_alloc_profiling_enabled()
* check should be added before alloc_tag_add().
*/
- if (likely(obj_ext))
+ if (obj_exts) {
+ unsigned int obj_idx = obj_to_index(s, slab, object);
+
+ get_slab_obj_exts(obj_exts);
+ obj_ext = slab_obj_ext(slab, obj_exts, obj_idx);
alloc_tag_add(&obj_ext->ref, current->alloc_tag, s->size);
- else
+ put_slab_obj_exts(obj_exts);
+ } else {
alloc_tag_set_inaccurate(current->alloc_tag);
+ }
}
static inline void
@@ -2297,11 +2288,13 @@ __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p
if (!obj_exts)
return;
+ get_slab_obj_exts(obj_exts);
for (i = 0; i < objects; i++) {
unsigned int off = obj_to_index(s, slab, p[i]);
alloc_tag_sub(&slab_obj_ext(slab, obj_exts, off)->ref, s->size);
}
+ put_slab_obj_exts(obj_exts);
}
static inline void
@@ -2368,7 +2361,9 @@ void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
if (likely(!obj_exts))
return;
+ get_slab_obj_exts(obj_exts);
__memcg_slab_free_hook(s, slab, p, objects, obj_exts);
+ put_slab_obj_exts(obj_exts);
}
static __fastpath_inline
@@ -2418,10 +2413,14 @@ bool memcg_slab_post_charge(void *p, gfp_t flags)
/* Ignore already charged objects. */
obj_exts = slab_obj_exts(slab);
if (obj_exts) {
+ get_slab_obj_exts(obj_exts);
off = obj_to_index(s, slab, p);
obj_ext = slab_obj_ext(slab, obj_exts, off);
- if (unlikely(obj_ext->objcg))
+ if (unlikely(obj_ext->objcg)) {
+ put_slab_obj_exts(obj_exts);
return true;
+ }
+ put_slab_obj_exts(obj_exts);
}
return __memcg_slab_post_alloc_hook(s, NULL, flags, 1, &p);
--
2.43.0