Message-Id: <20200610163135.17364-10-vbabka@suse.cz>
Date: Wed, 10 Jun 2020 18:31:35 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: Andrew Morton <akpm@...ux-foundation.org>,
Christoph Lameter <cl@...ux.com>,
Pekka Enberg <penberg@...nel.org>,
David Rientjes <rientjes@...gle.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
kernel-team@...roid.com, vinmenon@...eaurora.org,
Kees Cook <keescook@...omium.org>,
Matthew Garrett <mjg59@...gle.com>,
Roman Gushchin <guro@...com>, Vlastimil Babka <vbabka@...e.cz>,
Jann Horn <jannh@...gle.com>,
Vijayanand Jitta <vjitta@...eaurora.org>
Subject: [PATCH 9/9] mm, slab/slub: move and improve cache_from_obj()

The function cache_from_obj() was added by commit b9ce5ef49f00 ("sl[au]b:
always get the cache from its page in kmem_cache_free()") to support kmemcg,
where the per-memcg cache can be different from the root one, so we can't use
the kmem_cache pointer given to kmem_cache_free().
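
For reference, the lookup goes through the object's slab page. A simplified
sketch of how virt_to_cache() resolves an object to its cache (close to the
mm/slab.h helper of this era, trimmed for illustration):

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
        struct page *page = virt_to_head_page(obj);

        /* Objects handed to kmem_cache_free() must sit on a slab page. */
        if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
                      __func__))
                return NULL;
        /* The slab page records which kmem_cache it belongs to. */
        return page->slab_cache;
}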

Prior to that commit, SLUB already had a debugging check and warning that
could be enabled to compare the given kmem_cache pointer to the one referenced
by the slab page where the object to be freed resides. This check was moved to
cache_from_obj(). Later the check was also enabled for SLAB_FREELIST_HARDENED
configs by commit 598a0717a816 ("mm/slab: validate cache membership under
freelist hardening").

These checks and warnings can be especially useful for debugging, and they
can be improved. Commit 598a0717a816 replaced the pr_err() with WARN_ON_ONCE()
combination by WARN_ONCE(), so only the first hit is reported and later ones
are silent. This patch changes it to WARN() so that all errors are reported.
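
For reference, the difference comes down to a static once-flag in WARN_ONCE();
roughly (simplified from include/asm-generic/bug.h):

#define WARN_ONCE(condition, format...) ({                      \
        static bool __warned;                                   \
        int __ret_warn_once = !!(condition);                    \
                                                                \
        /* Print the splat only on the first hit... */          \
        if (unlikely(__ret_warn_once && !__warned)) {           \
                __warned = true;                                \
                WARN(1, format);                                \
        }                                                       \
        /* ...but still evaluate and return the condition. */   \
        unlikely(__ret_warn_once);                              \
})

With plain WARN(), every bad free produces a splat, which helps when more than
one call site passes the wrong cache.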

It's also useful to print the SLUB allocation/free tracking info for the
offending object, if tracking is enabled. We could export the SLUB
print_tracking() function and provide an empty one for SLAB, or simply
recognize that both the debugging and hardening cases in cache_from_obj() are
only supported by SLUB anyway. So this patch moves cache_from_obj() from
slab.h to separate instances in slab.c and slub.c, where the SLAB version only
does the kmemcg lookup and could even be removed completely once the kmemcg
rework [1] is merged. The SLUB version can thus easily use the
print_tracking() function. It can also use the kmem_cache_debug_flags()
static key check for improved performance in kernels without the hardening
and with debugging not enabled on boot (a sketch of that helper follows after
the link below).

[1] https://lore.kernel.org/r/20200608230654.828134-18-guro@fb.com
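
For context, kmem_cache_debug_flags() (introduced earlier in this series)
pairs a static key with the per-cache flags test, so the check is patched out
of the fast path entirely when slub_debug is not enabled on boot;
approximately:

static inline bool kmem_cache_debug_flags(struct kmem_cache *s,
                                          slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
        /* Static key: a NOP branch unless slub_debug was enabled on boot. */
        if (static_branch_unlikely(&slub_debug_enabled))
                return s->flags & flags;
#endif
        return false;
}

With hardening compiled out and the static key off, cache_from_obj() in SLUB
then reduces to the memcg_kmem_enabled() check alone.
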
Signed-off-by: Vlastimil Babka <vbabka@...e.cz>
---
mm/slab.c | 8 ++++++++
mm/slab.h | 23 -----------------------
mm/slub.c | 21 +++++++++++++++++++++
 3 files changed, 29 insertions(+), 23 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 9350062ffc1a..6134c4c36d4c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3672,6 +3672,14 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
}
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
+static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
+{
+ if (memcg_kmem_enabled())
+ return virt_to_cache(x);
+ else
+ return s;
+}
+
/**
* kmem_cache_free - Deallocate an object
* @cachep: The cache the allocation was from.
diff --git a/mm/slab.h b/mm/slab.h
index 815e4e9a94cd..c0c4244f75da 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -503,29 +503,6 @@ static __always_inline void uncharge_slab_page(struct page *page, int order,
memcg_uncharge_slab(page, order, s);
 }
 
-static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
-{
- struct kmem_cache *cachep;
-
- /*
- * When kmemcg is not being used, both assignments should return the
- * same value. but we don't want to pay the assignment price in that
- * case. If it is not compiled in, the compiler should be smart enough
- * to not do even the assignment. In that case, slab_equal_or_root
- * will also be a constant.
- */
- if (!memcg_kmem_enabled() &&
- !IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
- !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
- return s;
-
- cachep = virt_to_cache(x);
- WARN_ONCE(cachep && !slab_equal_or_root(cachep, s),
- "%s: Wrong slab cache. %s but object is from %s\n",
- __func__, s->name, cachep->name);
- return cachep;
-}
-
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
diff --git a/mm/slub.c b/mm/slub.c
index efb08f2e9c66..f7a1d8537674 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1524,6 +1524,10 @@ static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
{
return false;
}
+
+static void print_tracking(struct kmem_cache *s, void *object)
+{
+}
 #endif /* CONFIG_SLUB_DEBUG */
 
 /*
@@ -3175,6 +3179,23 @@ void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
}
 #endif
 
+static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
+{
+ struct kmem_cache *cachep;
+
+ if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
+ !memcg_kmem_enabled() &&
+ !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
+ return s;
+
+ cachep = virt_to_cache(x);
+ if (WARN(cachep && !slab_equal_or_root(cachep, s),
+ "%s: Wrong slab cache. %s but object is from %s\n",
+ __func__, s->name, cachep->name))
+ print_tracking(cachep, x);
+ return cachep;
+}
+
void kmem_cache_free(struct kmem_cache *s, void *x)
{
s = cache_from_obj(s, x);
--
2.26.2