Message-Id: <20200614123923.99189-2-songmuchun@bytedance.com>
Date: Sun, 14 Jun 2020 20:39:21 +0800
From: Muchun Song <songmuchun@...edance.com>
To: cl@...ux.com, penberg@...nel.org, rientjes@...gle.com,
iamjoonsoo.kim@....com, akpm@...ux-foundation.org
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Muchun Song <songmuchun@...edance.com>
Subject: [PATCH 1/3] mm/slub: Fix slabs_node return value when CONFIG_SLUB_DEBUG disabled
slabs_node() always returns zero when CONFIG_SLUB_DEBUG is disabled. But
some code determines whether a slab cache is empty by checking the return
value of slabs_node(), so that check gives the wrong answer. The problem
can be reproduced with the following code (booting the system with
"slub_nomerge" on the command line):
        void *objs[32];
        struct kmem_cache *cache = kmem_cache_create("kmem-test", 128, 0,
                                                      0, NULL);

        if (cache) {
                int i;

                /* Make a full slab */
                for (i = 0; i < ARRAY_SIZE(objs); i++)
                        objs[i] = kmem_cache_alloc(cache, GFP_KERNEL_ACCOUNT);

                /*
                 * This really should fail because the slab cache still
                 * holds objects, but the @cache is destroyed anyway since
                 * slabs_node() returns zero.
                 */
                kmem_cache_destroy(cache);
        }
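
For context, the destroy path relies on slabs_node() to decide whether any
slabs are still in use. Roughly (a simplified sketch of
__kmem_cache_shutdown() in mm/slub.c, not the exact code):

        int __kmem_cache_shutdown(struct kmem_cache *s)
        {
                int node;
                struct kmem_cache_node *n;

                flush_all(s);
                for_each_kmem_cache_node(s, node, n) {
                        free_partial(s, n);
                        /*
                         * Full slabs are not on the partial list, so only
                         * slabs_node() can report them. With the !SLUB_DEBUG
                         * stub that returns 0, a cache that still holds
                         * objects looks empty here and the destroy proceeds.
                         */
                        if (n->nr_partial || slabs_node(s, node))
                                return 1;       /* cache is still in use */
                }
                return 0;
        }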
To fix it, we can move nr_slabs of struct kmem_cache_node out of
CONFIG_SLUB_DEBUG, so that slabs_node() returns the correct value.

With this patch applied, we get a warning message and a stack trace in
dmesg instead of the cache being silently destroyed.
Signed-off-by: Muchun Song <songmuchun@...edance.com>
---
mm/slab.h | 2 +-
mm/slub.c | 80 +++++++++++++++++++++++++++++++++------------------------------
2 files changed, 43 insertions(+), 39 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index 0b91f2a7b033..062d4542b7e2 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -619,8 +619,8 @@ struct kmem_cache_node {
#ifdef CONFIG_SLUB
unsigned long nr_partial;
struct list_head partial;
-#ifdef CONFIG_SLUB_DEBUG
atomic_long_t nr_slabs;
+#ifdef CONFIG_SLUB_DEBUG
atomic_long_t total_objects;
struct list_head full;
#endif
diff --git a/mm/slub.c b/mm/slub.c
index 49b5cb7da318..1a3e6a5b7287 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1070,39 +1070,14 @@ static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct
list_del(&page->slab_list);
}
-/* Tracking of the number of slabs for debugging purposes */
-static inline unsigned long slabs_node(struct kmem_cache *s, int node)
+/* Tracking of the number of objects for debugging purposes */
+static inline void inc_objects_node(struct kmem_cache_node *n, int objects)
{
- struct kmem_cache_node *n = get_node(s, node);
-
- return atomic_long_read(&n->nr_slabs);
+ atomic_long_add(objects, &n->total_objects);
}
-static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+static inline void dec_objects_node(struct kmem_cache_node *n, int objects)
{
- return atomic_long_read(&n->nr_slabs);
-}
-
-static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
-{
- struct kmem_cache_node *n = get_node(s, node);
-
- /*
- * May be called early in order to allocate a slab for the
- * kmem_cache_node structure. Solve the chicken-egg
- * dilemma by deferring the increment of the count during
- * bootstrap (see early_kmem_cache_node_alloc).
- */
- if (likely(n)) {
- atomic_long_inc(&n->nr_slabs);
- atomic_long_add(objects, &n->total_objects);
- }
-}
-static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
-{
- struct kmem_cache_node *n = get_node(s, node);
-
- atomic_long_dec(&n->nr_slabs);
atomic_long_sub(objects, &n->total_objects);
}
@@ -1413,15 +1388,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
#define disable_higher_order_debug 0
-static inline unsigned long slabs_node(struct kmem_cache *s, int node)
- { return 0; }
-static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
- { return 0; }
-static inline void inc_slabs_node(struct kmem_cache *s, int node,
- int objects) {}
-static inline void dec_slabs_node(struct kmem_cache *s, int node,
- int objects) {}
-
+static inline void inc_objects_node(struct kmem_cache_node *n, int objects) {}
+static inline void dec_objects_node(struct kmem_cache_node *n, int objects) {}
static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
void *freelist, void *nextfree)
{
@@ -1429,6 +1397,42 @@ static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
}
#endif /* CONFIG_SLUB_DEBUG */
+static inline unsigned long slabs_node(struct kmem_cache *s, int node)
+{
+ struct kmem_cache_node *n = get_node(s, node);
+
+ return atomic_long_read(&n->nr_slabs);
+}
+
+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+{
+ return atomic_long_read(&n->nr_slabs);
+}
+
+static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
+{
+ struct kmem_cache_node *n = get_node(s, node);
+
+ /*
+ * May be called early in order to allocate a slab for the
+ * kmem_cache_node structure. Solve the chicken-egg
+ * dilemma by deferring the increment of the count during
+ * bootstrap (see early_kmem_cache_node_alloc).
+ */
+ if (likely(n)) {
+ atomic_long_inc(&n->nr_slabs);
+ inc_objects_node(n, objects);
+ }
+}
+
+static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
+{
+ struct kmem_cache_node *n = get_node(s, node);
+
+ atomic_long_dec(&n->nr_slabs);
+ dec_objects_node(n, objects);
+}
+
/*
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
--
2.11.0