Message-ID: <20170123225449.GA29940@htj.duckdns.org>
Date: Mon, 23 Jan 2017 17:54:49 -0500
From: Tejun Heo <tj@...nel.org>
To: vdavydov.dev@...il.com, cl@...ux.com, penberg@...nel.org,
rientjes@...gle.com, iamjoonsoo.kim@....com,
akpm@...ux-foundation.org
Cc: jsvana@...com, hannes@...xchg.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, cgroups@...r.kernel.org, kernel-team@...com
Subject: [PATCH v2 02/10] slub: separate out sysfs_slab_release() from
sysfs_slab_remove()

From 3b0cdd93b2d9bdea62ea6681e612bdae7a40d883 Mon Sep 17 00:00:00 2001
From: Tejun Heo <tj@...nel.org>
Date: Mon, 23 Jan 2017 17:53:18 -0500

Separate out slub sysfs removal and release, and call the former
earlier from __kmem_cache_shutdown().  There's no reason to defer
sysfs removal through RCU, and this will later allow us to remove
sysfs files much earlier, during memory cgroup offline rather than at
release time.
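
For anyone reading along, here is a minimal, self-contained sketch of
the two-phase kobject teardown this split relies on (not from this
patch; the module and the demo_* names are made up for illustration):
kobject_del() takes the directory out of sysfs immediately, while
kobject_put() only drops a reference and lets the ktype's ->release()
callback free the object whenever the last user finally goes away.

#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/slab.h>

static struct kobject *demo_kobj;

/* ->release() runs only after the last reference is dropped. */
static void demo_release(struct kobject *kobj)
{
	kfree(kobj);
}

static struct kobj_type demo_ktype = {
	.release	= demo_release,
	.sysfs_ops	= &kobj_sysfs_ops,
};

static int __init demo_init(void)
{
	int ret;

	demo_kobj = kzalloc(sizeof(*demo_kobj), GFP_KERNEL);
	if (!demo_kobj)
		return -ENOMEM;

	ret = kobject_init_and_add(demo_kobj, &demo_ktype, kernel_kobj, "demo");
	if (ret)
		kobject_put(demo_kobj);		/* freed via demo_release() */
	return ret;
}

static void __exit demo_exit(void)
{
	/* Removal: /sys/kernel/demo disappears right here ... */
	kobject_del(demo_kobj);
	/* ... release: drop the ref, demo_release() frees the memory. */
	kobject_put(demo_kobj);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The same ordering is what the patch gives kmem caches: the sysfs
directory can be torn down from __kmem_cache_shutdown() right away,
while the final kobject_put() waits until the cache is released.
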
v2: Add a slab_state >= FULL test to sysfs_slab_release() so that, as
    before, kobject_put() is skipped for caches which aren't fully
    initialized.  This most likely leaks the kmem_cache on init
    failure, as we're skipping the only release path.  Let's fix that
    up later.
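
To make the new guard concrete, this is roughly what the release half
looks like with the v2 check (paraphrased from the diff below, with
comments added here purely for explanation):

void sysfs_slab_release(struct kmem_cache *s)
{
	/*
	 * Before sysfs is up (slab_state < FULL) the cache's kobject was
	 * never initialized, so kobject_put() must not touch it.  The
	 * flip side: that kobject's ->release() is what ultimately frees
	 * the struct kmem_cache, so skipping it on early init failure
	 * leaks the cache -- the known gap mentioned above.
	 */
	if (slab_state >= FULL)
		kobject_put(&s->kobj);
}
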
Signed-off-by: Tejun Heo <tj@...nel.org>
Cc: Vladimir Davydov <vdavydov.dev@...il.com>
Cc: Christoph Lameter <cl@...ux.com>
Cc: Pekka Enberg <penberg@...nel.org>
Cc: David Rientjes <rientjes@...gle.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@....com>
Cc: Andrew Morton <akpm@...ux-foundation.org>
---
v2 of the patch.  Fixes the boot failure reported by the test bot.
Thanks.
include/linux/slub_def.h | 4 ++--
mm/slab_common.c | 2 +-
mm/slub.c | 12 ++++++++++--
3 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 75f56c2..07ef550 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -113,9 +113,9 @@ struct kmem_cache {
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
-void sysfs_slab_remove(struct kmem_cache *);
+void sysfs_slab_release(struct kmem_cache *);
#else
-static inline void sysfs_slab_remove(struct kmem_cache *s)
+static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 46ff746..3bc4bb8 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -480,7 +480,7 @@ static void release_caches(struct list_head *release, bool need_rcu_barrier)
list_for_each_entry_safe(s, s2, release, list) {
#ifdef SLAB_SUPPORTS_SYSFS
- sysfs_slab_remove(s);
+ sysfs_slab_release(s);
#else
slab_kmem_cache_release(s);
#endif
diff --git a/mm/slub.c b/mm/slub.c
index 68b84f9..59ca718 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -214,11 +214,13 @@ enum track_item { TRACK_ALLOC, TRACK_FREE };
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void memcg_propagate_slab_attrs(struct kmem_cache *s);
+static void sysfs_slab_remove(struct kmem_cache *s);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
{ return 0; }
static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
+static inline void sysfs_slab_remove(struct kmem_cache *s) { }
#endif
static inline void stat(const struct kmem_cache *s, enum stat_item si)
@@ -3679,6 +3681,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
if (n->nr_partial || slabs_node(s, node))
return 1;
}
+ sysfs_slab_remove(s);
return 0;
}
@@ -5629,7 +5632,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
goto out;
}
-void sysfs_slab_remove(struct kmem_cache *s)
+static void sysfs_slab_remove(struct kmem_cache *s)
{
if (slab_state < FULL)
/*
@@ -5643,7 +5646,12 @@ void sysfs_slab_remove(struct kmem_cache *s)
#endif
kobject_uevent(&s->kobj, KOBJ_REMOVE);
kobject_del(&s->kobj);
- kobject_put(&s->kobj);
+}
+
+void sysfs_slab_release(struct kmem_cache *s)
+{
+ if (slab_state >= FULL)
+ kobject_put(&s->kobj);
}
/*
--
2.9.3