Message-ID: <20200813084858.1494-1-wuyun.wu@huawei.com>
Date: Thu, 13 Aug 2020 16:48:54 +0800
From: <wuyun.wu@...wei.com>
To: Christoph Lameter <cl@...ux.com>,
Pekka Enberg <penberg@...nel.org>,
"David Rientjes" <rientjes@...gle.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
"Andrew Morton" <akpm@...ux-foundation.org>
CC: <hewenliang4@...wei.com>, <hushiyuan@...wei.com>,
Abel Wu <wuyun.wu@...wei.com>,
"open list:SLAB ALLOCATOR" <linux-mm@...ck.org>,
"open list" <linux-kernel@...r.kernel.org>
Subject: [PATCH] mm/slub: sysfs cleanup on cpu partial when !SLUB_CPU_PARTIAL
From: Abel Wu <wuyun.wu@...wei.com>

Hide the cpu-partial-related sysfs entries (cpu_partial, slabs_cpu_partial
and the cpu_partial_* stats) when !CONFIG_SLUB_CPU_PARTIAL to avoid
confusion, since these knobs have no effect in that configuration.

Signed-off-by: Abel Wu <wuyun.wu@...wei.com>
---
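The shape of the change is plain conditional compilation: the cpu_partial
show/store handlers, the slabs_cpu_partial reader, the cpu_partial_* stat
attributes and their entries in slab_attrs[] all move under the same
CONFIG_SLUB_CPU_PARTIAL guard, so a kernel built without the option simply
has no such files, instead of a cpu_partial that always reads 0 and rejects
any non-zero write with -EINVAL as the old cpu_partial_store() did. Below is
a minimal standalone C sketch of that pattern; every name in it
(DEMO_CPU_PARTIAL, struct demo_attr, the sample show helpers and values) is
made up for illustration and none of it is kernel sysfs code.

#include <stdio.h>

#define DEMO_CPU_PARTIAL 1	/* set to 0 to mimic !CONFIG_SLUB_CPU_PARTIAL */

/* Toy stand-in for an attribute table: a name plus a show callback. */
struct demo_attr {
	const char *name;
	int (*show)(char *buf, size_t len);
};

static int min_partial_show(char *buf, size_t len)
{
	return snprintf(buf, len, "5\n");	/* made-up sample value */
}

#if DEMO_CPU_PARTIAL
static int cpu_partial_show(char *buf, size_t len)
{
	return snprintf(buf, len, "30\n");	/* made-up sample value */
}
#endif

/* Handler and table entry sit under the same guard, so disabling the
 * option removes the knob entirely rather than leaving a dead file. */
static const struct demo_attr demo_attrs[] = {
	{ "min_partial", min_partial_show },
#if DEMO_CPU_PARTIAL
	{ "cpu_partial", cpu_partial_show },
#endif
};

int main(void)
{
	char buf[64];
	size_t i;

	for (i = 0; i < sizeof(demo_attrs) / sizeof(demo_attrs[0]); i++) {
		demo_attrs[i].show(buf, sizeof(buf));
		printf("%s: %s", demo_attrs[i].name, buf);
	}
	return 0;
}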
mm/slub.c | 56 +++++++++++++++++++++++++++++++------------------------
1 file changed, 32 insertions(+), 24 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 5d89e4064f83..4f496ae5a820 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5071,29 +5071,6 @@ static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
}
SLAB_ATTR(min_partial);
-static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
-{
- return sprintf(buf, "%u\n", slub_cpu_partial(s));
-}
-
-static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
- size_t length)
-{
- unsigned int objects;
- int err;
-
- err = kstrtouint(buf, 10, &objects);
- if (err)
- return err;
- if (objects && !kmem_cache_has_cpu_partial(s))
- return -EINVAL;
-
- slub_set_cpu_partial(s, objects);
- flush_all(s);
- return length;
-}
-SLAB_ATTR(cpu_partial);
-
static ssize_t ctor_show(struct kmem_cache *s, char *buf)
{
if (!s->ctor)
@@ -5132,6 +5109,30 @@ static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
}
SLAB_ATTR_RO(objects_partial);
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%u\n", slub_cpu_partial(s));
+}
+
+static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
+ size_t length)
+{
+ unsigned int objects;
+ int err;
+
+ err = kstrtouint(buf, 10, &objects);
+ if (err)
+ return err;
+ if (objects && !kmem_cache_has_cpu_partial(s))
+ return -EINVAL;
+
+ slub_set_cpu_partial(s, objects);
+ flush_all(s);
+ return length;
+}
+SLAB_ATTR(cpu_partial);
+
static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
{
int objects = 0;
@@ -5166,6 +5167,7 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
return len + sprintf(buf + len, "\n");
}
SLAB_ATTR_RO(slabs_cpu_partial);
+#endif
static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
{
@@ -5496,10 +5498,12 @@ STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
STAT_ATTR(ORDER_FALLBACK, order_fallback);
STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
+#ifdef CONFIG_SLUB_CPU_PARTIAL
STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
+#endif
#endif /* CONFIG_SLUB_STATS */
static struct attribute *slab_attrs[] = {
@@ -5508,7 +5512,6 @@ static struct attribute *slab_attrs[] = {
&objs_per_slab_attr.attr,
&order_attr.attr,
&min_partial_attr.attr,
- &cpu_partial_attr.attr,
&objects_attr.attr,
&objects_partial_attr.attr,
&partial_attr.attr,
@@ -5520,7 +5523,10 @@ static struct attribute *slab_attrs[] = {
&reclaim_account_attr.attr,
&destroy_by_rcu_attr.attr,
&shrink_attr.attr,
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+ &cpu_partial_attr.attr,
&slabs_cpu_partial_attr.attr,
+#endif
#ifdef CONFIG_SLUB_DEBUG
&total_objects_attr.attr,
&slabs_attr.attr,
@@ -5562,11 +5568,13 @@ static struct attribute *slab_attrs[] = {
&order_fallback_attr.attr,
&cmpxchg_double_fail_attr.attr,
&cmpxchg_double_cpu_fail_attr.attr,
+#ifdef CONFIG_SLUB_CPU_PARTIAL
&cpu_partial_alloc_attr.attr,
&cpu_partial_free_attr.attr,
&cpu_partial_node_attr.attr,
&cpu_partial_drain_attr.attr,
#endif
+#endif
#ifdef CONFIG_FAILSLAB
&failslab_attr.attr,
#endif
--
2.28.0.windows.1