Message-Id: <20220308114142.1744229-4-42.hyeyoo@gmail.com>
Date: Tue, 8 Mar 2022 11:41:30 +0000
From: Hyeonggon Yoo <42.hyeyoo@...il.com>
To: linux-mm@...ck.org
Cc: Christoph Lameter <cl@...ux.com>,
Pekka Enberg <penberg@...nel.org>,
David Rientjes <rientjes@...gle.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
Andrew Morton <akpm@...ux-foundation.org>,
Vlastimil Babka <vbabka@...e.cz>,
Marco Elver <elver@...gle.com>,
Matthew Wilcox <willy@...radead.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
linux-kernel@...r.kernel.org, 42.hyeyoo@...il.com
Subject: [RFC PATCH v1 03/15] mm/sl[au]b: remove CONFIG_TRACING ifdefs for tracing functions

CONFIG_TRACING ifdefs are not necessary because tracepoints compile to
no-ops on kernels built without CONFIG_TRACING. These functions will be
removed entirely in a later cleanup.
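
To illustrate, a simplified sketch (not the exact kernel macros; the
real declarations are generated by the tracepoint machinery in
include/linux/tracepoint.h) of what a call such as trace_kmalloc()
reduces to when tracing is compiled out:

#ifdef CONFIG_TRACING
/* tracing enabled: real declaration, emits an event when enabled */
void trace_kmalloc(unsigned long call_site, const void *ptr,
		   size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags);
#else
/* tracing disabled: empty stub, the compiler discards the call */
static inline void trace_kmalloc(unsigned long call_site, const void *ptr,
				 size_t bytes_req, size_t bytes_alloc,
				 gfp_t gfp_flags) { }
#endif

Since the disabled case is already an empty inline, wrapping callers
such as kmem_cache_alloc_trace() in the same ifdef adds nothing.
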
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@...il.com>
---
 include/linux/slab.h | 29 -----------------------------
 mm/slab.c            |  4 ----
 mm/slab_common.c     |  2 --
 mm/slub.c            |  4 ----
 4 files changed, 39 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index df8e5dca00a2..a5e3ad058817 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -460,7 +460,6 @@ static __always_inline void kfree_bulk(size_t size, void **p)
 	kmem_cache_free_bulk(NULL, size, p);
 }
 
-#ifdef CONFIG_TRACING
 extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
 				   __assume_slab_alignment __alloc_size(3);
 
@@ -468,39 +467,11 @@ extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
 					 int node, size_t size) __assume_slab_alignment
 					 __alloc_size(4);
 
-#else /* CONFIG_TRACING */
-static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
-								    gfp_t flags, size_t size)
-{
-	void *ret = kmem_cache_alloc(s, flags);
-
-	ret = kasan_kmalloc(s, ret, size, flags);
-	return ret;
-}
-
-static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
-							 int node, size_t size)
-{
-	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
-
-	ret = kasan_kmalloc(s, ret, size, gfpflags);
-	return ret;
-}
-#endif /* CONFIG_TRACING */
-
 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
 									 __alloc_size(1);
 
-#ifdef CONFIG_TRACING
 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
 				__assume_page_alignment __alloc_size(1);
-#else
-static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
-								 unsigned int order)
-{
-	return kmalloc_order(size, flags, order);
-}
-#endif
 
 static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
 {
diff --git a/mm/slab.c b/mm/slab.c
index b41124a1efd9..1f3195344bdf 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3519,7 +3519,6 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
 
-#ifdef CONFIG_TRACING
 void *
 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 {
@@ -3533,7 +3532,6 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
-#endif
 
 /**
  * kmem_cache_alloc_node - Allocate an object on the specified node
@@ -3560,7 +3558,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-#ifdef CONFIG_TRACING
 void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 				  gfp_t flags,
 				  int nodeid,
@@ -3577,7 +3574,6 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
-#endif
 
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 23f2ab0713b7..2edb77056adc 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -954,7 +954,6 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 }
 EXPORT_SYMBOL(kmalloc_order);
 
-#ifdef CONFIG_TRACING
 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
 {
 	void *ret = kmalloc_order(size, flags, order);
@@ -962,7 +961,6 @@ void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_order_trace);
-#endif
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
 /* Randomize a generic freelist */
diff --git a/mm/slub.c b/mm/slub.c
index 74369cadc243..267f700abac1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3238,7 +3238,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size);
 }
 
-#ifdef CONFIG_TRACING
 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_, size);
@@ -3247,7 +3246,6 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
-#endif
 
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
@@ -3260,7 +3258,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-#ifdef CONFIG_TRACING
 void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 				  gfp_t gfpflags,
 				  int node, size_t size)
@@ -3274,7 +3271,6 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
-#endif
 
 /*
  * Slow path handling. This may still be called frequently since objects
--
2.33.1