Message-Id: <20210818214021.2476230-4-keescook@chromium.org>
Date: Wed, 18 Aug 2021 14:40:17 -0700
From: Kees Cook <keescook@...omium.org>
To: linux-kernel@...r.kernel.org
Cc: Kees Cook <keescook@...omium.org>, Joe Perches <joe@...ches.com>,
Christoph Lameter <cl@...ux.com>,
Pekka Enberg <penberg@...nel.org>,
David Rientjes <rientjes@...gle.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
Andrew Morton <akpm@...ux-foundation.org>,
Vlastimil Babka <vbabka@...e.cz>, linux-mm@...ck.org,
Miguel Ojeda <ojeda@...nel.org>,
Nathan Chancellor <nathan@...nel.org>,
Nick Desaulniers <ndesaulniers@...gle.com>,
Andy Whitcroft <apw@...onical.com>,
Dwaipayan Ray <dwaipayanray1@...il.com>,
Lukas Bulwahn <lukas.bulwahn@...il.com>,
Daniel Micay <danielmicay@...il.com>,
Dennis Zhou <dennis@...nel.org>, Tejun Heo <tj@...nel.org>,
Masahiro Yamada <masahiroy@...nel.org>,
Michal Marek <michal.lkml@...kovi.net>,
clang-built-linux@...glegroups.com, linux-kbuild@...r.kernel.org,
linux-hardening@...r.kernel.org
Subject: [PATCH v2 3/7] slab: Clean up function declarations
In order to have more readable and regular declarations, move __must_check
to the line above the main function declaration and add all the missing
parameter names.
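For example, as seen in the diff below, krealloc()'s declaration goes
from:

	void * __must_check krealloc(const void *, size_t, gfp_t);

to:

	__must_check
	void *krealloc(const void *objp, size_t new_size, gfp_t flags);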
Suggested-by: Joe Perches <joe@...ches.com>
Cc: Christoph Lameter <cl@...ux.com>
Cc: Pekka Enberg <penberg@...nel.org>
Cc: David Rientjes <rientjes@...gle.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@....com>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Vlastimil Babka <vbabka@...e.cz>
Cc: linux-mm@...ck.org
Signed-off-by: Kees Cook <keescook@...omium.org>
---
include/linux/slab.h | 68 +++++++++++++++++++++++---------------------
1 file changed, 35 insertions(+), 33 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index c0d46b6fa12a..10fd0a8c816a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -152,8 +152,8 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name,
slab_flags_t flags,
unsigned int useroffset, unsigned int usersize,
void (*ctor)(void *));
-void kmem_cache_destroy(struct kmem_cache *);
-int kmem_cache_shrink(struct kmem_cache *);
+void kmem_cache_destroy(struct kmem_cache *s);
+int kmem_cache_shrink(struct kmem_cache *s);
/*
* Please use this macro to create slab caches. Simply specify the
@@ -181,11 +181,12 @@ int kmem_cache_shrink(struct kmem_cache *);
/*
* Common kmalloc functions provided by all allocators
*/
-void * __must_check krealloc(const void *, size_t, gfp_t);
-void kfree(const void *);
-void kfree_sensitive(const void *);
-size_t __ksize(const void *);
-size_t ksize(const void *);
+__must_check
+void *krealloc(const void *objp, size_t new_size, gfp_t flags);
+void kfree(const void *objp);
+void kfree_sensitive(const void *objp);
+size_t __ksize(const void *objp);
+size_t ksize(const void *objp);
#ifdef CONFIG_PRINTK
bool kmem_valid_obj(void *object);
void kmem_dump_obj(void *object);
@@ -426,8 +427,8 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
#endif /* !CONFIG_SLOB */
void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
-void kmem_cache_free(struct kmem_cache *, void *);
+void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
+void kmem_cache_free(struct kmem_cache *s, void *objp);
/*
* Bulk allocation and freeing operations. These are accelerated in an
@@ -436,8 +437,8 @@ void kmem_cache_free(struct kmem_cache *, void *);
*
* Note that interrupts must be enabled when calling these functions.
*/
-void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p);
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
/*
* Caller must not use kfree_bulk() on memory not originally allocated
@@ -449,8 +450,9 @@ static __always_inline void kfree_bulk(size_t size, void **p)
}
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
+void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
+ __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
@@ -464,17 +466,15 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
#endif
#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
+extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
+ __assume_slab_alignment __malloc;
#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size) __assume_slab_alignment __malloc;
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+ int node, size_t size) __assume_slab_alignment __malloc;
#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
+static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags, int node, size_t size)
{
return kmem_cache_alloc_trace(s, gfpflags, size);
}
@@ -490,10 +490,8 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
return ret;
}
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
+static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags, int node, size_t size)
{
void *ret = kmem_cache_alloc_node(s, gfpflags, node);
@@ -502,13 +500,15 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
}
#endif /* CONFIG_TRACING */
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+ __assume_page_alignment __malloc;
#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+ __assume_page_alignment __malloc;
#else
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+static __always_inline void *kmalloc_order_trace(size_t size, gfp_t flags,
+ unsigned int order)
{
return kmalloc_order(size, flags, order);
}
@@ -638,8 +638,9 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
* @new_size: new size of a single member of the array
* @flags: the type of memory to allocate (see kmalloc)
*/
-static __must_check inline void *
-krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags)
+__must_check
+static inline void *krealloc_array(void *p, size_t new_n, size_t new_size,
+ gfp_t flags)
{
size_t bytes;
@@ -668,7 +669,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
* allocator where we care about the real place the memory allocation
* request comes from.
*/
-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
+extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
#define kmalloc_track_caller(size, flags) \
__kmalloc_track_caller(size, flags, _RET_IP_)
@@ -691,7 +692,8 @@ static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
#ifdef CONFIG_NUMA
-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
+extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
+ unsigned long caller);
#define kmalloc_node_track_caller(size, flags, node) \
__kmalloc_node_track_caller(size, flags, node, \
_RET_IP_)
--
2.30.2