Message-Id: <20190916144558.27282-5-lpf.vector@gmail.com>
Date: Mon, 16 Sep 2019 22:45:55 +0800
From: Pengfei Li <lpf.vector@...il.com>
To: akpm@...ux-foundation.org
Cc: vbabka@...e.cz, cl@...ux.com, penberg@...nel.org,
rientjes@...gle.com, iamjoonsoo.kim@....com, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, guro@...com,
Pengfei Li <lpf.vector@...il.com>
Subject: [PATCH v5 4/7] mm, slab: Return ZERO_SIZE_ALLOC for zero sized kmalloc requests

This is a preparation patch: replace 0 with ZERO_SIZE_ALLOC as the
kmalloc_index() return value for zero sized requests.
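As a rough userspace sketch of the new convention (not kernel code; the
demo_* names, the hard-coded index 3 and the malloc() fallback are made
up for illustration, and only ZERO_SIZE_ALLOC, ZERO_SIZE_PTR and the
sentinel comparison mirror the actual change):

#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define ZERO_SIZE_ALLOC (UINT_MAX)
#define ZERO_SIZE_PTR   ((void *)16)

/* Stand-in for kmalloc_index(): zero sized requests now yield the
 * ZERO_SIZE_ALLOC sentinel instead of index 0. */
static unsigned int demo_size_index(size_t size)
{
        if (!size)
                return ZERO_SIZE_ALLOC;
        /* The real function maps size to a kmalloc cache index. */
        return 3;
}

/* Stand-in for kmalloc(): the caller now tests for the sentinel
 * (previously "if (!index)") and still returns ZERO_SIZE_PTR. */
static void *demo_alloc(size_t size)
{
        unsigned int index = demo_size_index(size);

        if (index == ZERO_SIZE_ALLOC)
                return ZERO_SIZE_PTR;
        return malloc(size);    /* placeholder for the real cache alloc */
}

int main(void)
{
        printf("demo_alloc(0) = %p, ZERO_SIZE_PTR = %p\n",
               demo_alloc(0), ZERO_SIZE_PTR);
        return 0;
}

Since UINT_MAX is never a valid kmalloc cache index, the sentinel cannot
be confused with a real index, and index 0 no longer has to double as the
"zero sized request" marker.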
Signed-off-by: Pengfei Li <lpf.vector@...il.com>
Acked-by: David Rientjes <rientjes@...gle.com>
---
include/linux/slab.h | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index e773e5764b7b..1f05f68f2c3e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -121,14 +121,20 @@
#define SLAB_DEACTIVATED ((slab_flags_t __force)0x10000000U)
/*
- * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
+ * ZERO_SIZE_ALLOC will be returned by kmalloc_index() for zero sized
+ * requests.
*
+ * After that, ZERO_SIZE_PTR will be returned by the function that called
+ * kmalloc_index().
+ *
* Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
*
* ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
* Both make kfree a no-op.
*/
-#define ZERO_SIZE_PTR ((void *)16)
+#define ZERO_SIZE_ALLOC (UINT_MAX)
+
+#define ZERO_SIZE_PTR ((void *)16)
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
(unsigned long)ZERO_SIZE_PTR)
@@ -350,7 +356,7 @@ static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
static __always_inline unsigned int kmalloc_index(size_t size)
{
if (!size)
- return 0;
+ return ZERO_SIZE_ALLOC;
if (size <= KMALLOC_MIN_SIZE)
return KMALLOC_SHIFT_LOW;
@@ -546,7 +552,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
#ifndef CONFIG_SLOB
index = kmalloc_index(size);
- if (!index)
+ if (index == ZERO_SIZE_ALLOC)
return ZERO_SIZE_PTR;
return kmem_cache_alloc_trace(
@@ -564,7 +570,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
size <= KMALLOC_MAX_CACHE_SIZE) {
unsigned int i = kmalloc_index(size);
- if (!i)
+ if (i == ZERO_SIZE_ALLOC)
return ZERO_SIZE_PTR;
return kmem_cache_alloc_node_trace(
--
2.21.0
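To make the ZERO_SIZE_PTR semantics spelled out in the comment above
concrete, here is a small userspace sketch (not kernel code; the program
scaffolding is invented): both NULL and ZERO_SIZE_PTR satisfy
ZERO_OR_NULL_PTR(), which is what lets kfree() treat them as no-ops,
while dereferencing ZERO_SIZE_PTR still faults because address 16 falls
in an unmapped page.

#include <stdio.h>

#define ZERO_SIZE_PTR ((void *)16)
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
                             (unsigned long)ZERO_SIZE_PTR)

int main(void)
{
        /* Both checks print 1: NULL is 0 and ZERO_SIZE_PTR is 16,
         * so each is <= (unsigned long)ZERO_SIZE_PTR. */
        printf("ZERO_OR_NULL_PTR(NULL)          = %d\n",
               ZERO_OR_NULL_PTR(NULL));
        printf("ZERO_OR_NULL_PTR(ZERO_SIZE_PTR) = %d\n",
               ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));
        return 0;
}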