Message-ID: <Pine.LNX.4.64.0706011617330.9781@schroedinger.engr.sgi.com>
Date: Fri, 1 Jun 2007 16:29:29 -0700 (PDT)
From: Christoph Lameter <clameter@....com>
To: Linus Torvalds <torvalds@...ux-foundation.org>
cc: Andrew Morton <akpm@...ux-foundation.org>,
Jeremy Fitzhardinge <jeremy@...p.org>,
Srinivasa Ds <srinivasa@...ibm.com>,
linux-kernel@...r.kernel.org,
Srivatsa Vaddagiri <vatsa@...ibm.com>,
Dinakar Guniguntala <dino@...ibm.com>, pj@....com,
simon.derr@...l.net, clameter@...ulhu.engr.sgi.com,
rientjes@...gle.com
Subject: Re: [RFC] [PATCH] cpuset operations causes Badness at mm/slab.c:777
warning
On Fri, 1 Jun 2007, Linus Torvalds wrote:
> So for *both* of the above reasons, it's actually stupid to return NULL
> for a zero-sized allocation. It would be much better to return another
> pointer that will trap on access. A good candidate might be to return
>
> #define BADPTR ((void *)16)
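Right - BADPTR traps on access just like NULL does (accesses to the
first page fault), but it is still distinguishable from NULL and from
any real object, so callers can store it, compare against it and pass
it around. Roughly the idea (sketch only, the badptr_or_null() helper
is made up for this mail and is not part of the patch below):

#define BADPTR ((void *)16)

/*
 * Illustration only: true for NULL and for the kmalloc(0) poison
 * value, false for any pointer to a real object.
 */
static inline int badptr_or_null(const void *p)
{
	return (unsigned long)p <= (unsigned long)BADPTR;
}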
Something like this? (Not tested yet, just for review):
SLUB: Return BADPTR instead of warning for kmalloc(0)
Remove the WARN_ON_ONCE and simply return BADPTR.
BADPTR can be used legitimately as long as it is not dereferenced.
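For illustration, a caller that still does a zero sized allocation then
sees something like this (struct foo and its bar member are made up for
this description, they are not part of the patch):

	struct foo *p = kmalloc(0, GFP_KERNEL);	/* now returns BADPTR */

	if (p != BADPTR)
		p->bar = 1;	/* only touch real objects; dereferencing BADPTR faults */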
Signed-off-by: Christoph Lameter <clameter@....com>
---
 include/linux/slub_def.h |   18 ++++++++----------
 mm/slub.c                |    8 ++++----
 2 files changed, 12 insertions(+), 14 deletions(-)
Index: slub/include/linux/slub_def.h
===================================================================
--- slub.orig/include/linux/slub_def.h 2007-06-01 16:19:36.000000000 -0700
+++ slub/include/linux/slub_def.h 2007-06-01 16:24:54.000000000 -0700
@@ -12,6 +12,8 @@
 #include <linux/kobject.h>
 #include <linux/log2.h>
 
+#define BADPTR ((void *)16)
+
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
@@ -74,13 +76,9 @@ extern struct kmem_cache kmalloc_caches[
  */
 static inline int kmalloc_index(size_t size)
 {
-	/*
-	 * We should return 0 if size == 0 (which would result in the
-	 * kmalloc caller to get NULL) but we use the smallest object
-	 * here for legacy reasons. Just issue a warning so that
-	 * we can discover locations where we do 0 sized allocations.
-	 */
-	WARN_ON_ONCE(size == 0);
+
+	if (!size)
+		return 0;
 
 	if (size > KMALLOC_MAX_SIZE)
 		return -1;
@@ -133,7 +131,7 @@ static inline void *kmalloc(size_t size,
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
-			return NULL;
+			return BADPTR;
 
 		return kmem_cache_alloc(s, flags);
 	} else
@@ -146,7 +144,7 @@ static inline void *kzalloc(size_t size,
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
-			return NULL;
+			return BADPTR;
 
 		return kmem_cache_zalloc(s, flags);
 	} else
@@ -162,7 +160,7 @@ static inline void *kmalloc_node(size_t
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
-			return NULL;
+			return BADPTR;
 
 		return kmem_cache_alloc_node(s, flags, node);
 	} else
Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c 2007-06-01 16:21:00.000000000 -0700
+++ slub/mm/slub.c 2007-06-01 16:27:12.000000000 -0700
@@ -2286,7 +2286,7 @@ void *__kmalloc(size_t size, gfp_t flags
 
 	if (s)
 		return slab_alloc(s, flags, -1, __builtin_return_address(0));
-	return NULL;
+	return BADPTR;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2297,7 +2297,7 @@ void *__kmalloc_node(size_t size, gfp_t
 
 	if (s)
 		return slab_alloc(s, flags, node, __builtin_return_address(0));
-	return NULL;
+	return BADPTR;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2707,7 +2707,7 @@ void *__kmalloc_track_caller(size_t size
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
 	if (!s)
-		return NULL;
+		return BADPTR;
 
 	return slab_alloc(s, gfpflags, -1, caller);
 }
@@ -2718,7 +2718,7 @@ void *__kmalloc_node_track_caller(size_t
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
 	if (!s)
-		return NULL;
+		return BADPTR;
 
 	return slab_alloc(s, gfpflags, node, caller);
 }