Message-ID: <Pine.LNX.4.64.0802112246550.7687@sbz-30.cs.Helsinki.FI>
Date: Mon, 11 Feb 2008 22:47:46 +0200 (EET)
From: Pekka J Enberg <penberg@...helsinki.fi>
To: clameter@....com
cc: linux-kernel@...r.kernel.org
Subject: [PATCH] slub: kmalloc page allocator pass-through cleanup
From: Pekka Enberg <penberg@...helsinki.fi>
This adds a proper function for kmalloc page allocator pass-through. While it
greatly simplifies any code that does slab tracing, I think it's a worthwhile
cleanup in itself.
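
For anyone skimming: the "pass-through" means that allocations larger than
PAGE_SIZE/2 never touch the slab caches at all; they go straight to the page
allocator as compound pages, and kfree() later sees that the pointer does not
belong to a slab page and hands the pages straight back. A rough userspace
sketch of that decision, only to illustrate the shape of it (plain C; mmap()
stands in for __get_free_pages(), malloc() for the kmalloc caches, and the
model_kmalloc*() names are made up for this mail, not part of the patch):

#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/*
 * Userspace model only: mmap() stands in for __get_free_pages() and
 * malloc() stands in for the kmalloc slab caches.  The allocators and
 * the exact cutoff differ in the kernel; the branch is the point here.
 */
static void *model_kmalloc_large(size_t size)
{
	size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
	size_t rounded = (size + page_size - 1) & ~(page_size - 1);
	void *p = mmap(NULL, rounded, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	return p == MAP_FAILED ? NULL : p;
}

static void *model_kmalloc(size_t size)
{
	size_t page_size = (size_t)sysconf(_SC_PAGESIZE);

	/* Pass-through: large requests skip the "slab" entirely. */
	if (size > page_size / 2)
		return model_kmalloc_large(size);

	return malloc(size);
}

int main(void)
{
	void *small = model_kmalloc(64);	/* "slab" path */
	void *large = model_kmalloc(64 * 1024);	/* page path   */

	if (small)
		memset(small, 0, 64);
	if (large)
		memset(large, 0, 64 * 1024);

	free(small);
	/*
	 * The kernel's kfree() notices that the pointer is not backed by a
	 * slab page and frees the pages directly; in this model we have to
	 * remember which path the pointer came from and munmap() it.
	 */
	if (large)
		munmap(large, 64 * 1024);
	return 0;
}

The real cutoff and helper live in the patch below; the sketch only mirrors
the branch that kmalloc_large() now captures in one place.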
Signed-off-by: Pekka Enberg <penberg@...helsinki.fi>
---
 include/linux/slub_def.h |    8 ++++++--
 mm/slub.c                |   14 ++++++--------
 2 files changed, 12 insertions(+), 10 deletions(-)
Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h 2008-02-11 22:29:00.000000000 +0200
+++ linux-2.6/include/linux/slub_def.h 2008-02-11 22:30:34.000000000 +0200
@@ -162,12 +162,16 @@
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+}
+
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
 		if (size > PAGE_SIZE / 2)
-			return (void *)__get_free_pages(flags | __GFP_COMP,
-						get_order(size));
+			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2008-02-11 22:30:37.000000000 +0200
+++ linux-2.6/mm/slub.c 2008-02-11 22:32:14.000000000 +0200
@@ -2518,8 +2518,7 @@
 	struct kmem_cache *s;
 
 	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(flags | __GFP_COMP,
-						get_order(size));
+		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
 
@@ -2536,8 +2535,7 @@
 	struct kmem_cache *s;
 
 	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(flags | __GFP_COMP,
-						get_order(size));
+		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
 
@@ -3050,8 +3048,8 @@
 	struct kmem_cache *s;
 
 	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-						get_order(size));
+		return kmalloc_large(size, gfpflags);
+
 	s = get_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
@@ -3066,8 +3064,8 @@
 	struct kmem_cache *s;
 
 	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-						get_order(size));
+		return kmalloc_large(size, gfpflags);
+
 	s = get_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/