This patch uses the mem_map pages to find the bigblock descriptor for
large allocations: the descriptor pointer is stored in the otherwise
unused page->lru.next field of the first page of the allocation, so
kfree() and ksize() can find it in O(1), and the global bigblocks list
and its block_lock spinlock go away entirely.

-- Steve

Signed-off-by: Steven Rostedt

 mm/slob.c |   74 ++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 41 insertions(+), 33 deletions(-)

Index: linux/mm/slob.c
===================================================================
--- linux.orig/mm/slob.c
+++ linux/mm/slob.c
@@ -49,15 +49,42 @@ typedef struct slob_block slob_t;
 
 struct bigblock {
 	int order;
 	void *pages;
-	struct bigblock *next;
 };
 typedef struct bigblock bigblock_t;
 
 static slob_t arena = { .next = &arena, .units = 1 };
 static slob_t *slobfree = &arena;
-static bigblock_t *bigblocks;
 static DEFINE_SPINLOCK(slob_lock);
-static DEFINE_SPINLOCK(block_lock);
+
+#define __get_slob_block(b) ((unsigned long)(b) & ~(PAGE_SIZE-1))
+
+static inline struct page *get_slob_page(const void *mem)
+{
+	void *virt = (void*)__get_slob_block(mem);
+
+	return virt_to_page(virt);
+}
+
+static inline void zero_slob_block(const void *b)
+{
+	struct page *page;
+	page = get_slob_page(b);
+	memset(&page->lru, 0, sizeof(page->lru));
+}
+
+static inline void *get_slob_block(const void *b)
+{
+	struct page *page;
+	page = get_slob_page(b);
+	return page->lru.next;
+}
+
+static inline void set_slob_block(const void *b, void *data)
+{
+	struct page *page;
+	page = get_slob_page(b);
+	page->lru.next = data;
+}
 
 static void slob_free(void *b, int size);
 static void slob_timer_cbk(void);
@@ -109,6 +136,7 @@ static void *slob_alloc(size_t size, gfp
 
 		if (!cur)
 			return 0;
+		zero_slob_block(cur);
 		slob_free(cur, PAGE_SIZE);
 		spin_lock_irqsave(&slob_lock, flags);
 		cur = slobfree;
@@ -163,7 +191,6 @@ void *__kmalloc(size_t size, gfp_t gfp)
 {
 	slob_t *m;
 	bigblock_t *bb;
-	unsigned long flags;
 
 	if (size < PAGE_SIZE - SLOB_UNIT) {
 		m = slob_alloc(size + SLOB_UNIT, gfp, 0);
@@ -178,10 +205,7 @@ void *__kmalloc(size_t size, gfp_t gfp)
 
 	bb->pages = (void *)__get_free_pages(gfp, bb->order);
 	if (bb->pages) {
-		spin_lock_irqsave(&block_lock, flags);
-		bb->next = bigblocks;
-		bigblocks = bb;
-		spin_unlock_irqrestore(&block_lock, flags);
+		set_slob_block(bb->pages, bb);
 		return bb->pages;
 	}
 
@@ -192,25 +216,16 @@ EXPORT_SYMBOL(__kmalloc);
 
 void kfree(const void *block)
 {
-	bigblock_t *bb, **last = &bigblocks;
-	unsigned long flags;
+	bigblock_t *bb;
 
 	if (!block)
 		return;
 
-	if (!((unsigned long)block & (PAGE_SIZE-1))) {
-		/* might be on the big block list */
-		spin_lock_irqsave(&block_lock, flags);
-		for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
-			if (bb->pages == block) {
-				*last = bb->next;
-				spin_unlock_irqrestore(&block_lock, flags);
-				free_pages((unsigned long)block, bb->order);
-				slob_free(bb, sizeof(bigblock_t));
-				return;
-			}
-		}
-		spin_unlock_irqrestore(&block_lock, flags);
+	bb = get_slob_block(block);
+	if (bb) {
+		free_pages((unsigned long)block, bb->order);
+		slob_free(bb, sizeof(bigblock_t));
+		return;
 	}
 
 	slob_free((slob_t *)block - 1, 0);
@@ -222,20 +237,13 @@ EXPORT_SYMBOL(kfree);
 unsigned int ksize(const void *block)
 {
 	bigblock_t *bb;
-	unsigned long flags;
 
 	if (!block)
 		return 0;
 
-	if (!((unsigned long)block & (PAGE_SIZE-1))) {
-		spin_lock_irqsave(&block_lock, flags);
-		for (bb = bigblocks; bb; bb = bb->next)
-			if (bb->pages == block) {
-				spin_unlock_irqrestore(&slob_lock, flags);
-				return PAGE_SIZE << bb->order;
-			}
-		spin_unlock_irqrestore(&block_lock, flags);
-	}
+	bb = get_slob_block(block);
+	if (bb)
+		return PAGE_SIZE << bb->order;
 
 	return ((slob_t *)block - 1)->units * SLOB_UNIT;
 }
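
For reference, the trick is that masking any address in the allocation
down to its page base gives virt_to_page() something to chew on, and the
unused page->lru.next field then carries the descriptor pointer, so the
lookup is a couple of loads instead of a list walk under a lock. The
standalone userspace sketch below only illustrates that shape of lookup;
every name in it (big_desc, desc_table, page_base, set_desc, get_desc)
is made up for illustration, posix_memalign() merely stands in for
__get_free_pages(), and the toy side table glosses over collisions that
mem_map, having exactly one struct page per physical page, never has.

/*
 * Userspace sketch of the lookup trick, for illustration only.
 * All names here are hypothetical; the kernel patch above does the
 * equivalent with virt_to_page() and page->lru.next.
 */
#define _POSIX_C_SOURCE 200112L
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE   4096UL
#define TABLE_SLOTS 256		/* toy stand-in for mem_map */

struct big_desc {
	int order;
	void *pages;
};

/* Toy per-page side table; collisions are simply ignored here. */
static struct {
	uintptr_t base;
	struct big_desc *desc;
} desc_table[TABLE_SLOTS];

/* Mask an interior pointer down to its page base (cf. __get_slob_block()). */
static uintptr_t page_base(const void *p)
{
	return (uintptr_t)p & ~(PAGE_SIZE - 1);
}

/* Record the descriptor for a page (cf. set_slob_block()). */
static void set_desc(const void *mem, struct big_desc *d)
{
	size_t slot = (page_base(mem) / PAGE_SIZE) % TABLE_SLOTS;

	desc_table[slot].base = page_base(mem);
	desc_table[slot].desc = d;
}

/* O(1) lookup replacing the old O(n) list walk (cf. get_slob_block()). */
static struct big_desc *get_desc(const void *mem)
{
	size_t slot = (page_base(mem) / PAGE_SIZE) % TABLE_SLOTS;

	if (desc_table[slot].base == page_base(mem))
		return desc_table[slot].desc;
	return NULL;
}

int main(void)
{
	struct big_desc d = { .order = 1 };
	void *pages;

	/* posix_memalign() stands in for __get_free_pages(gfp, order). */
	if (posix_memalign(&pages, PAGE_SIZE, 2 * PAGE_SIZE))
		return 1;
	d.pages = pages;
	set_desc(pages, &d);

	/* Any address inside the first page maps back to the descriptor. */
	struct big_desc *found = get_desc((char *)pages + 100);
	printf("order=%d\n", found ? found->order : -1);

	free(pages);
	return 0;
}

Built with gcc -std=c11, this prints "order=1". The point is just that
set_desc()/get_desc() mirror set_slob_block()/get_slob_block(): the
descriptor rides with the page itself, so the lookup is constant time
and nothing like block_lock is needed to guard a shared list.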