Use the new dec/inc functions to simplify SLUB's accounting of pages.

Signed-off-by: Christoph Lameter

---
 mm/slub.c |   13 ++++---------
 1 files changed, 4 insertions(+), 9 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-08-27 19:22:13.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-08-27 21:02:51.000000000 -0700
@@ -1038,7 +1038,6 @@ static inline void kmem_cache_open_debug
 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page * page;
-	int pages = 1 << s->order;
 
 	if (s->order)
 		flags |= __GFP_COMP;
@@ -1054,10 +1053,9 @@ static struct page *allocate_slab(struct
 	if (!page)
 		return NULL;
 
-	mod_zone_page_state(page_zone(page),
+	inc_zone_page_state(page,
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		pages);
+		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE);
 
 	return page;
 }
@@ -1124,8 +1122,6 @@ out:
 
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
-	int pages = 1 << s->order;
-
 	if (unlikely(SlabDebug(page))) {
 		void *p;
 
@@ -1135,10 +1131,9 @@ static void __free_slab(struct kmem_cach
 		ClearSlabDebug(page);
 	}
 
-	mod_zone_page_state(page_zone(page),
+	dec_zone_page_state(page,
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		- pages);
+		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE);
 
 	page->mapping = NULL;
 	__free_pages(page, s->order);
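
For reference, the inc/dec helpers used above adjust the same per-zone
counters as mod_zone_page_state(), just by a fixed +1/-1 for the zone the
page belongs to. The sketch below is only an illustration of that
relationship (the _sketch names are hypothetical); the real helpers are
declared in include/linux/vmstat.h and additionally batch updates through
per-cpu differentials on SMP.

#include <linux/mm.h>		/* page_zone() */
#include <linux/vmstat.h>	/* mod_zone_page_state(), enum zone_stat_item */

/* Sketch only: roughly what inc_zone_page_state() amounts to. */
static inline void inc_zone_page_state_sketch(struct page *page,
					enum zone_stat_item item)
{
	/* Bump the counter of the zone owning this page by one. */
	mod_zone_page_state(page_zone(page), item, 1);
}

/* Sketch only: roughly what dec_zone_page_state() amounts to. */
static inline void dec_zone_page_state_sketch(struct page *page,
					enum zone_stat_item item)
{
	/* Drop the same counter by one. */
	mod_zone_page_state(page_zone(page), item, -1);
}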