This is necessary because the frozen bit has to be handled in the same
cmpxchg_double as the freelist and the counters.

Signed-off-by: Christoph Lameter

---
 mm/slub.c |    8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2011-05-24 09:40:08.000000000 -0500
+++ linux-2.6/mm/slub.c	2011-05-24 09:40:37.644875159 -0500
@@ -1284,6 +1284,7 @@ static struct page *new_slab(struct kmem
 
 	page->freelist = start;
 	page->inuse = 0;
+	page->frozen = 1;
 out:
 	return page;
 }
@@ -1422,7 +1423,6 @@ static inline int lock_and_freeze_slab(s
 {
 	if (slab_trylock(page)) {
 		__remove_partial(n, page);
-		page->frozen = 1;
 		return 1;
 	}
 	return 0;
@@ -1536,7 +1536,6 @@ static void unfreeze_slab(struct kmem_ca
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
-	page->frozen = 0;
 	if (page->inuse) {
 
 		if (page->freelist) {
@@ -1669,6 +1668,7 @@ static void deactivate_slab(struct kmem_
 	}
 	c->page = NULL;
 	c->tid = next_tid(c->tid);
+	page->frozen = 0;
 	unfreeze_slab(s, page, tail);
 }
 
@@ -1829,6 +1829,8 @@ static void *__slab_alloc(struct kmem_ca
 	stat(s, ALLOC_REFILL);
 
 load_freelist:
+	VM_BUG_ON(!page->frozen);
+
 	object = page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
@@ -1853,6 +1855,7 @@ new_slab:
 	page = get_partial(s, gfpflags, node);
 	if (page) {
 		stat(s, ALLOC_FROM_PARTIAL);
+		page->frozen = 1;
 		c->node = page_to_nid(page);
 		c->page = page;
 		goto load_freelist;
@@ -2373,6 +2376,7 @@ static void early_kmem_cache_node_alloc(
 	BUG_ON(!n);
 	page->freelist = get_freepointer(kmem_cache_node, n);
 	page->inuse++;
+	page->frozen = 0;
 	kmem_cache_node->node[node] = n;
 #ifdef CONFIG_SLUB_DEBUG
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
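
For readers not following the cmpxchg_double series: the point of keeping
frozen next to the freelist and the packed counters is that a slab can be
frozen or unfrozen in the very same double-word compare-exchange that
publishes the new freelist, so no other CPU can ever observe the two out of
sync. Below is a minimal userspace sketch of that pattern, assuming made-up
names (slab_state, cmpxchg_double_stub, try_freeze); it is plain C for
illustration, not kernel code, and the stub is not actually atomic.

/*
 * Sketch only: shows why frozen must live in the same word(s) that the
 * compare-exchange covers, together with the freelist pointer.
 */
#include <stdbool.h>
#include <stdio.h>

struct slab_state {
	void *freelist;			/* first free object, NULL if none */
	union {
		unsigned long counters;	/* inuse/objects/frozen packed together */
		struct {
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
		};
	};
};

/*
 * Stand-in for a double-word cmpxchg: succeeds only if both the freelist
 * pointer and the packed counters still hold the expected values.  A real
 * implementation would be one atomic instruction (e.g. cmpxchg16b) or a
 * fallback under the slab lock.
 */
static bool cmpxchg_double_stub(struct slab_state *s,
				void *old_free, unsigned long old_cnt,
				void *new_free, unsigned long new_cnt)
{
	if (s->freelist == old_free && s->counters == old_cnt) {
		s->freelist = new_free;
		s->counters = new_cnt;
		return true;
	}
	return false;
}

/* Freeze the slab while keeping freelist/inuse consistent with frozen. */
static void try_freeze(struct slab_state *s)
{
	struct slab_state old, new;

	do {
		old = *s;
		new = old;
		new.frozen = 1;	/* flipped in the same cmpxchg as the freelist */
	} while (!cmpxchg_double_stub(s, old.freelist, old.counters,
				      new.freelist, new.counters));
}

int main(void)
{
	struct slab_state s = { .freelist = NULL, .counters = 0 };

	s.objects = 8;
	try_freeze(&s);
	printf("frozen=%u inuse=%u objects=%u\n",
	       (unsigned)s.frozen, (unsigned)s.inuse, (unsigned)s.objects);
	return 0;
}

If frozen were kept in a separate flag word instead, a freeze could race with
a concurrent freelist update and leave the slab state momentarily
inconsistent, which is exactly what moving the frozen handling next to the
freelist handling avoids.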