Disabling interrupts can be avoided now. However, list operations still require disabling interrupts since allocations can occur from interrupt contexts and there is no way to perform atomic list operations. So acquire the list lock opportunistically if there is a chance that list operations would be needed. This may result in needless synchronizations but allows the avoidance of synchronization in the majority of the cases. Dropping interrupt handling significantly simplifies the slowpath. Signed-off-by: Christoph Lameter --- mm/slub.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) Index: linux-2.6/mm/slub.c =================================================================== --- linux-2.6.orig/mm/slub.c 2011-04-15 14:30:05.000000000 -0500 +++ linux-2.6/mm/slub.c 2011-04-15 14:30:06.000000000 -0500 @@ -2225,13 +2225,11 @@ static void __slab_free(struct kmem_cach struct kmem_cache_node *n = NULL; #ifdef CONFIG_CMPXCHG_LOCAL unsigned long flags; - - local_irq_save(flags); #endif stat(s, FREE_SLOWPATH); if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr)) - goto out_unlock; + return; do { prior = page->freelist; @@ -2250,7 +2248,11 @@ static void __slab_free(struct kmem_cach * Otherwise the list_lock will synchronize with * other processors updating the list of slabs. 
*/ +#ifdef CONFIG_CMPXCHG_LOCAL + spin_lock_irqsave(&n->list_lock, flags); +#else spin_lock(&n->list_lock); +#endif } inuse = new.inuse; @@ -2266,7 +2268,7 @@ static void __slab_free(struct kmem_cach */ if (was_frozen) stat(s, FREE_FROZEN); - goto out_unlock; + return; } /* @@ -2289,12 +2291,10 @@ static void __slab_free(struct kmem_cach stat(s, FREE_ADD_PARTIAL); } } - - spin_unlock(&n->list_lock); - -out_unlock: #ifdef CONFIG_CMPXCHG_LOCAL - local_irq_restore(flags); + spin_unlock_irqrestore(&n->list_lock, flags); +#else + spin_unlock(&n->list_lock); #endif return; @@ -2307,9 +2307,10 @@ slab_empty: stat(s, FREE_REMOVE_PARTIAL); } - spin_unlock(&n->list_lock); #ifdef CONFIG_CMPXCHG_LOCAL - local_irq_restore(flags); + spin_unlock_irqrestore(&n->list_lock, flags); +#else + spin_unlock(&n->list_lock); #endif stat(s, FREE_SLAB); discard_slab(s, page); -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/