We can now use this_cpu_cmpxchg_double() to update the two 64-bit values
that together form the entire description of the per-cpu freelist. There
is no longer any need to disable preemption.

Signed-off-by: Christoph Lameter

Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c	2014-12-09 12:31:45.867575731 -0600
+++ linux/mm/slub.c	2014-12-09 12:31:45.867575731 -0600
@@ -2272,21 +2272,15 @@ static inline void *get_freelist(struct
  * a call to the page allocator and the setup of a new slab.
  */
 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
-			  unsigned long addr, struct kmem_cache_cpu *c)
+			  unsigned long addr)
 {
 	void *freelist;
 	struct page *page;
 	unsigned long flags;
+	struct kmem_cache_cpu *c;
 
 	local_irq_save(flags);
-#ifdef CONFIG_PREEMPT
-	/*
-	 * We may have been preempted and rescheduled on a different
-	 * cpu before disabling interrupts. Need to reload cpu area
-	 * pointer.
-	 */
 	c = this_cpu_ptr(s->cpu_slab);
-#endif
 
 	if (!c->freelist || is_end_token(c->freelist))
 		goto new_slab;
@@ -2397,7 +2391,6 @@ static __always_inline void *slab_alloc_
 		gfp_t gfpflags, int node, unsigned long addr)
 {
 	void **object;
-	struct kmem_cache_cpu *c;
 	unsigned long tid;
 
 	if (slab_pre_alloc_hook(s, gfpflags))
@@ -2406,31 +2399,15 @@ static __always_inline void *slab_alloc_
 	s = memcg_kmem_get_cache(s, gfpflags);
 redo:
 	/*
-	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
-	 * enabled. We may switch back and forth between cpus while
-	 * reading from one cpu area. That does not matter as long
-	 * as we end up on the original cpu again when doing the cmpxchg.
-	 *
-	 * Preemption is disabled for the retrieval of the tid because that
-	 * must occur from the current processor. We cannot allow rescheduling
-	 * on a different processor between the determination of the pointer
-	 * and the retrieval of the tid.
-	 */
-	preempt_disable();
-	c = this_cpu_ptr(s->cpu_slab);
-
-	/*
 	 * The transaction ids are globally unique per cpu and per operation on
 	 * a per cpu queue. Thus they can be guarantee that the cmpxchg_double
 	 * occurs on the right processor and that there was no operation on the
 	 * linked list in between.
 	 */
-	tid = c->tid;
-	preempt_enable();
-
-	object = c->freelist;
-	if (unlikely(!object || is_end_token(object) ||!node_match_ptr(object, node))) {
-		object = __slab_alloc(s, gfpflags, node, addr, c);
+	tid = this_cpu_read(s->cpu_slab->tid);
+	object = this_cpu_read(s->cpu_slab->freelist);
+	if (unlikely(!object || is_end_token(object) || !node_match_ptr(object, node))) {
+		object = __slab_alloc(s, gfpflags, node, addr);
 		stat(s, ALLOC_SLOWPATH);
 	} else {
 		void *next_object = get_freepointer_safe(s, object);
@@ -2666,30 +2643,21 @@ static __always_inline void slab_free(st
 			struct page *page, void *x, unsigned long addr)
 {
 	void **object = (void *)x;
-	struct kmem_cache_cpu *c;
+	void *freelist;
 	unsigned long tid;
 
 	slab_free_hook(s, x);
 
 redo:
-	/*
-	 * Determine the currently cpus per cpu slab.
-	 * The cpu may change afterward. However that does not matter since
-	 * data is retrieved via this pointer. If we are on the same cpu
-	 * during the cmpxchg then the free will succedd.
-	 */
-	preempt_disable();
-	c = this_cpu_ptr(s->cpu_slab);
-
-	tid = c->tid;
-	preempt_enable();
+	tid = this_cpu_read(s->cpu_slab->tid);
+	freelist = this_cpu_read(s->cpu_slab->freelist);
 
-	if (likely(same_slab_page(s, page, c->freelist))) {
-		set_freepointer(s, object, c->freelist);
+	if (likely(same_slab_page(s, page, freelist))) {
+		set_freepointer(s, object, freelist);
 
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
-				c->freelist, tid,
+				freelist, tid,
 				object, next_tid(tid)))) {
 
 			note_cmpxchg_failure("slab_free", s, tid);
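
A note on the pattern for readers who have not seen it: the freelist head and the
transaction id (tid) are committed together in one double-width compare-and-exchange,
so a stale snapshot fails the exchange even when the freelist pointer alone happens to
look unchanged (the classic ABA problem), and the tid, being unique per cpu and per
operation, also catches migration to another processor. Below is a minimal user-space
sketch of that idea, NOT the kernel implementation; the names (struct cpu_freelist,
pop_object, get_freepointer, slot) are invented for illustration, and C11 atomics stand
in for this_cpu_cmpxchg_double().

/*
 * User-space sketch only: a freelist pointer paired with a transaction id,
 * both updated by a single double-width compare-and-exchange.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct cpu_freelist {
	void *freelist;			/* first free object, NULL when empty */
	uint64_t tid;			/* bumped on every successful update */
};

static _Atomic struct cpu_freelist slot;	/* 128 bits on 64-bit targets */

/* Each free object stores the pointer to the next free object at offset 0. */
static void *get_freepointer(void *object)
{
	return *(void **)object;
}

static void *pop_object(void)
{
	struct cpu_freelist old, new;

	do {
		old = atomic_load(&slot);	/* snapshot pointer and tid together */
		if (!old.freelist)
			return NULL;		/* the real allocator takes a slow path here */
		new.freelist = get_freepointer(old.freelist);
		new.tid = old.tid + 1;
		/* Retry if either the pointer or the tid changed under us. */
	} while (!atomic_compare_exchange_weak(&slot, &old, new));

	return old.freelist;
}

int main(void)
{
	void *objects[3] = { NULL, NULL, NULL };

	/* Chain a tiny freelist: objects[2] -> objects[1] -> objects[0] -> NULL. */
	objects[1] = &objects[0];
	objects[2] = &objects[1];
	atomic_store(&slot, (struct cpu_freelist){ .freelist = &objects[2], .tid = 0 });

	while (pop_object())
		;
	printf("tid after draining: %llu\n",
	       (unsigned long long)atomic_load(&slot).tid);
	return 0;
}

Because the tid check already detects both concurrent list operations and a move to a
different cpu between reading the per-cpu fields and the cmpxchg, the explicit
preempt_disable()/preempt_enable() pair around the tid read becomes unnecessary, which
is what the patch removes.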