--- linux-2.6.19/mm/slab.c	2006-12-04 11:50:19.000000000 +0100
+++ linux-2.6.19-ed/mm/slab.c	2006-12-04 17:25:02.000000000 +0100
@@ -371,6 +371,19 @@ static void kmem_list3_init(struct kmem_
 	} while (0)
 
 /*
+ * Define the reciprocal value of B so that
+ * ((u32)A / (u32)B) can be replaced by:
+ *   (((u64)A * RECIPROCAL_VALUE(B)) >> 32)
+ * If RECIPROCAL_VALUE(B) is precalculated, we replace a divide with a multiply
+ */
+static inline u32 reciprocal_value(unsigned int k)
+{
+	u64 val = (1LL << 32) + (k - 1);
+	do_div(val, k);
+	return (u32)val;
+}
+
+/*
  * struct kmem_cache
  *
  * manages a cache.
@@ -385,6 +398,7 @@ struct kmem_cache {
 	unsigned int shared;
 
 	unsigned int buffer_size;
+	unsigned int reciprocal_buffer_size;
 /* 3) touched by every alloc & free from the backend */
 	struct kmem_list3 *nodelists[MAX_NUMNODES];
 
@@ -626,10 +640,17 @@ static inline void *index_to_obj(struct
 	return slab->s_mem + cache->buffer_size * idx;
 }
 
+/*
+ * We want to avoid an expensive divide: (offset / cache->buffer_size)
+ * Using the fact that buffer_size is a constant for a particular cache,
+ * we can replace (offset / cache->buffer_size) by
+ *   ((u64)offset * cache->reciprocal_buffer_size) >> 32
+ */
 static inline unsigned int obj_to_index(struct kmem_cache *cache,
 					struct slab *slab, void *obj)
 {
-	return (unsigned)(obj - slab->s_mem) / cache->buffer_size;
+	unsigned int offset = (obj - slab->s_mem);
+	return (u32)(((u64)offset * cache->reciprocal_buffer_size) >> 32);
 }
 
 /*
@@ -1400,6 +1421,8 @@ void __init kmem_cache_init(void)
 
 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
 					cache_line_size());
+	cache_cache.reciprocal_buffer_size =
+		reciprocal_value(cache_cache.buffer_size);
 
 	for (order = 0; order < MAX_ORDER; order++) {
 		cache_estimate(order, cache_cache.buffer_size,
@@ -2297,6 +2320,7 @@ kmem_cache_create (const char *name, siz
 	if (flags & SLAB_CACHE_DMA)
 		cachep->gfpflags |= GFP_DMA;
 	cachep->buffer_size = size;
+	cachep->reciprocal_buffer_size = reciprocal_value(size);
 
 	if (flags & CFLGS_OFF_SLAB) {
 		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
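
For reference, below is a minimal userspace sketch of the reciprocal-divide trick the patch relies on. It is not part of the patch: reciprocal_value() is re-implemented with a plain 64-bit division because the kernel's do_div() helper is not available in userspace, and the object size (192) and the offset range used in the check are arbitrary test values. It verifies that the multiply-and-shift gives the same result as a real divide over a generous range of offsets.

/*
 * Userspace sketch of the reciprocal divide used in obj_to_index().
 * Not part of the patch; do_div() is replaced by a plain 64-bit divide.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* reciprocal_value(k) ~= ceil(2^32 / k), precomputed once per cache. */
static inline uint32_t reciprocal_value(unsigned int k)
{
	uint64_t val = (1ULL << 32) + (k - 1);

	return (uint32_t)(val / k);	/* stands in for do_div(val, k) */
}

/* Same computation as the patched obj_to_index(): offset / size via a multiply. */
static inline unsigned int reciprocal_divide(unsigned int offset, uint32_t reciprocal)
{
	return (uint32_t)(((uint64_t)offset * reciprocal) >> 32);
}

int main(void)
{
	unsigned int size = 192;	/* hypothetical buffer_size of a cache */
	uint32_t r = reciprocal_value(size);
	unsigned int offset;

	/* Compare multiply+shift against the real divide for every offset in range. */
	for (offset = 0; offset < 64 * 4096; offset++)
		assert(reciprocal_divide(offset, r) == offset / size);

	printf("reciprocal_value(%u) = %u, all checks passed\n",
	       size, (unsigned int)r);
	return 0;
}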