[PATCH 04/13] This patch changes only the alloc_layer() portion of ridr.c,
making it use the per-cpu pool of preloaded ridr layer structures.

Signed-off-by: Nadia Derbey

---
 lib/ridr.c |   35 +++++++++++++++++++++++++----------
 1 file changed, 25 insertions(+), 10 deletions(-)

Index: linux-2.6.25-rc8-mm1/lib/ridr.c
===================================================================
--- linux-2.6.25-rc8-mm1.orig/lib/ridr.c    2008-04-11 17:40:13.000000000 +0200
+++ linux-2.6.25-rc8-mm1/lib/ridr.c 2008-04-11 17:43:44.000000000 +0200
@@ -22,20 +22,35 @@ struct ridr_preget {
 };
 DEFINE_PER_CPU(struct ridr_preget, ridr_pregets) = { 0, };
 
+static inline gfp_t ridr_gfp_mask(struct ridr *idp)
+{
+        return idp->gfp_mask & __GFP_BITS_MASK;
+}
+
 static struct ridr_layer *alloc_layer(struct ridr *idp)
 {
-        struct ridr_layer *p;
-        unsigned long flags;
+        struct ridr_layer *ret = NULL;
+        gfp_t gfp_mask = ridr_gfp_mask(idp);
 
-        spin_lock_irqsave(&idp->lock, flags);
-        p = idp->id_free;
-        if (p) {
-                idp->id_free = p->ary[0];
-                idp->id_free_cnt--;
-                p->ary[0] = NULL;
+        if (!(gfp_mask & __GFP_WAIT)) {
+                struct ridr_preget *ridp;
+
+                /*
+                 * Provided the caller has preloaded here, we will always
+                 * succeed in getting a node here (and never reach
+                 * kmem_cache_alloc)
+                 */
+                ridp = &__get_cpu_var(ridr_pregets);
+                if (ridp->nr) {
+                        ret = ridp->layers[ridp->nr - 1];
+                        ridp->layers[ridp->nr - 1] = NULL;
+                        ridp->nr--;
+                }
         }
-        spin_unlock_irqrestore(&idp->lock, flags);
-        return(p);
+        if (ret == NULL)
+                ret = kmem_cache_alloc(ridr_layer_cache, gfp_mask);
+
+        return ret;
 }
 
 /* only called when idp->lock is held */
--
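
As a usage note for reviewers: the per-cpu pool is consulted only when the
ridr's gfp mask does not allow sleeping, so callers are expected to preload
outside their lock and then allocate inside it. Below is a minimal sketch of
that calling pattern, assuming the ridr API mirrors the existing idr one;
ridr_pre_get(), ridr_get_new() and assign_id() are hypothetical names that
are not part of this patch:

        /*
         * Sketch only: ridr_pre_get()/ridr_get_new() are assumed to behave
         * like idr_pre_get()/idr_get_new(); they are not defined here.
         */
        int assign_id(struct ridr *idp, void *ptr, spinlock_t *lock, int *id)
        {
                int err;

                do {
                        /* May sleep: refills the per-cpu ridr_pregets pool. */
                        if (!ridr_pre_get(idp, GFP_KERNEL))
                                return -ENOMEM;

                        spin_lock(lock);
                        /*
                         * No sleeping here: with this patch, alloc_layer()
                         * pulls a preloaded layer from ridr_pregets when the
                         * gfp mask lacks __GFP_WAIT, and only falls back to
                         * kmem_cache_alloc() if the pool is empty.
                         */
                        err = ridr_get_new(idp, ptr, id);
                        spin_unlock(lock);
                } while (err == -EAGAIN);

                return err;
        }

Note also that the rewritten alloc_layer() no longer takes idp->lock at all:
the per-cpu pool plus the direct kmem_cache_alloc() fallback replace the old
idp->id_free list on this path.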