Message-Id: <1172133274.6374.12.camel@twins>
Date: Thu, 22 Feb 2007 09:34:34 +0100
From: Peter Zijlstra <a.p.zijlstra@...llo.nl>
To: Christoph Lameter <clameter@....com>
Cc: linux-kernel@...r.kernel.org, linux-mm <linux-mm@...ck.org>,
akpm@...ux-foundation.org
Subject: Re: SLUB: The unqueued Slab allocator
On Wed, 2007-02-21 at 23:00 -0800, Christoph Lameter wrote:
> +/*
> + * Lock order:
> + * 1. slab_lock(page)
> + * 2. slab->list_lock
> + *
That seems to contradict this: get_partial() takes s->list_lock first and only then try-locks the slab page via lock_and_del_slab(), which is the reverse of the order documented above:
> +/*
> + * Lock page and remove it from the partial list
> + *
> + * Must hold list_lock
> + */
> +static __always_inline int lock_and_del_slab(struct kmem_cache *s,
> +						struct page *page)
> +{
> +	if (slab_trylock(page)) {
> +		list_del(&page->lru);
> +		s->nr_partial--;
> +		return 1;
> +	}
> +	return 0;
> +}
> +
> +/*
> + * Get a partial page, lock it and return it.
> + */
> +#ifdef CONFIG_NUMA
> +static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
> +{
> +	struct page *page;
> +	int searchnode = (node == -1) ? numa_node_id() : node;
> +
> +	if (!s->nr_partial)
> +		return NULL;
> +
> +	spin_lock(&s->list_lock);
> +	/*
> +	 * Search for slab on the right node
> +	 */
> +	list_for_each_entry(page, &s->partial, lru)
> +		if (likely(page_to_nid(page) == searchnode) &&
> +				lock_and_del_slab(s, page))
> +			goto out;
> +
> +	if (likely(!(flags & __GFP_THISNODE))) {
> +		/*
> +		 * We can fall back to any other node in order to
> +		 * reduce the size of the partial list.
> +		 */
> +		list_for_each_entry(page, &s->partial, lru)
> +			if (likely(lock_and_del_slab(s, page)))
> +				goto out;
> +	}
> +
> +	/* Nothing found */
> +	page = NULL;
> +out:
> +	spin_unlock(&s->list_lock);
> +	return page;
> +}
> +#else
> +static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
> +{
> +	struct page *page;
> +
> +	/*
> +	 * Racy check. If we mistakenly see no partial slabs then we
> +	 * just allocate an empty slab.
> +	 */
> +	if (!s->nr_partial)
> +		return NULL;
> +
> +	spin_lock(&s->list_lock);
> +	list_for_each_entry(page, &s->partial, lru)
> +		if (likely(lock_and_del_slab(s, page)))
> +			goto out;
> +
> +	/* No slab or all slabs busy */
> +	page = NULL;
> +out:
> +	spin_unlock(&s->list_lock);
> +	return page;
> +}
> +#endif
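
To spell out the two orderings, here is a minimal userspace sketch (hypothetical pthread mutexes standing in for slab_lock(page) and s->list_lock; this is not the patch's code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: page_lock for slab_lock(page), list_lock for s->list_lock. */
static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Nesting claimed by the "Lock order" comment: slab lock outer, list lock inner. */
static void documented_order(void)
{
	pthread_mutex_lock(&page_lock);
	pthread_mutex_lock(&list_lock);
	pthread_mutex_unlock(&list_lock);
	pthread_mutex_unlock(&page_lock);
}

/*
 * Nesting actually used by get_partial(): list lock outer, slab lock only
 * try-locked inside lock_and_del_slab().  The trylock cannot block, so it
 * cannot deadlock against the other order, but the nesting is reversed.
 */
static bool get_partial_order(void)
{
	bool locked;

	pthread_mutex_lock(&list_lock);
	locked = (pthread_mutex_trylock(&page_lock) == 0);
	if (locked)
		pthread_mutex_unlock(&page_lock);
	pthread_mutex_unlock(&list_lock);
	return locked;
}

int main(void)
{
	documented_order();
	printf("get_partial-style trylock %s\n",
	       get_partial_order() ? "succeeded" : "failed");
	return 0;
}

Either the "Lock order" comment or the nesting in get_partial() presumably wants updating so the two agree.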