Message-ID: <Pine.LNX.4.64.0608272232130.24179@schroedinger.engr.sgi.com>
Date: Sun, 27 Aug 2006 22:33:28 -0700 (PDT)
From: Christoph Lameter <clameter@....com>
To: akpm@...l.org
cc: Marcelo Tosatti <marcelo@...ck.org>, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, Andi Kleen <ak@...e.de>, mpm@...enic.com,
Manfred Spraul <manfred@...orfullife.com>,
Dave Chinner <dgc@....com>
Subject: Re: [MODSLAB 2/4] A slab allocator: SLABIFIER
Some fixups: clean up the #ifdefs around the NUMA partial-list search and
use the right list iterator, list_for_each_entry(), to walk the slab lists
instead of open-coding list_for_each() plus container_of().
Signed-off-by: Christoph Lameter <clameter@....com>
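
For anyone not following the iterator change: list_for_each() walks bare
list_head nodes, so the loop body has to call container_of() by hand,
while list_for_each_entry() folds that conversion into the macro and also
gets rid of the temporary struct list_head pointer. A minimal userspace
sketch of the difference follows; the list macros below are a stripped-down
stand-in for <linux/list.h>, not the kernel headers, and the hypothetical
struct item plays the role of struct page (its list member is named lru to
match):

        #include <stdio.h>
        #include <stddef.h>

        struct list_head { struct list_head *next, *prev; };

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        /* Open-coded iteration: loop over list_head, convert by hand. */
        #define list_for_each(pos, head) \
                for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

        /* Entry iteration: the container_of() step happens in the macro. */
        #define list_for_each_entry(pos, head, member)                          \
                for ((pos) = container_of((head)->next, typeof(*(pos)), member);\
                     &(pos)->member != (head);                                  \
                     (pos) = container_of((pos)->member.next,                   \
                                          typeof(*(pos)), member))

        struct item {
                int value;
                struct list_head lru;   /* named after struct page's member */
        };

        int main(void)
        {
                struct list_head head = { &head, &head };
                struct item a = { 1 }, b = { 2 };
                struct list_head *h;
                struct item *it;

                /* Link a and b onto the list by hand. */
                a.lru.prev = &head;   a.lru.next = &b.lru;
                b.lru.prev = &a.lru;  b.lru.next = &head;
                head.next = &a.lru;   head.prev = &b.lru;

                /* Before: list_for_each() plus an explicit container_of(). */
                list_for_each(h, &head) {
                        struct item *i = container_of(h, struct item, lru);
                        printf("open-coded: %d\n", i->value);
                }

                /* After: list_for_each_entry() hides the conversion. */
                list_for_each_entry(it, &head, lru)
                        printf("entry: %d\n", it->value);

                return 0;
        }

That is exactly the transformation the hunks below apply to the old
numa_search()/get_partial() pair and to count_objects().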
Index: linux-2.6.18-rc4-mm3/mm/slabifier.c
===================================================================
--- linux-2.6.18-rc4-mm3.orig/mm/slabifier.c 2006-08-26 19:10:49.594764694 -0700
+++ linux-2.6.18-rc4-mm3/mm/slabifier.c 2006-08-27 22:31:24.188711553 -0700
@@ -112,12 +112,12 @@ static __always_inline void dec_object_c
static __always_inline void set_object_counter(struct page *page,
int counter)
{
- (*object_counter(page))= counter;
+ *object_counter(page) = counter;
}
static __always_inline int get_object_counter(struct page *page)
{
- return (*object_counter(page));
+ return *object_counter(page);
}
/*
@@ -168,60 +168,58 @@ static __always_inline int lock_and_del_
return 0;
}
-struct page *numa_search(struct slab *s, int node)
-{
+/*
+ * Get a partial page, lock it and return it.
+ */
#ifdef CONFIG_NUMA
- struct list_head *h;
+static struct page *get_partial(struct slab *s, int node)
+{
struct page *page;
+ int searchnode = (node == -1) ? numa_node_id() : node;
+ spin_lock(&s->list_lock);
/*
* Search for slab on the right node
*/
-
- if (node == -1)
- node = numa_node_id();
-
- list_for_each(h, &s->partial) {
- page = container_of(h, struct page, lru);
-
- if (likely(page_to_nid(page) == node) &&
+ list_for_each_entry(page, &s->partial, lru)
+ if (likely(page_to_nid(page) == searchnode) &&
lock_and_del_slab(s, page))
- return page;
+ goto out;
+
+ if (likely(node == -1)) {
+ /*
+ * We can fall back to any other node in order to
+ * reduce the size of the partial list.
+ */
+ list_for_each_entry(page, &s->partial, lru)
+ if (likely(lock_and_del_slab(s, page)))
+ goto out;
}
-#endif
- return NULL;
-}
-/*
- * Get a partial page, lock it and return it.
- */
+ /* Nothing found */
+ page = NULL;
+out:
+ spin_unlock(&s->list_lock);
+ return page;
+}
+#else
static struct page *get_partial(struct slab *s, int node)
{
struct page *page;
- struct list_head *h;
spin_lock(&s->list_lock);
-
- page = numa_search(s, node);
- if (page)
- goto out;
-#ifdef CONFIG_NUMA
- if (node >= 0)
- goto fail;
-#endif
-
- list_for_each(h, &s->partial) {
- page = container_of(h, struct page, lru);
-
+ list_for_each_entry(page, &s->partial, lru)
if (likely(lock_and_del_slab(s, page)))
goto out;
- }
-fail:
+
+ /* No slab or all slabs busy */
page = NULL;
out:
spin_unlock(&s->list_lock);
return page;
}
+#endif
+
/*
* Debugging checks
@@ -758,8 +756,7 @@ dumpret:
goto out_unlock;
if (unlikely(get_object_counter(page) == 0)) {
- if (s->objects > 1)
- remove_partial(s, page);
+ remove_partial(s, page);
check_free_chain(s, page);
slab_unlock(page);
discard_slab(s, page);
@@ -908,6 +905,7 @@ static int slab_shrink(struct slab_cache
* This will put the slab on the front of the partial
* list, the used list or free it.
*/
+ ClearPageActive(page);
putback_slab(s, page);
}
local_irq_restore(flags);
@@ -957,15 +955,12 @@ static int slab_destroy(struct slab_cach
static unsigned long count_objects(struct slab *s, struct list_head *list)
{
int count = 0;
- struct list_head *h;
+ struct page *page;
unsigned long flags;
spin_lock_irqsave(&s->list_lock, flags);
- list_for_each(h, list) {
- struct page *page = lru_to_first_page(h);
-
+ list_for_each_entry(page, list, lru)
count += get_object_counter(page);
- }
spin_unlock_irqrestore(&s->list_lock, flags);
return count;
}
@@ -975,23 +970,21 @@ static unsigned long slab_objects(struct
unsigned long *p_partial)
{
struct slab *s = (void *)sc;
- int partial;
+ int partial = count_objects(s, &s->partial);
+ int nr_slabs = atomic_read(&s->nr_slabs);
int active = 0; /* Active slabs */
int nr_active = 0; /* Objects in active slabs */
int cpu;
- int nr_slabs = atomic_read(&s->nr_slabs);
for_each_possible_cpu(cpu) {
struct page *page = s->active[cpu];
- if (s->active[cpu]) {
+ if (page) {
nr_active++;
active += get_object_counter(page);
}
}
- partial = count_objects(s, &s->partial);
-
if (p_partial)
*p_partial = s->nr_partial;
-
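
As a postscript, the control flow of the reworked NUMA get_partial() above
can be modeled standalone: one node-local pass first, then a fall-back pass
over pages from any node that only runs when the caller passed node == -1
(no node preference), which also helps shrink the partial list. All names
here (fake_page, try_take, get_partial_model) are hypothetical stand-ins
for the real slabifier structures and lock_and_del_slab(); the list_lock
locking is omitted:

        #include <stdio.h>
        #include <stdbool.h>
        #include <stddef.h>

        /* Hypothetical stand-ins for the real structures in slabifier.c. */
        struct fake_page {
                int nid;                  /* node the page lives on */
                struct fake_page *next;   /* simplified partial list */
                bool busy;
        };

        /* Model of lock_and_del_slab(): claim the page unless it is busy. */
        static bool try_take(struct fake_page *page)
        {
                return !page->busy;
        }

        /*
         * Two-pass search modeled on the patched get_partial():
         * pass 1 takes only node-local pages; pass 2 runs only for
         * node == -1 and takes a page from any node.
         */
        static struct fake_page *get_partial_model(struct fake_page *partial,
                                                   int node, int this_node)
        {
                int searchnode = (node == -1) ? this_node : node;
                struct fake_page *page;

                for (page = partial; page; page = page->next)
                        if (page->nid == searchnode && try_take(page))
                                return page;

                if (node == -1)         /* caller has no node preference */
                        for (page = partial; page; page = page->next)
                                if (try_take(page))
                                        return page;

                return NULL;            /* nothing found or all busy */
        }

        int main(void)
        {
                struct fake_page p2 = { .nid = 1, .next = NULL, .busy = false };
                struct fake_page p1 = { .nid = 0, .next = &p2, .busy = true };
                struct fake_page *got = get_partial_model(&p1, -1, 0);

                /* p1 is node-local but busy, so the fallback pass returns p2. */
                printf("got node %d\n", got ? got->nid : -1);
                return 0;
        }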