Message-Id: <20100203213914.D8654B1620@basil.firstfloor.org>
Date: Wed, 3 Feb 2010 22:39:14 +0100 (CET)
From: Andi Kleen <andi@...stfloor.org>
To: submit@...stfloor.org, linux-kernel@...r.kernel.org,
haicheng.li@...el.com, penberg@...helsinki.fi, linux-mm@...ck.org
Subject: [PATCH] [3/4] SLAB: Separate node initialization into separate function
Split the per-node kmem_list3 initialization out of cpuup_prepare() into a
new slab_node_prepare() helper that takes a node instead of a cpu, so it can
be called outside of CPU bringup.
No functional changes.
Needed for the next patch.
Signed-off-by: Andi Kleen <ak@...ux.intel.com>
---
mm/slab.c | 34 +++++++++++++++++++++-------------
1 file changed, 21 insertions(+), 13 deletions(-)
Index: linux-2.6.33-rc3-ak/mm/slab.c
===================================================================
--- linux-2.6.33-rc3-ak.orig/mm/slab.c
+++ linux-2.6.33-rc3-ak/mm/slab.c
@@ -1171,19 +1171,9 @@ free_array_cache:
}
}
-static int __cpuinit cpuup_prepare(long cpu)
+static int slab_node_prepare(int node)
{
struct kmem_cache *cachep;
- struct kmem_list3 *l3 = NULL;
- int node = cpu_to_node(cpu);
- const int memsize = sizeof(struct kmem_list3);
-
- /*
- * We need to do this right in the beginning since
- * alloc_arraycache's are going to use this list.
- * kmalloc_node allows us to add the slab to the right
- * kmem_list3 and not this cpu's kmem_list3
- */
list_for_each_entry(cachep, &cache_chain, next) {
/*
@@ -1192,9 +1182,10 @@ static int __cpuinit cpuup_prepare(long
* node has not already allocated this
*/
if (!cachep->nodelists[node]) {
- l3 = kmalloc_node(memsize, GFP_KERNEL, node);
+ struct kmem_list3 *l3;
+ l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
if (!l3)
- goto bad;
+ return -1;
kmem_list3_init(l3);
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
@@ -1213,6 +1204,23 @@ static int __cpuinit cpuup_prepare(long
cachep->batchcount + cachep->num;
spin_unlock_irq(&cachep->nodelists[node]->list_lock);
}
+ return 0;
+}
+
+static int __cpuinit cpuup_prepare(long cpu)
+{
+ struct kmem_cache *cachep;
+ struct kmem_list3 *l3 = NULL;
+ int node = cpu_to_node(cpu);
+
+ /*
+ * We need to do this right in the beginning since
+ * alloc_arraycache's are going to use this list.
+ * kmalloc_node allows us to add the slab to the right
+ * kmem_list3 and not this cpu's kmem_list3
+ */
+ if (slab_node_prepare(node) < 0)
+ goto bad;
/*
* Now we can go ahead with allocating the shared arrays and
--
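For reviewers who want to see where this is heading: because the helper now
takes a node rather than a cpu, it can be driven from contexts other than CPU
bringup. Below is only a rough sketch of the kind of caller the next patch
could add; the name slab_memory_callback(), the MEM_GOING_ONLINE hook and the
cache_chain_mutex locking are my illustration here, not something this patch
introduces.

	/*
	 * Sketch only -- not part of this patch.  Shows how a memory
	 * hotplug notifier in mm/slab.c could reuse slab_node_prepare()
	 * to set up the per-node lists before a node comes online.
	 * mm/slab.c would additionally need these headers:
	 */
	#include <linux/memory.h>
	#include <linux/notifier.h>

	static int slab_memory_callback(struct notifier_block *self,
					unsigned long action, void *arg)
	{
		struct memory_notify *mn = arg;
		int ret = 0;

		switch (action) {
		case MEM_GOING_ONLINE:
			/*
			 * Allocate a kmem_list3 for every cache on the
			 * node that is about to come online, so later
			 * allocations find their node lists in place.
			 * cache_chain is walked, so take the same mutex
			 * cpuup_callback holds around cpuup_prepare().
			 */
			mutex_lock(&cache_chain_mutex);
			if (slab_node_prepare(mn->status_change_nid) < 0)
				ret = -ENOMEM;
			mutex_unlock(&cache_chain_mutex);
			break;
		default:
			break;
		}
		return notifier_from_errno(ret);
	}

Registration would then be something along the lines of
hotplug_memory_notifier(slab_memory_callback, 1), mirroring the callback
mm/slub.c already has for memory hotplug.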