Message-Id: <20190308041426.16654-13-tobin@kernel.org>
Date: Fri, 8 Mar 2019 15:14:23 +1100
From: "Tobin C. Harding" <tobin@...nel.org>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: "Tobin C. Harding" <tobin@...nel.org>,
Christopher Lameter <cl@...ux.com>,
Pekka Enberg <penberg@...helsinki.fi>,
Matthew Wilcox <willy@...radead.org>,
Tycho Andersen <tycho@...ho.ws>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [RFC 12/15] xarray: Implement migration function for objects
Implement a function to migrate xarray node objects to a target NUMA
node and register the radix tree node cache for slab object migration.
This is based on initial code by Matthew Wilcox, modified to work with
slab object migration.
Co-developed-by: Christoph Lameter <cl@...ux.com>
Signed-off-by: Tobin C. Harding <tobin@...nel.org>
---
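Note for reviewers: a cache opts in to object migration by registering
callbacks with kmem_cache_setup_mobility(), added earlier in this
series. Below is a minimal sketch of what that looks like for a
hypothetical cache (struct foo, foo_cachep, foo_object_migrate() and
foo_init() are made-up names for illustration; only the migrate
callback signature and the NULL isolate argument follow what this
patch does for the radix tree node cache):

    #include <linux/init.h>
    #include <linux/slab.h>

    struct foo {
            unsigned long data[4];
    };

    static struct kmem_cache *foo_cachep;

    /* Cache-specific move of one object; details elided in this sketch. */
    static void foo_object_migrate(void *obj, int numa_node)
    {
    }

    /*
     * Called by the slab migration core with an array of objects to
     * move toward the given NUMA node.
     */
    static void foo_migrate(struct kmem_cache *s, void **objects, int nr,
                            int node, void *private)
    {
            int i;

            for (i = 0; i < nr; i++)
                    foo_object_migrate(objects[i], node);
    }

    static int __init foo_init(void)
    {
            foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
                                           0, 0, NULL);
            if (!foo_cachep)
                    return -ENOMEM;
            kmem_cache_setup_mobility(foo_cachep, NULL, foo_migrate);
            return 0;
    }

The real registration for the radix tree node cache is at the end of
radix_tree_init() below; all of the tree-specific work happens in
xa_object_migrate().
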
 lib/radix-tree.c | 13 +++++++++++++
 lib/xarray.c     | 46 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 59 insertions(+)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 14d51548bea6..9412c2853726 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1613,6 +1613,17 @@ static int radix_tree_cpu_dead(unsigned int cpu)
 	return 0;
 }
 
+extern void xa_object_migrate(struct xa_node *node, int numa_node);
+
+static void radix_tree_migrate(struct kmem_cache *s, void **objects, int nr,
+			       int node, void *private)
+{
+	int i;
+
+	for (i = 0; i < nr; i++)
+		xa_object_migrate(objects[i], node);
+}
+
 void __init radix_tree_init(void)
 {
 	int ret;
@@ -1627,4 +1638,6 @@ void __init radix_tree_init(void)
 	ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
 					NULL, radix_tree_cpu_dead);
 	WARN_ON(ret < 0);
+	kmem_cache_setup_mobility(radix_tree_node_cachep, NULL,
+				  radix_tree_migrate);
 }
diff --git a/lib/xarray.c b/lib/xarray.c
index 81c3171ddde9..4f6f17c87769 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1950,6 +1950,52 @@ void xa_destroy(struct xarray *xa)
 }
 EXPORT_SYMBOL(xa_destroy);
 
+void xa_object_migrate(struct xa_node *node, int numa_node)
+{
+	struct xarray *xa = READ_ONCE(node->array);
+	void __rcu **slot;
+	struct xa_node *new_node;
+	int i;
+
+	/* Skip nodes that have been freed or are not yet in a tree. */
+	if (!xa || xa == XA_FREE_MARK)
+		return;
+
+	new_node = kmem_cache_alloc_node(radix_tree_node_cachep,
+					 GFP_KERNEL, numa_node);
+	if (!new_node)
+		return;
+
+	xa_lock_irq(xa);
+
+	/* Recheck under the lock; if we raced, discard the new node. */
+	if (xa != node->array || !list_empty(&node->private_list)) {
+		node = new_node;
+		goto unlock;
+	}
+
+	memcpy(new_node, node, sizeof(struct xa_node));
+
+	/* Point child nodes and the parent slot at the new node. */
+	INIT_LIST_HEAD(&new_node->private_list);
+	for (i = 0; i < XA_CHUNK_SIZE; i++) {
+		void *x = xa_entry_locked(xa, new_node, i);
+
+		if (xa_is_node(x))
+			rcu_assign_pointer(xa_to_node(x)->parent, new_node);
+	}
+	if (!new_node->parent)
+		slot = &xa->xa_head;
+	else
+		slot = &xa_parent_locked(xa, new_node)->slots[new_node->offset];
+	rcu_assign_pointer(*slot, xa_mk_node(new_node));
+
+unlock:
+	xa_unlock_irq(xa);
+	xa_node_free(node);
+	rcu_barrier();
+}
+
 #ifdef XA_DEBUG
 void xa_dump_node(const struct xa_node *node)
 {
--
2.21.0