Message-Id: <20190430030746.26102-10-tobin@kernel.org>
Date: Tue, 30 Apr 2019 13:07:40 +1000
From: "Tobin C. Harding" <tobin@...nel.org>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: "Tobin C. Harding" <tobin@...nel.org>,
Roman Gushchin <guro@...com>,
Alexander Viro <viro@....linux.org.uk>,
Christoph Hellwig <hch@...radead.org>,
Pekka Enberg <penberg@...helsinki.fi>,
David Rientjes <rientjes@...gle.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
Christopher Lameter <cl@...ux.com>,
Matthew Wilcox <willy@...radead.org>,
Miklos Szeredi <mszeredi@...hat.com>,
Andreas Dilger <adilger@...ger.ca>,
Waiman Long <longman@...hat.com>,
Tycho Andersen <tycho@...ho.ws>,
"Theodore Ts'o" <tytso@....edu>, Andi Kleen <ak@...ux.intel.com>,
David Chinner <david@...morbit.com>,
Nick Piggin <npiggin@...il.com>,
Rik van Riel <riel@...hat.com>,
Hugh Dickins <hughd@...gle.com>,
Jonathan Corbet <corbet@....net>, linux-mm@...ck.org,
linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [RFC PATCH v4 09/15] xarray: Implement migration function for objects

Implement functions to migrate objects. This is based on initial code by
Matthew Wilcox and was modified to work with slab object migration.

This patch cannot be merged until all radix tree and IDR users are
converted to the XArray, because xa_nodes and radix tree nodes share the
same slab cache (thanks Matthew).

Co-developed-by: Christoph Lameter <cl@...ux.com>
Signed-off-by: Tobin C. Harding <tobin@...nel.org>
---
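
For reviewers unfamiliar with the mobility API added earlier in this
series, here is a rough sketch of how a cache opts in.  The struct foo,
foo_cachep and foo_migrate() names are made up for illustration, and the
callback prototype is only inferred from radix_tree_migrate() in this
patch, so the earlier patches remain the authoritative reference for the
exact signatures.

    #include <linux/slab.h>
    #include <linux/init.h>

    /* Hypothetical cache that opts in to slab object migration. */
    struct foo {
            unsigned long key;
            void *payload;
    };

    static struct kmem_cache *foo_cachep;

    /*
     * Called by the slab allocator with an array of objects it would
     * like moved off their current slab page.  For each object,
     * allocate a replacement on the target node, copy it over,
     * re-point any users at the new object, then free the old one --
     * the same shape as xa_object_migrate() in this patch.
     */
    static void foo_migrate(struct kmem_cache *s, void **objects, int nr,
                            int node, void *private)
    {
            int i;

            for (i = 0; i < nr; i++) {
                    struct foo *old = objects[i];
                    struct foo *new = kmem_cache_alloc_node(s, GFP_KERNEL,
                                                            node);

                    if (!new)
                            continue;       /* leave the old object in place */

                    *new = *old;
                    /* ... update references to 'old' under the owner's lock ... */
                    kmem_cache_free(s, old);
            }
    }

    static int __init foo_init(void)
    {
            foo_cachep = KMEM_CACHE(foo, 0);
            if (!foo_cachep)
                    return -ENOMEM;

            /* As with radix_tree_node_cachep below, no isolate callback. */
            kmem_cache_setup_mobility(foo_cachep, NULL, foo_migrate);
            return 0;
    }
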
lib/radix-tree.c | 13 +++++++++++++
lib/xarray.c | 49 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 62 insertions(+)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 14d51548bea6..9412c2853726 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1613,6 +1613,17 @@ static int radix_tree_cpu_dead(unsigned int cpu)
return 0;
}
+extern void xa_object_migrate(void *tree_node, int numa_node);
+
+static void radix_tree_migrate(struct kmem_cache *s, void **objects, int nr,
+ int node, void *private)
+{
+ int i;
+
+ for (i = 0; i < nr; i++)
+ xa_object_migrate(objects[i], node);
+}
+
void __init radix_tree_init(void)
{
int ret;
@@ -1627,4 +1638,6 @@ void __init radix_tree_init(void)
ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
NULL, radix_tree_cpu_dead);
WARN_ON(ret < 0);
+ kmem_cache_setup_mobility(radix_tree_node_cachep, NULL,
+ radix_tree_migrate);
}
diff --git a/lib/xarray.c b/lib/xarray.c
index 6be3acbb861f..731dd3d8ddb8 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1971,6 +1971,55 @@ void xa_destroy(struct xarray *xa)
}
EXPORT_SYMBOL(xa_destroy);
+void xa_object_migrate(struct xa_node *node, int numa_node)
+{
+ struct xarray *xa = READ_ONCE(node->array);
+ void __rcu **slot;
+ struct xa_node *new_node;
+ int i;
+
+ /* Skip nodes that have already been freed or are not yet in a tree. */
+ if (!xa || xa == XA_RCU_FREE)
+ return;
+
+ new_node = kmem_cache_alloc_node(radix_tree_node_cachep,
+ GFP_KERNEL, numa_node);
+ if (!new_node)
+ return;
+
+ xa_lock_irq(xa);
+
+ /* Re-check under the lock: the node may have been freed or reused. */
+ if (xa != node->array) {
+ node = new_node;
+ goto unlock;
+ }
+
+ memcpy(new_node, node, sizeof(struct xa_node));
+
+ if (list_empty(&node->private_list))
+ INIT_LIST_HEAD(&new_node->private_list);
+ else
+ list_replace(&node->private_list, &new_node->private_list);
+
+ for (i = 0; i < XA_CHUNK_SIZE; i++) {
+ void *x = xa_entry_locked(xa, new_node, i);
+
+ if (xa_is_node(x))
+ rcu_assign_pointer(xa_to_node(x)->parent, new_node);
+ }
+ if (!new_node->parent)
+ slot = &xa->xa_head;
+ else
+ slot = &xa_parent_locked(xa, new_node)->slots[new_node->offset];
+ rcu_assign_pointer(*slot, xa_mk_node(new_node));
+
+unlock:
+ xa_unlock_irq(xa);
+ xa_node_free(node);
+ rcu_barrier();
+}
+
#ifdef XA_DEBUG
void xa_dump_node(const struct xa_node *node)
{
--
2.21.0