Message-Id: <20190520054017.32299-11-tobin@kernel.org>
Date: Mon, 20 May 2019 15:40:11 +1000
From: "Tobin C. Harding" <tobin@...nel.org>
To: Andrew Morton <akpm@...ux-foundation.org>,
Matthew Wilcox <willy@...radead.org>
Cc: "Tobin C. Harding" <tobin@...nel.org>,
Roman Gushchin <guro@...com>,
Alexander Viro <viro@....linux.org.uk>,
Christoph Hellwig <hch@...radead.org>,
Pekka Enberg <penberg@...helsinki.fi>,
David Rientjes <rientjes@...gle.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
Christopher Lameter <cl@...ux.com>,
Miklos Szeredi <mszeredi@...hat.com>,
Andreas Dilger <adilger@...ger.ca>,
Waiman Long <longman@...hat.com>,
Tycho Andersen <tycho@...ho.ws>, Theodore Ts'o <tytso@....edu>,
Andi Kleen <ak@...ux.intel.com>,
David Chinner <david@...morbit.com>,
Nick Piggin <npiggin@...il.com>,
Rik van Riel <riel@...hat.com>,
Hugh Dickins <hughd@...gle.com>,
Jonathan Corbet <corbet@....net>, linux-mm@...ck.org,
linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [RFC PATCH v5 10/16] xarray: Implement migration function for xa_node objects

Recently, Slab Movable Objects (SMO) support was implemented for the
SLUB allocator. The XArray can take advantage of this and make the
xa_node slab cache objects movable.

Implement functions to migrate objects and activate SMO when we
initialise the XArray slab cache.

This is based on initial code by Matthew Wilcox and was modified to
work with slab object migration.
Cc: Matthew Wilcox <willy@...radead.org>
Co-developed-by: Christoph Lameter <cl@...ux.com>
Signed-off-by: Tobin C. Harding <tobin@...nel.org>
---
lib/xarray.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 61 insertions(+)

diff --git a/lib/xarray.c b/lib/xarray.c
index a528a5277c9d..c6b077f59e88 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1993,12 +1993,73 @@ static void xa_node_ctor(void *arg)
INIT_LIST_HEAD(&node->private_list);
}
+static void xa_object_migrate(struct xa_node *node, int numa_node)
+{
+ struct xarray *xa = READ_ONCE(node->array);
+ void __rcu **slot;
+ struct xa_node *new_node;
+ int i;
+
+ /* Skip nodes that have already been freed or are not yet in the tree */
+ if (!xa || xa == XA_RCU_FREE)
+ return;
+
+ new_node = kmem_cache_alloc_node(xa_node_cachep, GFP_KERNEL, numa_node);
+ if (!new_node) {
+ pr_err("%s: slab cache allocation failed\n", __func__);
+ return;
+ }
+
+ xa_lock_irq(xa);
+
+ /* Check again under the lock in case the node was freed or reused */
+ if (xa != node->array) {
+ node = new_node;
+ goto unlock;
+ }
+
+ memcpy(new_node, node, sizeof(struct xa_node));
+
+ if (list_empty(&node->private_list))
+ INIT_LIST_HEAD(&new_node->private_list);
+ else
+ list_replace(&node->private_list, &new_node->private_list);
+
+ for (i = 0; i < XA_CHUNK_SIZE; i++) {
+ void *x = xa_entry_locked(xa, new_node, i);
+
+ if (xa_is_node(x))
+ rcu_assign_pointer(xa_to_node(x)->parent, new_node);
+ }
+ if (!new_node->parent)
+ slot = &xa->xa_head;
+ else
+ slot = &xa_parent_locked(xa, new_node)->slots[new_node->offset];
+ rcu_assign_pointer(*slot, xa_mk_node(new_node));
+
+unlock:
+ xa_unlock_irq(xa);
+ xa_node_free(node);
+ rcu_barrier();
+}
+
+static void xa_migrate(struct kmem_cache *s, void **objects, int nr,
+ int node, void *_unused)
+{
+ int i;
+
+ for (i = 0; i < nr; i++)
+ xa_object_migrate(objects[i], node);
+}
+
+
void __init xarray_slabcache_init(void)
{
xa_node_cachep = kmem_cache_create("xarray_node",
sizeof(struct xa_node), 0,
SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
xa_node_ctor);
+ kmem_cache_setup_mobility(xa_node_cachep, NULL, xa_migrate);
}
#ifdef XA_DEBUG
--
2.21.0