Message-Id: <20190820223259.22348-17-willy@infradead.org>
Date: Tue, 20 Aug 2019 15:32:37 -0700
From: Matthew Wilcox <willy@infradead.org>
To: netdev@vger.kernel.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: [PATCH 16/38] qrtr: Convert qrtr_nodes to XArray
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
Move the kref protection under the xa_lock too. It's a little
disconcerting not to check the error code from xa_store(),
but the original code doesn't return an errno from qrtr_node_assign()
and that would be a larger change to the driver than I'm comfortable
making.
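
An aside on the two patterns in play here, since they may be
unfamiliar. kref_put_lock() works like kref_put_mutex(): the lock is
only taken when the refcount is about to drop to zero, and the release
callback is invoked with the lock held and is responsible for dropping
it. A minimal sketch of the pattern (hypothetical foo/foo_table names,
not part of this patch):

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/slab.h>
	#include <linux/xarray.h>

	static DEFINE_XARRAY(foo_table);

	struct foo {
		struct kref ref;
		unsigned long id;
	};

	/* Runs with foo_table's xa_lock held; must drop it before returning. */
	static void __foo_release(struct kref *kref)
		__releases(foo_table.xa_lock)
	{
		struct foo *foo = container_of(kref, struct foo, ref);

		__xa_erase(&foo_table, foo->id);	/* lock already held */
		xa_unlock(&foo_table);
		kfree(foo);
	}

	static void foo_put(struct foo *foo)
	{
		/* Takes foo_table's xa_lock only if the refcount hits zero. */
		kref_put_lock(&foo->ref, __foo_release, &foo_table.xa_lock);
	}

As for xa_store(): it returns the old entry on success or an
XA_ERROR() entry on failure, so checking it would look roughly like
the sketch below. This assumes qrtr_node_assign() were changed to
return an int, which is exactly the larger change I'd rather avoid:

	void *old = xa_store(&qrtr_nodes, nid, node, GFP_KERNEL);

	if (xa_is_err(old))
		return xa_err(old);
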
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
net/qrtr/qrtr.c | 23 ++++++++++++-----------
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 6c8b0f6d28f9..e02fa6be76d2 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -97,10 +97,10 @@ static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
static unsigned int qrtr_local_nid = NUMA_NO_NODE;
/* for node ids */
-static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
+static DEFINE_XARRAY(qrtr_nodes);
/* broadcast list */
static LIST_HEAD(qrtr_all_nodes);
-/* lock for qrtr_nodes, qrtr_all_nodes and node reference */
+/* lock for qrtr_all_nodes */
static DEFINE_MUTEX(qrtr_node_lock);
/* local port allocation management */
@@ -138,15 +138,18 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
/* Release node resources and free the node.
*
* Do not call directly, use qrtr_node_release. To be used with
- * kref_put_mutex. As such, the node mutex is expected to be locked on call.
+ * kref_put_lock. As such, the xa_lock is expected to be held on call.
*/
static void __qrtr_node_release(struct kref *kref)
+ __releases(qrtr_nodes.xa_lock)
{
struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
if (node->nid != QRTR_EP_NID_AUTO)
- radix_tree_delete(&qrtr_nodes, node->nid);
+ __xa_erase(&qrtr_nodes, node->nid);
+ xa_unlock(&qrtr_nodes);
+ mutex_lock(&qrtr_node_lock);
list_del(&node->item);
mutex_unlock(&qrtr_node_lock);
@@ -167,7 +170,7 @@ static void qrtr_node_release(struct qrtr_node *node)
{
if (!node)
return;
- kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
+ kref_put_lock(&node->ref, __qrtr_node_release, &qrtr_nodes.xa_lock);
}
/* Pass an outgoing packet socket buffer to the endpoint driver. */
@@ -215,10 +218,10 @@ static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
{
struct qrtr_node *node;
- mutex_lock(&qrtr_node_lock);
- node = radix_tree_lookup(&qrtr_nodes, nid);
+ xa_lock(&qrtr_nodes);
+ node = xa_load(&qrtr_nodes, nid);
node = qrtr_node_acquire(node);
- mutex_unlock(&qrtr_node_lock);
+ xa_unlock(&qrtr_nodes);
return node;
}
@@ -233,10 +236,8 @@ static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
return;
- mutex_lock(&qrtr_node_lock);
- radix_tree_insert(&qrtr_nodes, nid, node);
node->nid = nid;
- mutex_unlock(&qrtr_node_lock);
+ xa_store(&qrtr_nodes, nid, node, GFP_KERNEL);
}
/**
--
2.23.0.rc1