Message-ID: <20250307155725.219009-3-sdf@fomichev.me>
Date: Fri, 7 Mar 2025 07:57:23 -0800
From: Stanislav Fomichev <sdf@...ichev.me>
To: netdev@...r.kernel.org
Cc: davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
linux-kernel@...r.kernel.org,
horms@...nel.org,
donald.hunter@...il.com,
michael.chan@...adcom.com,
pavan.chebbi@...adcom.com,
andrew+netdev@...n.ch,
jdamato@...tly.com,
sdf@...ichev.me,
xuanzhuo@...ux.alibaba.com,
almasrymina@...gle.com,
asml.silence@...il.com,
dw@...idwei.uk
Subject: [PATCH net-next v1 2/4] net: protect net_devmem_dmabuf_bindings with new net_devmem_bindings_mutex
In the process of making the queue management API rtnl_lock-less, we
need a separate lock to protect the xarray that keeps the global list
of bindings.

Also change the ordering of 'posting' the binding to
net_devmem_dmabuf_bindings: xa_alloc is done after the binding is fully
initialized (so xa_load only returns fully instantiated bindings) and
xa_erase is done as the first step during unbind.
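
For context, the pattern this moves to is "publish after init": the
object only becomes visible in the global xarray once it is fully
constructed, and is hidden again as the very first step of teardown, so
concurrent lookups never observe a half-built binding. A minimal sketch
of that pattern, using hypothetical names (struct binding, bindings,
bindings_mutex, binding_publish/binding_unpublish) that are not taken
from the patch:

	/* Hypothetical sketch, not the actual devmem code. */
	static DEFINE_MUTEX(bindings_mutex);
	static DEFINE_XARRAY_FLAGS(bindings, XA_FLAGS_ALLOC1);

	struct binding {
		u32 id;
		/* ... fields fully initialized before publish ... */
	};

	/* Publish: the last step of construction. */
	static int binding_publish(struct binding *b, u32 *next_id)
	{
		int err;

		mutex_lock(&bindings_mutex);
		err = xa_alloc_cyclic(&bindings, &b->id, b, xa_limit_32b,
				      next_id, GFP_KERNEL);
		mutex_unlock(&bindings_mutex);
		return err < 0 ? err : 0;
	}

	/* Unpublish: the first step of teardown. */
	static void binding_unpublish(struct binding *b)
	{
		mutex_lock(&bindings_mutex);
		xa_erase(&bindings, b->id);
		mutex_unlock(&bindings_mutex);
	}

With this ordering, any binding returned by xa_load() under the mutex
is guaranteed to be fully initialized, and the error path after a
failed publish can simply free the object without touching the xarray.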
Cc: Mina Almasry <almasrymina@...gle.com>
Signed-off-by: Stanislav Fomichev <sdf@...ichev.me>
---
net/core/devmem.c | 29 ++++++++++++++---------------
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/net/core/devmem.c b/net/core/devmem.c
index 7c6e0b5b6acb..c16cdac46bed 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -25,7 +25,7 @@
/* Device memory support */
-/* Protected by rtnl_lock() */
+static DEFINE_MUTEX(net_devmem_bindings_mutex);
static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);
static const struct memory_provider_ops dmabuf_devmem_ops;
@@ -119,6 +119,10 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
unsigned long xa_idx;
unsigned int rxq_idx;
+ mutex_lock(&net_devmem_bindings_mutex);
+ xa_erase(&net_devmem_dmabuf_bindings, binding->id);
+ mutex_unlock(&net_devmem_bindings_mutex);
+
if (binding->list.next)
list_del(&binding->list);
@@ -133,8 +137,6 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx));
}
- xa_erase(&net_devmem_dmabuf_bindings, binding->id);
-
net_devmem_dmabuf_binding_put(binding);
}
@@ -220,24 +222,15 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
}
binding->dev = dev;
-
- err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
- binding, xa_limit_32b, &id_alloc_next,
- GFP_KERNEL);
- if (err < 0)
- goto err_free_binding;
-
xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);
-
refcount_set(&binding->ref, 1);
-
binding->dmabuf = dmabuf;
binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
if (IS_ERR(binding->attachment)) {
err = PTR_ERR(binding->attachment);
NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
- goto err_free_id;
+ goto err_free_binding;
}
binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
@@ -305,6 +298,14 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
virtual += len;
}
+ mutex_lock(&net_devmem_bindings_mutex);
+ err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
+ binding, xa_limit_32b, &id_alloc_next,
+ GFP_KERNEL);
+ mutex_unlock(&net_devmem_bindings_mutex);
+ if (err < 0)
+ goto err_free_chunks;
+
return binding;
err_free_chunks:
@@ -316,8 +317,6 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
DMA_FROM_DEVICE);
err_detach:
dma_buf_detach(dmabuf, binding->attachment);
-err_free_id:
- xa_erase(&net_devmem_dmabuf_bindings, binding->id);
err_free_binding:
kfree(binding);
err_put_dmabuf:
--
2.48.1