Message-ID: <20250820171214.3597901-9-dtatulea@nvidia.com>
Date: Wed, 20 Aug 2025 20:11:58 +0300
From: Dragos Tatulea <dtatulea@...dia.com>
To: <almasrymina@...gle.com>, <asml.silence@...il.com>, "David S. Miller"
<davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski
<kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>, Simon Horman
<horms@...nel.org>
CC: Dragos Tatulea <dtatulea@...dia.com>, <cratiu@...dia.com>,
<parav@...dia.com>, <netdev@...r.kernel.org>, <sdf@...a.com>,
<linux-kernel@...r.kernel.org>
Subject: [PATCH net-next v4 7/7] net: devmem: allow binding on rx queues with same DMA devices

Multi-PF netdevs have queues belonging to different PFs, which also means
different DMA devices. As a result, a dmabuf binding that spans such queues
can end up being done against the wrong device.

This change allows devmem binding to multiple rx queues only when all of the
queues share the same DMA device. Otherwise an error is returned.
Signed-off-by: Dragos Tatulea <dtatulea@...dia.com>
---
net/core/netdev-genl.c | 34 +++++++++++++++++++++++++++++++++-
1 file changed, 33 insertions(+), 1 deletion(-)
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 0df9c159e515..a8c27f636453 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -906,6 +906,33 @@ static int netdev_nl_read_rxq_bitmap(struct genl_info *info,
return 0;
}
+static struct device *netdev_nl_get_dma_dev(struct net_device *netdev,
+ unsigned long *rxq_bitmap,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dma_dev = NULL;
+ u32 rxq_idx, prev_rxq_idx;
+
+ for_each_set_bit(rxq_idx, rxq_bitmap, netdev->real_num_rx_queues) {
+ struct device *rxq_dma_dev;
+
+ rxq_dma_dev = netdev_queue_get_dma_dev(netdev, rxq_idx);
+ /* Multi-PF netdev queues can belong to different DMA devices.
+ * Block this case.
+ */
+ if (dma_dev && rxq_dma_dev != dma_dev) {
+ NL_SET_ERR_MSG_FMT(extack, "Queue %u has a different dma device than queue %u",
+ rxq_idx, prev_rxq_idx);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ dma_dev = rxq_dma_dev;
+ prev_rxq_idx = rxq_idx;
+ }
+
+ return dma_dev;
+}
+
int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
{
struct net_devmem_dmabuf_binding *binding;
@@ -969,7 +996,12 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
if (err)
goto err_rxq_bitmap;
- dma_dev = netdev_queue_get_dma_dev(netdev, 0);
+ dma_dev = netdev_nl_get_dma_dev(netdev, rxq_bitmap, info->extack);
+ if (IS_ERR(dma_dev)) {
+ err = PTR_ERR(dma_dev);
+ goto err_rxq_bitmap;
+ }
+
binding = net_devmem_bind_dmabuf(netdev, dma_dev, DMA_FROM_DEVICE,
dmabuf_fd, priv, info->extack);
if (IS_ERR(binding)) {
--
2.50.1