[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250815110401.2254214-9-dtatulea@nvidia.com>
Date: Fri, 15 Aug 2025 14:03:48 +0300
From: Dragos Tatulea <dtatulea@...dia.com>
To: <almasrymina@...gle.com>, <asml.silence@...il.com>, "David S. Miller"
<davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski
<kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>, Simon Horman
<horms@...nel.org>
CC: Dragos Tatulea <dtatulea@...dia.com>, <cratiu@...dia.com>,
<tariqt@...dia.com>, <parav@...dia.com>, Christoph Hellwig
<hch@...radead.org>, <netdev@...r.kernel.org>, <linux-kernel@...r.kernel.org>
Subject: [RFC net-next v3 7/7] net: devmem: allow binding on rx queues with same MA devices
Multi-PF netdevs have queues belonging to different PFs, which also means
different DMA devices. This means that the DMA buffer binding can be done
against the incorrect device.
This change allows devmem binding to multiple queues only when the
queues have the same DMA device. Otherwise an error is returned.
Signed-off-by: Dragos Tatulea <dtatulea@...dia.com>
---
net/core/netdev-genl.c | 27 ++++++++++++++++++++++++++-
1 file changed, 26 insertions(+), 1 deletion(-)
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 3e990f100bf0..649b62803529 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -903,6 +903,31 @@ static int netdev_nl_read_rxq_bitmap(struct genl_info *info,
return 0;
}
+/* Resolve the single DMA device backing all rx queues selected in
+ * rxq_bitmap. Returns NULL either when the selected queues span more
+ * than one DMA device (extack is set) or when no selected queue
+ * reports a DMA device (extack is not set).
+ */
+static struct device *netdev_nl_get_dma_dev(struct net_device *netdev,
+ unsigned long *rxq_bitmap,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dma_dev = NULL;
+ u32 rxq_idx;
+
+ for_each_set_bit(rxq_idx, rxq_bitmap, netdev->num_rx_queues) {
+ struct device *rxq_dma_dev;
+
+ rxq_dma_dev = netdev_queue_get_dma_dev(netdev, rxq_idx);
+ /* Multi-PF netdev queues can belong to different DMA devices.
+ * Block this case.
+ */
+ if (rxq_dma_dev && dma_dev && rxq_dma_dev != dma_dev) {
+ NL_SET_ERR_MSG(extack, "Can't bind to queues from different dma devices");
+ return NULL;
+ }
+
+ /* NOTE(review): a NULL rxq_dma_dev here overwrites a previously
+ * found device, so a mix of NULL and non-NULL queue devices can
+ * slip past the mismatch check above -- confirm this is intended,
+ * or only assign when rxq_dma_dev is non-NULL.
+ */
+ dma_dev = rxq_dma_dev;
+ }
+
+ return dma_dev;
+}
+
int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
{
struct net_devmem_dmabuf_binding *binding;
@@ -962,7 +987,7 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
}
netdev_nl_read_rxq_bitmap(info, rxq_bitmap);
- dma_dev = netdev_queue_get_dma_dev(netdev, 0);
+ dma_dev = netdev_nl_get_dma_dev(netdev, rxq_bitmap, info->extack);
binding = net_devmem_bind_dmabuf(netdev, dma_dev, DMA_FROM_DEVICE,
dmabuf_fd, priv, info->extack);
if (IS_ERR(binding)) {
--
2.50.1
Powered by blists - more mailing lists