[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250919213153.103606-11-daniel@iogearbox.net>
Date: Fri, 19 Sep 2025 23:31:43 +0200
From: Daniel Borkmann <daniel@...earbox.net>
To: netdev@...r.kernel.org
Cc: bpf@...r.kernel.org,
kuba@...nel.org,
davem@...emloft.net,
razor@...ckwall.org,
pabeni@...hat.com,
willemb@...gle.com,
sdf@...ichev.me,
john.fastabend@...il.com,
martin.lau@...nel.org,
jordan@...fe.io,
maciej.fijalkowski@...el.com,
magnus.karlsson@...el.com,
David Wei <dw@...idwei.uk>
Subject: [PATCH net-next 10/20] xsk: Move pool registration into single function
Small refactor to move the pool registration into xsk_reg_pool_at_qid,
so that the pool's netdev and queue_id can be assigned there as part of
registration. No change in functionality.
Signed-off-by: Daniel Borkmann <daniel@...earbox.net>
Co-developed-by: David Wei <dw@...idwei.uk>
Signed-off-by: David Wei <dw@...idwei.uk>
---
net/xdp/xsk.c | 5 +++++
net/xdp/xsk_buff_pool.c | 16 +++-------------
2 files changed, 8 insertions(+), 13 deletions(-)
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 72e34bd2d925..82ad89f6ba35 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -141,6 +141,11 @@ int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
dev->real_num_rx_queues,
dev->real_num_tx_queues))
return -EINVAL;
+ if (xsk_get_pool_from_qid(dev, queue_id))
+ return -EBUSY;
+
+ pool->netdev = dev;
+ pool->queue_id = queue_id;
if (queue_id < dev->real_num_rx_queues)
dev->_rx[queue_id].pool = pool;
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 26165baf99f4..375696f895d4 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -169,32 +169,24 @@ int xp_assign_dev(struct xsk_buff_pool *pool,
force_zc = flags & XDP_ZEROCOPY;
force_copy = flags & XDP_COPY;
-
if (force_zc && force_copy)
return -EINVAL;
- if (xsk_get_pool_from_qid(netdev, queue_id))
- return -EBUSY;
-
- pool->netdev = netdev;
- pool->queue_id = queue_id;
err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
if (err)
return err;
if (flags & XDP_USE_SG)
pool->umem->flags |= XDP_UMEM_SG_FLAG;
-
if (flags & XDP_USE_NEED_WAKEUP)
pool->uses_need_wakeup = true;
- /* Tx needs to be explicitly woken up the first time. Also
- * for supporting drivers that do not implement this
- * feature. They will always have to call sendto() or poll().
+ /* Tx needs to be explicitly woken up the first time. Also
+ * for supporting drivers that do not implement this feature.
+ * They will always have to call sendto() or poll().
*/
pool->cached_need_wakeup = XDP_WAKEUP_TX;
dev_hold(netdev);
-
if (force_copy)
/* For copy-mode, we are done. */
return 0;
@@ -203,12 +195,10 @@ int xp_assign_dev(struct xsk_buff_pool *pool,
err = -EOPNOTSUPP;
goto err_unreg_pool;
}
-
if (netdev->xdp_zc_max_segs == 1 && (flags & XDP_USE_SG)) {
err = -EOPNOTSUPP;
goto err_unreg_pool;
}
-
if (dev_get_min_mp_channel_count(netdev)) {
err = -EBUSY;
goto err_unreg_pool;
--
2.43.0
Powered by blists - more mailing lists