Message-ID: <20250116231704.2402455-18-dw@davidwei.uk>
Date: Thu, 16 Jan 2025 15:16:59 -0800
From: David Wei <dw@...idwei.uk>
To: io-uring@...r.kernel.org,
netdev@...r.kernel.org
Cc: Jens Axboe <axboe@...nel.dk>,
Pavel Begunkov <asml.silence@...il.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jesper Dangaard Brouer <hawk@...nel.org>,
David Ahern <dsahern@...nel.org>,
Mina Almasry <almasrymina@...gle.com>,
Stanislav Fomichev <stfomichev@...il.com>,
Joe Damato <jdamato@...tly.com>,
Pedro Tammela <pctammela@...atatu.com>
Subject: [PATCH net-next v11 17/21] io_uring/zcrx: set pp memory provider for an rx queue

Set the page pool memory provider for the rx queue configured for zero
copy to io_uring. Then the rx queue is reset using
netdev_rx_queue_restart() and netdev core + page pool will take care of
filling the rx queue from the io_uring zero copy memory provider.

For now, there is only one ifq so its destruction happens implicitly
during io_uring cleanup.

Reviewed-by: Jens Axboe <axboe@...nel.dk>
Signed-off-by: David Wei <dw@...idwei.uk>
---
 io_uring/zcrx.c | 47 +++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 39 insertions(+), 8 deletions(-)
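
A note for reviewers, not part of the commit: the netdev-side mechanics
the commit message refers to look roughly like the sketch below. This is
a simplified rendition of the net_mp_open_rxq() helper this series builds
on (the function name here is made up, and locking/error handling are
reduced), not the exact implementation:

static int mp_open_rxq_sketch(struct net_device *dev, unsigned int rxq_idx,
			      struct pp_memory_provider_params *p)
{
	struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);

	/* a queue can host only one memory provider at a time */
	if (rxq->mp_params.mp_ops)
		return -EEXIST;

	/* publish the provider params, then restart the queue so netdev
	 * core + page pool rebuild the queue's pool against the provider
	 */
	rxq->mp_params = *p;
	return netdev_rx_queue_restart(dev, rxq_idx);
}

net_mp_close_rxq() is the inverse: clear the params and restart the queue
once more, which is what io_close_queue() below relies on.
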
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index cecfb04a117c..0cfa8c0ecff8 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -11,11 +11,12 @@
 #include <net/page_pool/helpers.h>
 #include <net/page_pool/memory_provider.h>
 #include <net/netlink.h>
-
-#include <trace/events/page_pool.h>
+#include <net/netdev_rx_queue.h>
 #include <net/tcp.h>
 #include <net/rps.h>
 
+#include <trace/events/page_pool.h>
+
 #include <uapi/linux/io_uring.h>
 
 #include "io_uring.h"
@@ -275,8 +276,32 @@ static void io_zcrx_drop_netdev(struct io_zcrx_ifq *ifq)
 	spin_unlock(&ifq->lock);
 }
 
+static void io_close_queue(struct io_zcrx_ifq *ifq)
+{
+	struct net_device *netdev;
+	netdevice_tracker netdev_tracker;
+	struct pp_memory_provider_params p = {
+		.mp_ops = &io_uring_pp_zc_ops,
+		.mp_priv = ifq,
+	};
+
+	if (ifq->if_rxq == -1)
+		return;
+
+	spin_lock(&ifq->lock);
+	netdev = ifq->netdev;
+	netdev_tracker = ifq->netdev_tracker;
+	ifq->netdev = NULL;
+	spin_unlock(&ifq->lock);
+
+	if (netdev)
+		net_mp_close_rxq(netdev, ifq->if_rxq, &p);
+	ifq->if_rxq = -1;
+}
+
 static void io_zcrx_ifq_free(struct io_zcrx_ifq *ifq)
 {
+	io_close_queue(ifq);
 	io_zcrx_drop_netdev(ifq);
 
 	if (ifq->area)
@@ -291,6 +316,7 @@ static void io_zcrx_ifq_free(struct io_zcrx_ifq *ifq)
 int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
 			 struct io_uring_zcrx_ifq_reg __user *arg)
 {
+	struct pp_memory_provider_params mp_param = {};
 	struct io_uring_zcrx_area_reg area;
 	struct io_uring_zcrx_ifq_reg reg;
 	struct io_uring_region_desc rd;
@@ -341,7 +367,6 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
 		goto err;
 
 	ifq->rq_entries = reg.rq_entries;
-	ifq->if_rxq = reg.if_rxq;
 
 	ret = -ENODEV;
 	ifq->netdev = netdev_get_by_index(current->nsproxy->net_ns, reg.if_idx,
@@ -358,16 +383,20 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
 	if (ret)
 		goto err;
 
+	mp_param.mp_ops = &io_uring_pp_zc_ops;
+	mp_param.mp_priv = ifq;
+	ret = net_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param);
+	if (ret)
+		goto err;
+	ifq->if_rxq = reg.if_rxq;
+
 	reg.offsets.rqes = sizeof(struct io_uring);
 	reg.offsets.head = offsetof(struct io_uring, head);
 	reg.offsets.tail = offsetof(struct io_uring, tail);
 
 	if (copy_to_user(arg, &reg, sizeof(reg)) ||
-	    copy_to_user(u64_to_user_ptr(reg.region_ptr), &rd, sizeof(rd))) {
-		ret = -EFAULT;
-		goto err;
-	}
-	if (copy_to_user(u64_to_user_ptr(reg.area_ptr), &area, sizeof(area))) {
+	    copy_to_user(u64_to_user_ptr(reg.region_ptr), &rd, sizeof(rd)) ||
+	    copy_to_user(u64_to_user_ptr(reg.area_ptr), &area, sizeof(area))) {
 		ret = -EFAULT;
 		goto err;
 	}
@@ -444,6 +473,8 @@ void io_shutdown_zcrx_ifqs(struct io_ring_ctx *ctx)
 
 	if (ctx->ifq)
 		io_zcrx_scrub(ctx->ifq);
+
+	io_close_queue(ctx->ifq);
 }
 
 static inline u32 io_zcrx_rqring_entries(struct io_zcrx_ifq *ifq)
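
A usage note, not part of the patch: userspace drives the path above
through the IORING_REGISTER_ZCRX_IFQ registration opcode introduced
earlier in the series. A hypothetical minimal call site (the helper name
is made up, area/region setup and error handling are elided, and the
uapi layout is assumed from this series):

#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

/* Hypothetical helper: bind rx queue 'rxq' of the netdev at 'ifindex'
 * to the ring's zero copy provider. 'reg' must already carry area_ptr,
 * region_ptr and rq_entries filled in by the caller.
 */
static int register_zcrx_ifq(int ring_fd, unsigned int ifindex,
			     unsigned int rxq,
			     struct io_uring_zcrx_ifq_reg *reg)
{
	reg->if_idx = ifindex;	/* which netdev */
	reg->if_rxq = rxq;	/* which hw rx queue gets restarted */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_ZCRX_IFQ, reg, 1);
}

On success the kernel runs the net_mp_open_rxq() flow shown in this
patch; unregistering the ring (or io_uring exit) closes the queue again
via io_close_queue().
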
--
2.43.5