Message-ID: <f6d352a8eb9f0297196fdaf0eccc6d9e2a44a357.1753694914.git.asml.silence@gmail.com>
Date: Mon, 28 Jul 2025 12:04:26 +0100
From: Pavel Begunkov <asml.silence@...il.com>
To: Jakub Kicinski <kuba@...nel.org>,
netdev@...r.kernel.org
Cc: asml.silence@...il.com,
io-uring@...r.kernel.org,
Eric Dumazet <edumazet@...gle.com>,
Willem de Bruijn <willemb@...gle.com>,
Paolo Abeni <pabeni@...hat.com>,
andrew+netdev@...n.ch,
horms@...nel.org,
davem@...emloft.net,
sdf@...ichev.me,
almasrymina@...gle.com,
dw@...idwei.uk,
michael.chan@...adcom.com,
dtatulea@...dia.com,
ap420073@...il.com
Subject: [RFC v1 22/22] io_uring/zcrx: implement large rx buffer support
There are network cards that support receive buffers larger than 4K,
and using them can be vastly beneficial for performance: benchmarks for
this patch showed up to a 30% improvement in CPU utilisation for 32K
buffers vs 4K.

Allow zcrx users to specify the buffer size in struct
io_uring_zcrx_ifq_reg::rx_buf_len. If it's set to zero, zcrx will use a
default value. zcrx checks and fails registration if the memory backing
the area can't be split into physically contiguous chunks of the
requested size. That is more restrictive than strictly necessary, as
only the dma addresses need to be contiguous, but relaxing it is beyond
the scope of this series.
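
For illustration, a rough sketch of what registration might look like
from userspace (untested; assumes a thin io_uring_register() syscall
wrapper, and elides ring setup and the area/region descriptors):

	#include <stdint.h>
	#include <stdio.h>
	#include <linux/io_uring.h>

	static void register_zcrx_32k(int ring_fd, uint32_t ifindex,
				      uint32_t rxq_id,
				      struct io_uring_zcrx_area_reg *area,
				      struct io_uring_region_desc *rd)
	{
		struct io_uring_zcrx_ifq_reg reg = {
			.if_idx		= ifindex,	/* netdev to attach to */
			.if_rxq		= rxq_id,	/* hw rx queue to take over */
			.rq_entries	= 4096,
			.rx_buf_len	= 32 * 1024,	/* power of 2, >= PAGE_SIZE */
			.area_ptr	= (uint64_t)(uintptr_t)area,
			.region_ptr	= (uint64_t)(uintptr_t)rd,
		};

		/* fails with -EINVAL if the backing memory can't be
		 * split into physically contiguous 32K chunks */
		if (io_uring_register(ring_fd, IORING_REGISTER_ZCRX_IFQ,
				      &reg, 1))
			return;
		/* the kernel writes back the buffer size actually used */
		printf("rx_buf_len = %u\n", reg.rx_buf_len);
	}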
Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
---
 include/uapi/linux/io_uring.h |  2 +-
 io_uring/zcrx.c               | 39 +++++++++++++++++++++++++++++------
 2 files changed, 34 insertions(+), 7 deletions(-)
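
(A hypothetical example of the io_area_max_shift() check below: suppose
the area's scatterlist has two DMA segments of 0x30000 and 0x8000
bytes. __ffs() returns the lowest set bit, i.e. the largest power-of-2
shift dividing each segment length:

	__ffs(0x30000) = 16	-> splits into 64K chunks
	__ffs(0x8000)  = 15	-> splits into 32K chunks

min(16, 15) = 15, so this area can back buffers of up to 1U << 15 = 32K,
and registering it with rx_buf_len = 64K fails with -EINVAL.)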
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 9d306eb5251c..8e3a342a4ad8 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -1041,7 +1041,7 @@ struct io_uring_zcrx_ifq_reg {
 
 	struct io_uring_zcrx_offsets offsets;
 	__u32	zcrx_id;
-	__u32	__resv2;
+	__u32	rx_buf_len;
 	__u64	__resv[3];
 };
 
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index a00243e10164..3caa3f472af1 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -13,6 +13,7 @@
 #include <net/page_pool/memory_provider.h>
 #include <net/netlink.h>
 #include <net/netdev_rx_queue.h>
+#include <net/netdev_queues.h>
 #include <net/tcp.h>
 #include <net/rps.h>
 
@@ -53,6 +54,18 @@ static inline struct page *io_zcrx_iov_page(const struct net_iov *niov)
 	return area->mem.pages[net_iov_idx(niov) << niov_pages_shift];
 }
 
+static int io_area_max_shift(struct io_zcrx_mem *mem)
+{
+	struct sg_table *sgt = mem->sgt;
+	struct scatterlist *sg;
+	unsigned order = -1U;
+	unsigned i;
+
+	for_each_sgtable_dma_sg(sgt, sg, i)
+		order = min(order, __ffs(sg->length));
+	return order;
+}
+
 static int io_populate_area_dma(struct io_zcrx_ifq *ifq,
 				struct io_zcrx_area *area)
 {
@@ -384,8 +397,10 @@ static int io_zcrx_append_area(struct io_zcrx_ifq *ifq,
 }
 
 static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
-			       struct io_uring_zcrx_area_reg *area_reg)
+			       struct io_uring_zcrx_area_reg *area_reg,
+			       struct io_uring_zcrx_ifq_reg *reg)
 {
+	int buf_size_shift = PAGE_SHIFT;
 	struct io_zcrx_area *area;
 	unsigned nr_iovs;
 	int i, ret;
@@ -400,7 +415,16 @@ static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
 	if (ret)
 		goto err;
 
-	ifq->niov_shift = PAGE_SHIFT;
+	if (reg->rx_buf_len) {
+		if (!is_power_of_2(reg->rx_buf_len) ||
+		    reg->rx_buf_len < PAGE_SIZE)
+			return -EINVAL;
+		buf_size_shift = ilog2(reg->rx_buf_len);
+	}
+	if (buf_size_shift > io_area_max_shift(&area->mem))
+		return -EINVAL;
+
+	ifq->niov_shift = buf_size_shift;
 	nr_iovs = area->mem.size >> ifq->niov_shift;
 	area->nia.num_niovs = nr_iovs;
 
@@ -522,6 +546,7 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
 			 struct io_uring_zcrx_ifq_reg __user *arg)
 {
 	struct pp_memory_provider_params mp_param = {};
+	struct netdev_queue_config qcfg = {};
 	struct io_uring_zcrx_area_reg area;
 	struct io_uring_zcrx_ifq_reg reg;
 	struct io_uring_region_desc rd;
@@ -544,8 +569,7 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
 		return -EFAULT;
 	if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd)))
 		return -EFAULT;
-	if (!mem_is_zero(&reg.__resv, sizeof(reg.__resv)) ||
-	    reg.__resv2 || reg.zcrx_id)
+	if (!mem_is_zero(&reg.__resv, sizeof(reg.__resv)) || reg.zcrx_id)
 		return -EINVAL;
 	if (reg.if_rxq == -1 || !reg.rq_entries || reg.flags)
 		return -EINVAL;
@@ -589,13 +613,14 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
 	}
 	get_device(ifq->dev);
 
-	ret = io_zcrx_create_area(ifq, &area);
+	ret = io_zcrx_create_area(ifq, &area, &reg);
 	if (ret)
 		goto err;
 
 	mp_param.mp_ops = &io_uring_pp_zc_ops;
 	mp_param.mp_priv = ifq;
-	ret = net_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param, NULL);
+	qcfg.rx_buf_len = 1U << ifq->niov_shift;
+	ret = net_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param, &qcfg);
 	if (ret)
 		goto err;
 	ifq->if_rxq = reg.if_rxq;
@@ -612,6 +637,8 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
 		goto err;
 	}
 
+	reg.rx_buf_len = 1U << ifq->niov_shift;
+
 	if (copy_to_user(arg, &reg, sizeof(reg)) ||
 	    copy_to_user(u64_to_user_ptr(reg.region_ptr), &rd, sizeof(rd)) ||
 	    copy_to_user(u64_to_user_ptr(reg.area_ptr), &area, sizeof(area))) {
--
2.49.0