Message-Id: <20231107214045.2172393-10-dw@davidwei.uk>
Date: Tue, 7 Nov 2023 13:40:34 -0800
From: David Wei <dw@...idwei.uk>
To: io-uring@...r.kernel.org,
netdev@...r.kernel.org
Cc: Jens Axboe <axboe@...nel.dk>,
Pavel Begunkov <asml.silence@...il.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jesper Dangaard Brouer <hawk@...nel.org>,
David Ahern <dsahern@...nel.org>,
Mina Almasry <almasrymina@...gle.com>,
Willem de Bruijn <willemdebruijn.kernel@...il.com>,
Dragos Tatulea <dtatulea@...dia.com>
Subject: [PATCH 09/20] io_uring: allocate a uarg for freeing zero copy skbs
Since ZC skbs are marked as zero copy, they bypass the default skb frag
destructor on free. This patch adds a static uarg, embedded in the ifq and
attached to ZC skbs, plus a callback that returns their bufs to the freelist
of the ZC pool.
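
For reference, a minimal driver-side sketch (not part of this patch;
drv_attach_zc_uarg() is a hypothetical wrapper, and it assumes the driver
uses the existing skb_zcopy_init() helper): once a driver has stashed
cmd.zc_rx.uarg from the XDP_SETUP_ZC_RX request, it tags each ZC skb with
it so that io_zc_rx_skb_free() runs on release instead of the default frag
destructor:

    #include <linux/skbuff.h>

    /* Hypothetical helper: tag a ZC RX skb with the ifq's uarg so its
     * frags are handed back through io_zc_rx_skb_free() when the skb
     * is freed.
     */
    static void drv_attach_zc_uarg(struct sk_buff *skb,
                                   struct ubuf_info *uarg)
    {
            /* Sets shinfo->destructor_arg and copies uarg->flags
             * (SKBFL_ALL_ZEROCOPY | SKBFL_FIXED_FRAG here) into the
             * skb's shared info, marking it as zero copy.
             */
            skb_zcopy_init(skb, uarg);
    }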
Co-developed-by: Pavel Begunkov <asml.silence@...il.com>
Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
Signed-off-by: David Wei <dw@...idwei.uk>
---
include/linux/io_uring.h | 7 +++++++
include/linux/netdevice.h | 1 +
io_uring/zc_rx.c | 44 +++++++++++++++++++++++++++++++++++++++
io_uring/zc_rx.h | 1 +
4 files changed, 53 insertions(+)
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 624515a8bdd5..fb88e000c156 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -72,6 +72,8 @@ static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
struct io_zc_rx_ifq;
struct io_zc_rx_buf *io_zc_rx_get_buf(struct io_zc_rx_ifq *ifq);
+struct io_zc_rx_buf *io_zc_rx_buf_from_page(struct io_zc_rx_ifq *ifq,
+ struct page *page);
void io_zc_rx_put_buf(struct io_zc_rx_ifq *ifq, struct io_zc_rx_buf *buf);
static inline dma_addr_t io_zc_rx_buf_dma(struct io_zc_rx_buf *buf)
@@ -122,6 +124,11 @@ static inline struct io_zc_rx_buf *io_zc_rx_get_buf(struct io_zc_rx_ifq *ifq)
{
return NULL;
}
+static inline struct io_zc_rx_buf *io_zc_rx_buf_from_page(struct io_zc_rx_ifq *ifq,
+ struct page *page)
+{
+ return NULL;
+}
static inline void io_zc_rx_put_buf(struct io_zc_rx_ifq *ifq, struct io_zc_rx_buf *buf)
{
}
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f9c82c89a96b..ec82fc984941 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1027,6 +1027,7 @@ struct netdev_bpf {
struct {
struct io_zc_rx_ifq *ifq;
u16 queue_id;
+ struct ubuf_info *uarg;
} zc_rx;
};
};
diff --git a/io_uring/zc_rx.c b/io_uring/zc_rx.c
index 840a21549d89..59f279486e9a 100644
--- a/io_uring/zc_rx.c
+++ b/io_uring/zc_rx.c
@@ -46,6 +46,11 @@ static inline u64 mk_page_info(u16 pool_id, u32 pgid)
return (u64)0xface << 48 | (u64)pool_id << 32 | (u64)pgid;
}
+static inline bool is_zc_rx_page(struct page *page)
+{
+ return PagePrivate(page) && ((page_private(page) >> 48) == 0xface);
+}
+
typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
static int __io_queue_mgmt(struct net_device *dev, struct io_zc_rx_ifq *ifq,
@@ -61,6 +66,7 @@ static int __io_queue_mgmt(struct net_device *dev, struct io_zc_rx_ifq *ifq,
cmd.command = XDP_SETUP_ZC_RX;
cmd.zc_rx.ifq = ifq;
cmd.zc_rx.queue_id = queue_id;
+ cmd.zc_rx.uarg = ifq ? &ifq->uarg : NULL;
return ndo_bpf(dev, &cmd);
}
@@ -75,6 +81,26 @@ static int io_close_zc_rxq(struct io_zc_rx_ifq *ifq)
return __io_queue_mgmt(ifq->dev, NULL, ifq->if_rxq_id);
}
+static void io_zc_rx_skb_free(struct sk_buff *skb, struct ubuf_info *uarg,
+ bool success)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ struct io_zc_rx_ifq *ifq;
+ struct io_zc_rx_buf *buf;
+ struct page *page;
+ int i;
+
+ ifq = container_of(uarg, struct io_zc_rx_ifq, uarg);
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ page = skb_frag_page(&shinfo->frags[i]);
+ buf = io_zc_rx_buf_from_page(ifq, page);
+ if (likely(buf))
+ io_zc_rx_put_buf(ifq, buf);
+ else
+ __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);
+ }
+}
+
static int io_zc_rx_map_buf(struct device *dev, struct page *page, u16 pool_id,
u32 pgid, struct io_zc_rx_buf *buf)
{
@@ -270,6 +296,8 @@ int io_register_zc_rx_ifq(struct io_ring_ctx *ctx,
if (ret)
goto err;
+ ifq->uarg.callback = io_zc_rx_skb_free;
+ ifq->uarg.flags = SKBFL_ALL_ZEROCOPY | SKBFL_FIXED_FRAG;
ifq->rq_entries = reg.rq_entries;
ifq->cq_entries = reg.cq_entries;
ifq->cached_rq_head = 0;
@@ -466,4 +494,20 @@ void io_zc_rx_put_buf(struct io_zc_rx_ifq *ifq, struct io_zc_rx_buf *buf)
}
EXPORT_SYMBOL(io_zc_rx_put_buf);
+struct io_zc_rx_buf *io_zc_rx_buf_from_page(struct io_zc_rx_ifq *ifq,
+ struct page *page)
+{
+ struct io_zc_rx_pool *pool;
+ int pgid;
+
+ if (!is_zc_rx_page(page))
+ return NULL;
+
+ pool = ifq->pool;
+ pgid = page_private(page) & 0xffffffff;
+
+ return &pool->bufs[pgid];
+}
+EXPORT_SYMBOL(io_zc_rx_buf_from_page);
+
#endif
diff --git a/io_uring/zc_rx.h b/io_uring/zc_rx.h
index a3df820e52e7..b99be0227e9e 100644
--- a/io_uring/zc_rx.h
+++ b/io_uring/zc_rx.h
@@ -15,6 +15,7 @@ struct io_zc_rx_ifq {
struct io_rbuf_ring *ring;
struct io_uring_rbuf_rqe *rqes;
struct io_uring_rbuf_cqe *cqes;
+ struct ubuf_info uarg;
u32 rq_entries, cq_entries;
u32 cached_rq_head;
u32 cached_cq_tail;
--
2.39.3