[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20251021131209.41491-7-kerneljasonxing@gmail.com>
Date: Tue, 21 Oct 2025 21:12:06 +0800
From: Jason Xing <kerneljasonxing@...il.com>
To: davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
bjorn@...nel.org,
magnus.karlsson@...el.com,
maciej.fijalkowski@...el.com,
jonathan.lemon@...il.com,
sdf@...ichev.me,
ast@...nel.org,
daniel@...earbox.net,
hawk@...nel.org,
john.fastabend@...il.com,
joe@...a.to,
willemdebruijn.kernel@...il.com
Cc: bpf@...r.kernel.org,
netdev@...r.kernel.org,
Jason Xing <kernelxing@...cent.com>
Subject: [PATCH net-next v3 6/9] xsk: extend xskq_cons_read_desc_batch to count nb_pkts
From: Jason Xing <kernelxing@...cent.com>
Add a new parameter nb_pkts to count how many packets are actually
needed by copy mode, with the help of the XDP_PKT_CONTD option.
Also add a descs parameter so that copy mode can pass xs->desc_cache
as the storage for the parsed descriptors.
Signed-off-by: Jason Xing <kernelxing@...cent.com>
---
net/xdp/xsk.c | 3 ++-
net/xdp/xsk_queue.h | 5 +++--
2 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index b057d10fcf6a..d30090a8420f 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -523,7 +523,8 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_descs)
if (!nb_descs)
goto out;
- nb_descs = xskq_cons_read_desc_batch(xs->tx, pool, nb_descs);
+ nb_descs = xskq_cons_read_desc_batch(xs->tx, pool, pool->tx_descs,
+ nb_descs, NULL);
if (!nb_descs) {
xs->tx->queue_empty_descs++;
goto out;
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index f16f390370dc..9caa0cfe29de 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -235,10 +235,9 @@ static inline void parse_desc(struct xsk_queue *q, struct xsk_buff_pool *pool,
static inline
u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
- u32 max)
+ struct xdp_desc *descs, u32 max, u32 *nb_pkts)
{
u32 cached_cons = q->cached_cons, nb_entries = 0;
- struct xdp_desc *descs = pool->tx_descs;
u32 total_descs = 0, nr_frags = 0;
/* track first entry, if stumble upon *any* invalid descriptor, rewind
@@ -258,6 +257,8 @@ u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
if (likely(!parsed.mb)) {
total_descs += (nr_frags + 1);
nr_frags = 0;
+ if (nb_pkts)
+ (*nb_pkts)++;
} else {
nr_frags++;
if (nr_frags == pool->xdp_zc_max_segs) {
--
2.41.3
Powered by blists - more mailing lists