Message-Id: <20251021131209.41491-5-kerneljasonxing@gmail.com>
Date: Tue, 21 Oct 2025 21:12:04 +0800
From: Jason Xing <kerneljasonxing@...il.com>
To: davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
bjorn@...nel.org,
magnus.karlsson@...el.com,
maciej.fijalkowski@...el.com,
jonathan.lemon@...il.com,
sdf@...ichev.me,
ast@...nel.org,
daniel@...earbox.net,
hawk@...nel.org,
john.fastabend@...il.com,
joe@...a.to,
willemdebruijn.kernel@...il.com
Cc: bpf@...r.kernel.org,
netdev@...r.kernel.org,
Jason Xing <kernelxing@...cent.com>
Subject: [PATCH net-next v3 4/9] xsk: add direct xmit in batch function
From: Jason Xing <kernelxing@...cent.com>
Add batch xmit logic.
Grab the lock and disable bottom halves only once, then send all the
aggregated packets in one loop. Via skb->list, the already built skbs
can be handled one by one.
Signed-off-by: Jason Xing <kernelxing@...cent.com>
---
include/net/xdp_sock.h | 1 +
net/core/dev.c | 22 ++++++++++++++++++++++
2 files changed, 23 insertions(+)
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index cb5aa8a314fe..5cdb8290f752 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -133,6 +133,7 @@ struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
struct sk_buff *allocated_skb,
struct xdp_desc *desc);
int xsk_alloc_batch_skb(struct xdp_sock *xs, u32 nb_pkts, u32 nb_descs, int *err);
+int xsk_direct_xmit_batch(struct xdp_sock *xs, struct net_device *dev);
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
diff --git a/net/core/dev.c b/net/core/dev.c
index a64cef2c537e..32de76c79d29 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -163,6 +163,7 @@
#include <net/page_pool/memory_provider.h>
#include <net/rps.h>
#include <linux/phy_link_topology.h>
+#include <net/xdp_sock.h>
#include "dev.h"
#include "devmem.h"
@@ -4792,6 +4793,27 @@ int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
}
EXPORT_SYMBOL(__dev_queue_xmit);
+int xsk_direct_xmit_batch(struct xdp_sock *xs, struct net_device *dev)
+{
+ u16 queue_id = xs->queue_id;
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_id);
+ int ret = NETDEV_TX_BUSY;
+ struct sk_buff *skb;
+
+ local_bh_disable();
+ HARD_TX_LOCK(dev, txq, smp_processor_id());
+ while ((skb = __skb_dequeue(&xs->batch.send_queue)) != NULL) {
+ skb_set_queue_mapping(skb, queue_id);
+ ret = netdev_start_xmit(skb, dev, txq, false);
+ if (ret != NETDEV_TX_OK)
+ break;
+ }
+ HARD_TX_UNLOCK(dev, txq);
+ local_bh_enable();
+
+ return ret;
+}
+
int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{
struct net_device *dev = skb->dev;
--
2.41.3