Message-Id: <20240820073330.9161-12-xuanzhuo@linux.alibaba.com>
Date: Tue, 20 Aug 2024 15:33:28 +0800
From: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
To: netdev@...r.kernel.org
Cc: "Michael S. Tsirkin" <mst@...hat.com>,
Jason Wang <jasowang@...hat.com>,
Xuan Zhuo <xuanzhuo@...ux.alibaba.com>,
Eugenio Pérez <eperezma@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
virtualization@...ts.linux.dev,
bpf@...r.kernel.org
Subject: [PATCH net-next 11/13] virtio_net: xsk: tx: handle the transmitted xsk buffer
virtnet_free_old_xmit() distinguishes the three pointer types (skb, xdp
frame, xsk buffer) by the low bits of the pointer. For completed xsk
buffers, it reports the number of finished descriptors back to the pool
via xsk_tx_completed() and wakes the tx napi when needed, because the
tx interrupt may not be triggered.
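For reference, a minimal sketch of how this kind of low-bit pointer
tagging works (the mask and the pack/unpack helper names below are
illustrative assumptions, not the exact helpers in this patch):

    enum virtnet_xmit_type {
            VIRTNET_XMIT_TYPE_SKB,  /* low bits 0b00 */
            VIRTNET_XMIT_TYPE_XDP,  /* low bits 0b01 */
            VIRTNET_XMIT_TYPE_XSK,  /* low bits 0b10 */
    };

    /* skb/xdp_frame allocations are at least 4-byte aligned, so the
     * two low bits of the pointer are free to carry the type tag.
     */
    #define VIRTNET_XMIT_TYPE_MASK 0x3UL

    static void *virtnet_pack_ptr(void *ptr, enum virtnet_xmit_type type)
    {
            return (void *)((unsigned long)ptr | type);
    }

    static enum virtnet_xmit_type virtnet_unpack_ptr(void **ptr)
    {
            unsigned long p = (unsigned long)*ptr;

            *ptr = (void *)(p & ~VIRTNET_XMIT_TYPE_MASK);
            return p & VIRTNET_XMIT_TYPE_MASK;
    }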
Signed-off-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
---
drivers/net/virtio_net.c | 85 ++++++++++++++++++++++++++++------------
1 file changed, 59 insertions(+), 26 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3743694d3c3b..a898568bed5c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -83,6 +83,7 @@ struct virtnet_sq_free_stats {
u64 bytes;
u64 napi_packets;
u64 napi_bytes;
+ u64 xsk;
};
struct virtnet_sq_stats {
@@ -511,6 +512,7 @@ static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
struct sk_buff *curr_skb,
struct page *page, void *buf,
int len, int truesize);
+static void virtnet_xsk_completed(struct send_queue *sq, int num);
enum virtnet_xmit_type {
VIRTNET_XMIT_TYPE_SKB,
@@ -595,12 +597,24 @@ static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
case VIRTNET_XMIT_TYPE_XSK:
stats->bytes += virtnet_ptr_to_xsk(ptr);
+ stats->xsk++;
break;
}
}
netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes);
}
+static void virtnet_free_old_xmit(struct send_queue *sq,
+ struct netdev_queue *txq,
+ bool in_napi,
+ struct virtnet_sq_free_stats *stats)
+{
+ __free_old_xmit(sq, txq, in_napi, stats);
+
+ if (stats->xsk)
+ virtnet_xsk_completed(sq, stats->xsk);
+}
+
/* Converting between virtqueue no. and kernel tx/rx queue no.
* 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
*/
@@ -1021,7 +1035,7 @@ static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
{
struct virtnet_sq_free_stats stats = {0};
- __free_old_xmit(sq, txq, in_napi, &stats);
+ virtnet_free_old_xmit(sq, txq, in_napi, &stats);
/* Avoid overhead when no packets have been processed
* happens when called speculatively from start_xmit.
@@ -1382,29 +1396,6 @@ static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue
return err;
}
-static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
-{
- struct virtnet_info *vi = netdev_priv(dev);
- struct send_queue *sq;
-
- if (!netif_running(dev))
- return -ENETDOWN;
-
- if (qid >= vi->curr_queue_pairs)
- return -EINVAL;
-
- sq = &vi->sq[qid];
-
- if (napi_if_scheduled_mark_missed(&sq->napi))
- return 0;
-
- local_bh_disable();
- virtqueue_napi_schedule(&sq->napi, sq->vq);
- local_bh_enable();
-
- return 0;
-}
-
static void *virtnet_xsk_to_ptr(u32 len)
{
unsigned long p;
@@ -1476,8 +1467,12 @@ static bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
u64 kicks = 0;
int sent;
+ /* Avoid waking up napi needlessly, so call __free_old_xmit() directly. */
__free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), true, &stats);
+ if (stats.xsk)
+ xsk_tx_completed(sq->xsk_pool, stats.xsk);
+
sent = virtnet_xsk_xmit_batch(sq, pool, budget, &kicks);
if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
@@ -1496,6 +1491,44 @@ static bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
return sent == budget;
}
+static void xsk_wakeup(struct send_queue *sq)
+{
+ if (napi_if_scheduled_mark_missed(&sq->napi))
+ return;
+
+ local_bh_disable();
+ virtqueue_napi_schedule(&sq->napi, sq->vq);
+ local_bh_enable();
+}
+
+static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct send_queue *sq;
+
+ if (!netif_running(dev))
+ return -ENETDOWN;
+
+ if (qid >= vi->curr_queue_pairs)
+ return -EINVAL;
+
+ sq = &vi->sq[qid];
+
+ xsk_wakeup(sq);
+ return 0;
+}
+
+static void virtnet_xsk_completed(struct send_queue *sq, int num)
+{
+ xsk_tx_completed(sq->xsk_pool, num);
+
+ /* If this is called from rx poll, start_xmit or xdp xmit, wake up
+ * the tx napi to consume the xsk tx queue, because the tx
+ * interrupt may not be triggered.
+ */
+ xsk_wakeup(sq);
+}
+
static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
struct send_queue *sq,
struct xdp_frame *xdpf)
@@ -1609,8 +1642,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
}
/* Free up any pending old buffers before queueing new ones. */
- __free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq),
- false, &stats);
+ virtnet_free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq),
+ false, &stats);
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
--
2.32.0.3.g01195cf9f