Message-ID: <20210519043201-mutt-send-email-mst@kernel.org>
Date: Wed, 19 May 2021 04:35:48 -0400
From: "Michael S. Tsirkin" <mst@...hat.com>
To: Dave Taht <dave.taht@...il.com>
Cc: Xianting Tian <xianting.tian@...ux.alibaba.com>,
Jason Wang <jasowang@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
virtualization@...ts.linux-foundation.org,
Linux Kernel Network Developers <netdev@...r.kernel.org>,
LKML <linux-kernel@...r.kernel.org>
Subject: Re: virtio_net: BQL?
On Mon, May 17, 2021 at 11:43:43AM -0700, Dave Taht wrote:
> Not really related to this patch, but is there some reason why virtio
> has no support for BQL?
So, just so you can try it out, I rebased my old patch.
XDP is handled incorrectly by it, so we shouldn't apply it as is,
but it should be good enough for you to see whether BQL helps.
Completely untested!
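
For reference, the core of BQL is just a pair of accounting calls:
every byte reported at transmit time with netdev_tx_sent_queue() must
eventually be reported back with netdev_tx_completed_queue(), and the
stack uses the sent/completed history to bound the bytes in flight per
tx queue. A minimal sketch of that pairing, outside virtio (the foo_*
names are made up; the two netdev_tx_*_queue() helpers are the real
API from include/linux/netdevice.h):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch only: foo_xmit()/foo_tx_complete() are hypothetical. */
static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* ... post the skb to the device ring here ... */

	/* Tell BQL these bytes are now in flight. */
	netdev_tx_sent_queue(txq, skb->len);
	return NETDEV_TX_OK;
}

/* Called from the tx completion path (irq or napi poll). */
static void foo_tx_complete(struct net_device *dev, unsigned int qnum,
			    unsigned int packets, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);

	/* BQL grows/shrinks its byte limit from this feedback;
	 * report exactly what was reported sent, or the counters drift.
	 */
	netdev_tx_completed_queue(txq, packets, bytes);
}

The patch below wires exactly that pairing into start_xmit() and
free_old_xmit_skbs().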
Signed-off-by: Michael S. Tsirkin <mst@...hat.com>
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7be93ca01650..4bfb682a20b2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -556,6 +556,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 			kicks = 1;
 	}
 out:
+	/* TODO: netdev_tx_completed_queue? */
 	u64_stats_update_begin(&sq->stats.syncp);
 	sq->stats.bytes += bytes;
 	sq->stats.packets += packets;
@@ -1376,7 +1377,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 	return stats.packets;
 }
 
-static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
+static void free_old_xmit_skbs(struct netdev_queue *txq, struct send_queue *sq, bool in_napi)
 {
 	unsigned int len;
 	unsigned int packets = 0;
@@ -1406,6 +1407,8 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
 	if (!packets)
 		return;
 
+	netdev_tx_completed_queue(txq, packets, bytes);
+
 	u64_stats_update_begin(&sq->stats.syncp);
 	sq->stats.bytes += bytes;
 	sq->stats.packets += packets;
@@ -1434,7 +1437,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
 
 	if (__netif_tx_trylock(txq)) {
 		virtqueue_disable_cb(sq->vq);
-		free_old_xmit_skbs(sq, true);
+		free_old_xmit_skbs(txq, sq, true);
 
 		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
 			netif_tx_wake_queue(txq);
@@ -1522,7 +1525,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 	txq = netdev_get_tx_queue(vi->dev, index);
 	__netif_tx_lock(txq, raw_smp_processor_id());
 	virtqueue_disable_cb(sq->vq);
-	free_old_xmit_skbs(sq, true);
+	free_old_xmit_skbs(txq, sq, true);
 
 	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
 		netif_tx_wake_queue(txq);
@@ -1606,10 +1609,11 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
 	bool kick = !netdev_xmit_more();
 	bool use_napi = sq->napi.weight;
+	unsigned int bytes = skb->len;
 
 	/* Free up any pending old buffers before queueing new ones. */
 	virtqueue_disable_cb(sq->vq);
-	free_old_xmit_skbs(sq, false);
+	free_old_xmit_skbs(txq, sq, false);
 
 	if (use_napi && kick)
 		virtqueue_enable_cb_delayed(sq->vq);
@@ -1638,6 +1642,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 		nf_reset_ct(skb);
 	}
 
+	netdev_tx_sent_queue(txq, bytes);
+
 	/* If running out of space, stop queue to avoid getting packets that we
 	 * are then unable to transmit.
 	 * An alternative would be to force queuing layer to requeue the skb by
@@ -1653,7 +1659,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (!use_napi &&
 	    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
 		/* More just got used, free them then recheck. */
-		free_old_xmit_skbs(sq, false);
+		free_old_xmit_skbs(txq, sq, false);
 		if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
 			netif_start_subqueue(dev, qnum);
 			virtqueue_disable_cb(sq->vq);
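
A note on the TODO in the first hunk, and on why XDP is handled
incorrectly: free_old_xmit_skbs() reaps both skbs and XDP frames off
the same virtqueue, so the netdev_tx_completed_queue() call added
there can report XDP bytes as completed that virtnet_xdp_xmit() never
reported as sent, and the BQL counters go out of balance. If the XDP
path were to be accounted too, the sent side might look roughly like
this (illustrative only; virtnet_xdp_xmit_accounted() is a made-up
name, __virtnet_xdp_xmit_one() is the existing per-frame helper):

/* Hypothetical, NOT part of the patch above: account queued XDP
 * frames so the netdev_tx_completed_queue() call in
 * free_old_xmit_skbs() has a matching netdev_tx_sent_queue().
 */
static int virtnet_xdp_xmit_accounted(struct virtnet_info *vi,
				      struct send_queue *sq,
				      struct netdev_queue *txq,
				      struct xdp_frame **frames, int n)
{
	unsigned int bytes = 0;
	int i, nxmit = 0;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
			break;
		bytes += xdpf->len;
		nxmit++;
	}
	/* Pairs with netdev_tx_completed_queue() on the reap side. */
	netdev_tx_sent_queue(txq, bytes);
	return nxmit;
}

The other option would be to skip XDP frames on the completion side as
well, so that neither direction touches the BQL counters.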