Message-Id: <20240509044747.101237-5-hengqi@linux.alibaba.com>
Date: Thu, 9 May 2024 12:47:47 +0800
From: Heng Qi <hengqi@...ux.alibaba.com>
To: netdev@...r.kernel.org,
virtualization@...ts.linux.dev
Cc: Jakub Kicinski <kuba@...nel.org>,
"David S . Miller" <davem@...emloft.net>,
Paolo Abeni <pabeni@...hat.com>,
Eric Dumazet <edumazet@...gle.com>,
Jason Wang <jasowang@...hat.com>,
"Michael S . Tsirkin" <mst@...hat.com>,
Brett Creeley <bcreeley@....com>,
Ratheesh Kannoth <rkannoth@...vell.com>,
Alexander Lobakin <aleksander.lobakin@...el.com>,
Xuan Zhuo <xuanzhuo@...ux.alibaba.com>,
Tal Gilboa <talgi@...dia.com>,
Jonathan Corbet <corbet@....net>,
linux-doc@...r.kernel.org,
Maxime Chevallier <maxime.chevallier@...tlin.com>,
Jiri Pirko <jiri@...nulli.us>,
Paul Greenwalt <paul.greenwalt@...el.com>,
Ahmed Zaki <ahmed.zaki@...el.com>,
Vladimir Oltean <vladimir.oltean@....com>,
Kory Maincent <kory.maincent@...tlin.com>,
Andrew Lunn <andrew@...n.ch>,
justinstitt@...gle.com,
donald.hunter@...il.com
Subject: [PATCH net-next v13 4/4] virtio-net: support dim profile fine-tuning
Virtio-net has different types of back-end device implementations.
To let the dim library perform well across these different
implementations, use the newly added interfaces to initialize and to
query dim results from a customized profile list.
Signed-off-by: Heng Qi <hengqi@...ux.alibaba.com>
---
drivers/net/virtio_net.c | 47 ++++++++++++++++++++++++++++++++++------
1 file changed, 40 insertions(+), 7 deletions(-)
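
Note for reviewers: a minimal sketch of how the net_dim_* helpers added
earlier in this series are expected to be wired into a driver. The
"foo_*" structures, the queue-to-private back-pointer and the
device-programming step are illustrative assumptions; only the
net_dim_*() calls and DIM_* flags are taken from the diff below.

#include <linux/dim.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

/* Illustrative per-RX-queue and per-device state. */
struct foo_rq {
	struct dim dim;			/* per-queue dim state */
	struct foo_priv *priv;		/* back-pointer, for dev access */
};

struct foo_priv {
	struct net_device *dev;
	struct foo_rq *rq;
	int nr_rxqs;
};

/* DIM worker: pull the suggested moderation values from the
 * (possibly user-tuned) per-device profile rather than from the
 * global static table, then program them into the device.
 */
static void foo_rx_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct foo_rq *rq = container_of(dim, struct foo_rq, dim);
	struct dim_cq_moder m;

	m = net_dim_get_rx_irq_moder(rq->priv->dev, dim);
	/* ... program m.usec / m.pkts into the device here ... */
	dim->state = DIM_START_MEASURE;
}

/* Probe path: register an RX profile keyed on usecs and packets,
 * starting in EQE mode, then bind every RX queue's dim context to it.
 */
static int foo_init_irq_moder(struct foo_priv *priv)
{
	int ret, i;

	ret = net_dim_init_irq_moder(priv->dev, DIM_PROFILE_RX,
				     DIM_COALESCE_USEC | DIM_COALESCE_PKTS,
				     DIM_CQ_PERIOD_MODE_START_FROM_EQE,
				     0, foo_rx_dim_work, NULL);
	if (ret)
		return ret;

	for (i = 0; i < priv->nr_rxqs; i++)
		net_dim_setting(priv->dev, &priv->rq[i].dim, false);

	return 0;
}

/* Teardown path: stop per-queue dim work, then free the profiles. */
static void foo_free_irq_moder(struct foo_priv *priv)
{
	int i;

	for (i = 0; i < priv->nr_rxqs; i++)
		net_dim_work_cancel(&priv->rq[i].dim);

	rtnl_lock();
	net_dim_free_irq_moder(priv->dev);
	rtnl_unlock();
}
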
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 218a446c4c27..9e0624ae962d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2447,7 +2447,7 @@ static int virtnet_open(struct net_device *dev)
for (i--; i >= 0; i--) {
virtnet_disable_queue_pair(vi, i);
- cancel_work_sync(&vi->rq[i].dim.work);
+ net_dim_work_cancel(&vi->rq[i].dim);
}
return err;
@@ -2613,7 +2613,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
if (running) {
napi_disable(&rq->napi);
- cancel_work_sync(&rq->dim.work);
+ net_dim_work_cancel(&rq->dim);
}
err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
@@ -2873,7 +2873,7 @@ static int virtnet_close(struct net_device *dev)
for (i = 0; i < vi->max_queue_pairs; i++) {
virtnet_disable_queue_pair(vi, i);
- cancel_work_sync(&vi->rq[i].dim.work);
+ net_dim_work_cancel(&vi->rq[i].dim);
}
return 0;
@@ -4403,7 +4403,7 @@ static void virtnet_rx_dim_work(struct work_struct *work)
if (!rq->dim_enabled)
goto out;
- update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ update_moder = net_dim_get_rx_irq_moder(dev, dim);
if (update_moder.usec != rq->intr_coal.max_usecs ||
update_moder.pkts != rq->intr_coal.max_packets) {
err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
@@ -5101,6 +5101,36 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
}
+static int virtnet_init_irq_moder(struct virtnet_info *vi)
+{
+ u8 profile_flags = 0, coal_flags = 0;
+ int ret, i;
+
+ profile_flags |= DIM_PROFILE_RX;
+ coal_flags |= DIM_COALESCE_USEC | DIM_COALESCE_PKTS;
+ ret = net_dim_init_irq_moder(vi->dev, profile_flags, coal_flags,
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE,
+ 0, virtnet_rx_dim_work, NULL);
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ net_dim_setting(vi->dev, &vi->rq[i].dim, false);
+
+ return 0;
+}
+
+static void virtnet_free_irq_moder(struct virtnet_info *vi)
+{
+ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
+ return;
+
+ rtnl_lock();
+ net_dim_free_irq_moder(vi->dev);
+ rtnl_unlock();
+}
+
static const struct net_device_ops virtnet_netdev = {
.ndo_open = virtnet_open,
.ndo_stop = virtnet_close,
@@ -5380,9 +5410,6 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
virtnet_poll_tx,
napi_tx ? napi_weight : 0);
- INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work);
- vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
-
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
@@ -5803,6 +5830,10 @@ static int virtnet_probe(struct virtio_device *vdev)
for (i = 0; i < vi->max_queue_pairs; i++)
if (vi->sq[i].napi.weight)
vi->sq[i].intr_coal.max_packets = 1;
+
+ err = virtnet_init_irq_moder(vi);
+ if (err)
+ goto free;
}
#ifdef CONFIG_SYSFS
@@ -5954,6 +5985,8 @@ static void virtnet_remove(struct virtio_device *vdev)
disable_rx_mode_work(vi);
flush_work(&vi->rx_mode_work);
+ virtnet_free_irq_moder(vi);
+
unregister_netdev(vi->dev);
net_failover_destroy(vi->failover);
--
2.32.0.3.g01195cf9f