Message-ID: <20240402123656.GA1648445@maili.marvell.com>
Date: Tue, 2 Apr 2024 18:06:56 +0530
From: Ratheesh Kannoth <rkannoth@...vell.com>
To: Heng Qi <hengqi@...ux.alibaba.com>
CC: <netdev@...r.kernel.org>, <virtualization@...ts.linux.dev>,
	"David S. Miller" <davem@...emloft.net>,
	Eric Dumazet <edumazet@...gle.com>,
	Jakub Kicinski <kuba@...nel.org>,
	Paolo Abeni <pabeni@...hat.com>,
	Jason Wang <jasowang@...hat.com>,
	"Michael S. Tsirkin" <mst@...hat.com>,
	Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
Subject: Re: [PATCH net-next v3 2/3] virtio-net: refactor dim initialization/destruction

On 2024-04-02 at 17:43:07, Heng Qi (hengqi@...ux.alibaba.com) wrote:
> Extract the initialization and destruction actions
> of dim for use in the next patch.
>
> Signed-off-by: Heng Qi <hengqi@...ux.alibaba.com>
> ---
> drivers/net/virtio_net.c | 37 ++++++++++++++++++++++++++-----------
> 1 file changed, 26 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index e709d44..5c56fdc 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -2278,6 +2278,13 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
>  	return err;
>  }
>  
> +static void virtnet_dim_clean(struct virtnet_info *vi,
> +			      int start_qnum, int end_qnum)
> +{
> +	for (; start_qnum <= end_qnum; start_qnum++)
> +		cancel_work_sync(&vi->rq[start_qnum].dim.work);
> +}
> +
>  static int virtnet_open(struct net_device *dev)
>  {
>  	struct virtnet_info *vi = netdev_priv(dev);
> @@ -2301,11 +2308,9 @@ static int virtnet_open(struct net_device *dev)
>  err_enable_qp:
>  	disable_delayed_refill(vi);
>  	cancel_delayed_work_sync(&vi->refill);
> -
> -	for (i--; i >= 0; i--) {
> +	virtnet_dim_clean(vi, 0, i);
> +	for (i--; i >= 0; i--)
>  		virtnet_disable_queue_pair(vi, i);
The cancellation range changes here: virtnet_dim_clean(vi, 0, i) covers queues 0..i inclusive, whereas the old loop decremented i before its first iteration and so only covered queues 0..i-1.
Is that intentional? The commit message does not mention any functional change.
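
To illustrate (the failing index is hypothetical): suppose virtnet_enable_queue_pair() fails at i == 2, so only queue pairs 0 and 1 were enabled.

	/* old error path: i is decremented before the first iteration,
	 * so dim work is cancelled only for queues 0 and 1 -- the
	 * queue pairs that were actually enabled.
	 */
	for (i--; i >= 0; i--) {
		virtnet_disable_queue_pair(vi, i);
		cancel_work_sync(&vi->rq[i].dim.work);
	}

	/* new error path: end_qnum is inclusive, so this also cancels
	 * the dim work of queue 2, whose queue pair was never enabled.
	 */
	virtnet_dim_clean(vi, 0, i);
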
> -		cancel_work_sync(&vi->rq[i].dim.work);
> -	}
>  
>  	return err;
>  }
> @@ -2470,7 +2475,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
>  
>  	if (running) {
>  		napi_disable(&rq->napi);
> -		cancel_work_sync(&rq->dim.work);
> +		virtnet_dim_clean(vi, qindex, qindex);
>  	}
>  
>  	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
> @@ -2720,10 +2725,9 @@ static int virtnet_close(struct net_device *dev)
>  	/* Make sure refill_work doesn't re-enable napi! */
>  	cancel_delayed_work_sync(&vi->refill);
>  
> -	for (i = 0; i < vi->max_queue_pairs; i++) {
> +	virtnet_dim_clean(vi, 0, vi->max_queue_pairs - 1);
> +	for (i = 0; i < vi->max_queue_pairs; i++)
>  		virtnet_disable_queue_pair(vi, i);
> -		cancel_work_sync(&vi->rq[i].dim.work);
> -	}
>  
>  	return 0;
>  }
> @@ -4422,6 +4426,19 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
>  	return ret;
>  }
>  
> +static void virtnet_dim_init(struct virtnet_info *vi)
> +{
> +	int i;
> +
> +	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
> +		return;
> +
> +	for (i = 0; i < vi->max_queue_pairs; i++) {
> +		INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work);
> +		vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
> +	}
> +}
> +
>  static int virtnet_alloc_queues(struct virtnet_info *vi)
>  {
>  	int i;
> @@ -4441,6 +4458,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
>  		goto err_rq;
>  
>  	INIT_DELAYED_WORK(&vi->refill, refill_work);
> +	virtnet_dim_init(vi);
>  	for (i = 0; i < vi->max_queue_pairs; i++) {
>  		vi->rq[i].pages = NULL;
>  		netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
> @@ -4449,9 +4467,6 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
>  					 virtnet_poll_tx,
>  					 napi_tx ? napi_weight : 0);
>  
> -		INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work);
> -		vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
> -
>  		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
>  		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
>  		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
> --
> 1.8.3.1
>