[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID:
<CH0PR12MB8580CC83AEEE0DD58D4B1828C9092@CH0PR12MB8580.namprd12.prod.outlook.com>
Date: Mon, 15 Apr 2024 20:24:42 +0000
From: Dan Jurgens <danielj@...dia.com>
To: Heng Qi <hengqi@...ux.alibaba.com>, "netdev@...r.kernel.org"
<netdev@...r.kernel.org>
CC: "mst@...hat.com" <mst@...hat.com>, "jasowang@...hat.com"
<jasowang@...hat.com>, "xuanzhuo@...ux.alibaba.com"
<xuanzhuo@...ux.alibaba.com>, "virtualization@...ts.linux.dev"
<virtualization@...ts.linux.dev>, "davem@...emloft.net"
<davem@...emloft.net>, "edumazet@...gle.com" <edumazet@...gle.com>,
"kuba@...nel.org" <kuba@...nel.org>, "pabeni@...hat.com" <pabeni@...hat.com>,
Jiri Pirko <jiri@...dia.com>
Subject: RE: [PATCH net-next v3 5/6] virtio_net: Add a lock for per queue RX
coalesce
> From: Heng Qi <hengqi@...ux.alibaba.com>
> Sent: Monday, April 15, 2024 8:42 AM
> To: Dan Jurgens <danielj@...dia.com>; netdev@...r.kernel.org
> Cc: mst@...hat.com; jasowang@...hat.com; xuanzhuo@...ux.alibaba.com;
> virtualization@...ts.linux.dev; davem@...emloft.net;
> edumazet@...gle.com; kuba@...nel.org; pabeni@...hat.com; Jiri Pirko
> <jiri@...dia.com>
> Subject: Re: [PATCH net-next v3 5/6] virtio_net: Add a lock for per queue RX
> coalesce
>
>
>
> 在 2024/4/13 上午3:53, Daniel Jurgens 写道:
> > Once the RTNL locking around the control buffer is removed there can
> > be contention on the per queue RX interrupt coalescing data. Use a
> > spin lock per queue.
> >
> > Signed-off-by: Daniel Jurgens <danielj@...dia.com>
> > ---
> > drivers/net/virtio_net.c | 23 ++++++++++++++++-------
> > 1 file changed, 16 insertions(+), 7 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index b3aa4d2a15e9..8724caa7c2ed 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -190,6 +190,7 @@ struct receive_queue {
> > u32 packets_in_napi;
> >
> > struct virtnet_interrupt_coalesce intr_coal;
> > + spinlock_t intr_coal_lock;
> >
> > /* Chain pages by the private ptr. */
> > struct page *pages;
> > @@ -3087,11 +3088,13 @@ static int virtnet_set_ringparam(struct net_device *dev,
> > return err;
> >
> > /* The reason is same as the transmit virtqueue reset
> */
> > - err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
> > - vi->intr_coal_rx.max_usecs,
> > - vi->intr_coal_rx.max_packets);
> > - if (err)
> > - return err;
> > + scoped_guard(spinlock, &vi->rq[i].intr_coal_lock) {
> > + err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
> > + vi->intr_coal_rx.max_usecs,
> > + vi->intr_coal_rx.max_packets);
> > + if (err)
> > + return err;
> > + }
> > }
> > }
> >
> > @@ -3510,8 +3513,10 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
> > vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
> > vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
> > for (i = 0; i < vi->max_queue_pairs; i++) {
> > - vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
> > - vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
> > + scoped_guard(spinlock, &vi->rq[i].intr_coal_lock) {
> > + vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
> > + vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
> > + }
> > }
> >
> > return 0;
> > @@ -3542,6 +3547,7 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
> > u32 max_usecs, max_packets;
> > int err;
> >
> > + guard(spinlock)(&vi->rq[queue].intr_coal_lock);
> > max_usecs = vi->rq[queue].intr_coal.max_usecs;
> > max_packets = vi->rq[queue].intr_coal.max_packets;
> >
> > @@ -3606,6 +3612,7 @@ static void virtnet_rx_dim_work(struct work_struct *work)
> > if (!rq->dim_enabled)
> > goto out;
>
> We should also protect rq->dim_enabled access, incorrect values may be
> read in rx_dim_worker because it is modified in
> set_coalesce/set_per_queue_coalesce.
Good point. Thanks
>
> Thanks.
>
> >
> > + guard(spinlock)(&rq->intr_coal_lock);
> > update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
> > if (update_moder.usec != rq->intr_coal.max_usecs ||
> > update_moder.pkts != rq->intr_coal.max_packets) { @@ -3756,6
> > +3763,7 @@ static int virtnet_get_per_queue_coalesce(struct net_device
> *dev,
> > return -EINVAL;
> >
> > if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
> > + guard(spinlock)(&vi->rq[queue].intr_coal_lock);
> > ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
> > ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
> > ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
> > @@ -4501,6 +4509,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
> >
> > u64_stats_init(&vi->rq[i].stats.syncp);
> > u64_stats_init(&vi->sq[i].stats.syncp);
> > + spin_lock_init(&vi->rq[i].intr_coal_lock);
> > }
> >
> > return 0;
Powered by blists - more mailing lists