[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CACGkMEtgZ_=L2noqdADgNTr_E7s3adw=etvcFt3G7ZERQq43_g@mail.gmail.com>
Date: Wed, 31 May 2023 09:07:25 +0800
From: Jason Wang <jasowang@...hat.com>
To: "Michael S. Tsirkin" <mst@...hat.com>
Cc: xuanzhuo@...ux.alibaba.com, davem@...emloft.net, edumazet@...gle.com,
kuba@...nel.org, pabeni@...hat.com, virtualization@...ts.linux-foundation.org,
netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
alvaro.karsz@...id-run.com
Subject: Re: [PATCH V3 net-next 1/2] virtio-net: convert rx mode setting to
use workqueue
On Mon, May 29, 2023 at 9:21 AM Jason Wang <jasowang@...hat.com> wrote:
>
> On Sun, May 28, 2023 at 7:39 PM Michael S. Tsirkin <mst@...hat.com> wrote:
> >
> > On Fri, May 26, 2023 at 09:31:34AM +0800, Jason Wang wrote:
> > > On Thu, May 25, 2023 at 3:41 PM Michael S. Tsirkin <mst@...hat.com> wrote:
> > > >
> > > > On Thu, May 25, 2023 at 11:43:34AM +0800, Jason Wang wrote:
> > > > > On Wed, May 24, 2023 at 5:15 PM Michael S. Tsirkin <mst@...hat.com> wrote:
> > > > > >
> > > > > > On Wed, May 24, 2023 at 04:18:41PM +0800, Jason Wang wrote:
> > > > > > > This patch converts rx mode setting to be done in a workqueue. This is
> > > > > > > a must to allow sleeping when waiting for the cvq command to
> > > > > > > respond, since the current code is executed under the addr spin lock.
> > > > > > >
> > > > > > > Signed-off-by: Jason Wang <jasowang@...hat.com>
> > > > > > > ---
> > > > > > > Changes since V1:
> > > > > > > - use RTNL to synchronize rx mode worker
> > > > > > > ---
> > > > > > > drivers/net/virtio_net.c | 55 +++++++++++++++++++++++++++++++++++++---
> > > > > > > 1 file changed, 52 insertions(+), 3 deletions(-)
> > > > > > >
> > > > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > > > > index 56ca1d270304..5d2f1da4eaa0 100644
> > > > > > > --- a/drivers/net/virtio_net.c
> > > > > > > +++ b/drivers/net/virtio_net.c
> > > > > > > @@ -265,6 +265,12 @@ struct virtnet_info {
> > > > > > > /* Work struct for config space updates */
> > > > > > > struct work_struct config_work;
> > > > > > >
> > > > > > > + /* Work struct for config rx mode */
> > > > > >
> > > > > > With a bit less abbreviation maybe? setting rx mode?
> > > > >
> > > > > That's fine.
> > > > >
> > > > > >
> > > > > > > + struct work_struct rx_mode_work;
> > > > > > > +
> > > > > > > + /* Is rx mode work enabled? */
> > > > > >
> > > > > > Ugh not a great comment.
> > > > >
> > > > > Any suggestions for this. E.g we had:
> > > > >
> > > > > /* Is delayed refill enabled? */
> > > >
> > > > /* OK to queue work setting RX mode? */
> > >
> > > Ok.
> > >
> > > >
> > > >
> > > > > >
> > > > > > > + bool rx_mode_work_enabled;
> > > > > > > +
> > > > > >
> > > > > >
> > > > > >
> > > > > > > /* Does the affinity hint is set for virtqueues? */
> > > > > > > bool affinity_hint_set;
> > > > > > >
> > > > > > > @@ -388,6 +394,20 @@ static void disable_delayed_refill(struct virtnet_info *vi)
> > > > > > > spin_unlock_bh(&vi->refill_lock);
> > > > > > > }
> > > > > > >
> > > > > > > +static void enable_rx_mode_work(struct virtnet_info *vi)
> > > > > > > +{
> > > > > > > + rtnl_lock();
> > > > > > > + vi->rx_mode_work_enabled = true;
> > > > > > > + rtnl_unlock();
> > > > > > > +}
> > > > > > > +
> > > > > > > +static void disable_rx_mode_work(struct virtnet_info *vi)
> > > > > > > +{
> > > > > > > + rtnl_lock();
> > > > > > > + vi->rx_mode_work_enabled = false;
> > > > > > > + rtnl_unlock();
> > > > > > > +}
> > > > > > > +
> > > > > > > static void virtqueue_napi_schedule(struct napi_struct *napi,
> > > > > > > struct virtqueue *vq)
> > > > > > > {
> > > > > > > @@ -2341,9 +2361,11 @@ static int virtnet_close(struct net_device *dev)
> > > > > > > return 0;
> > > > > > > }
> > > > > > >
> > > > > > > -static void virtnet_set_rx_mode(struct net_device *dev)
> > > > > > > +static void virtnet_rx_mode_work(struct work_struct *work)
> > > > > > > {
> > > > > > > - struct virtnet_info *vi = netdev_priv(dev);
> > > > > > > + struct virtnet_info *vi =
> > > > > > > + container_of(work, struct virtnet_info, rx_mode_work);
> > > > > > > + struct net_device *dev = vi->dev;
> > > > > > > struct scatterlist sg[2];
> > > > > > > struct virtio_net_ctrl_mac *mac_data;
> > > > > > > struct netdev_hw_addr *ha;
> > > > > > > @@ -2356,6 +2378,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> > > > > > > if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
> > > > > > > return;
> > > > > > >
> > > > > > > + rtnl_lock();
> > > > > > > +
> > > > > > > vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
> > > > > > > vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
> > > > > > >
> > > > > > > @@ -2373,14 +2397,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> > > > > > > dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
> > > > > > > vi->ctrl->allmulti ? "en" : "dis");
> > > > > > >
> > > > > > > + netif_addr_lock_bh(dev);
> > > > > > > +
> > > > > > > uc_count = netdev_uc_count(dev);
> > > > > > > mc_count = netdev_mc_count(dev);
> > > > > > > /* MAC filter - use one buffer for both lists */
> > > > > > > buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
> > > > > > > (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
> > > > > > > mac_data = buf;
> > > > > > > - if (!buf)
> > > > > > > + if (!buf) {
> > > > > > > + netif_addr_unlock_bh(dev);
> > > > > > > + rtnl_unlock();
> > > > > > > return;
> > > > > > > + }
> > > > > > >
> > > > > > > sg_init_table(sg, 2);
> > > > > > >
> > > > > > > @@ -2401,6 +2430,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> > > > > > > netdev_for_each_mc_addr(ha, dev)
> > > > > > > memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
> > > > > > >
> > > > > > > + netif_addr_unlock_bh(dev);
> > > > > > > +
> > > > > > > sg_set_buf(&sg[1], mac_data,
> > > > > > > sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
> > > > > > >
> > > > > > > @@ -2408,9 +2439,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> > > > > > > VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
> > > > > > > dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
> > > > > > >
> > > > > > > + rtnl_unlock();
> > > > > > > +
> > > > > > > kfree(buf);
> > > > > > > }
> > > > > > >
> > > > > > > +static void virtnet_set_rx_mode(struct net_device *dev)
> > > > > > > +{
> > > > > > > + struct virtnet_info *vi = netdev_priv(dev);
> > > > > > > +
> > > > > > > + if (vi->rx_mode_work_enabled)
> > > > > > > + schedule_work(&vi->rx_mode_work);
> > > > > > > +}
> > > > > > > +
> > > > > >
> > > > > > > static int virtnet_vlan_rx_add_vid(struct net_device *dev,
> > > > > > > __be16 proto, u16 vid)
> > > > > > > {
> > > > > > > @@ -3181,6 +3222,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
> > > > > > >
> > > > > > > /* Make sure no work handler is accessing the device */
> > > > > > > flush_work(&vi->config_work);
> > > > > > > + disable_rx_mode_work(vi);
> > > > > > > + flush_work(&vi->rx_mode_work);
> > > > > > >
> > > > > > > netif_tx_lock_bh(vi->dev);
> > > > > > > netif_device_detach(vi->dev);
> > > > > >
> > > > > > Hmm so queued rx mode work will just get skipped
> > > > > > and on restore we get a wrong rx mode.
> > > > > > Any way to make this more robust?
> > > > >
> > > > > It could be done by scheduling a work on restore.
> > >
> > > Rethinking this, I think we don't need to care about this case since the
> > > user processes should have been frozen.
> >
> > Yes but not the workqueue. Want to switch to system_freezable_wq?
>
> Yes, I will do it in v2.
Actually, this doesn't work. A freezable workqueue can only guarantee
that, once frozen, new work will be queued but not scheduled until
thaw. So the kthread that is executing the workqueue is not freezable.
The busy loop (even with cond_resched()) will block suspend in this
case.
I wonder if we should switch to using a dedicated kthread for
virtio-net; then we could allow it to be frozen.
Thanks
>
> Thanks
>
> >
> > > And that the reason we don't
> > > even need to hold RTNL here.
> > >
> > > Thanks
> > >
> > > > >
> > > > > Thanks
> > > >
> > > >
> > > > > >
> > > > > >
> > > > > > > @@ -3203,6 +3246,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
> > > > > > > virtio_device_ready(vdev);
> > > > > > >
> > > > > > > enable_delayed_refill(vi);
> > > > > > > + enable_rx_mode_work(vi);
> > > > > > >
> > > > > > > if (netif_running(vi->dev)) {
> > > > > > > err = virtnet_open(vi->dev);
> > > > > > > @@ -4002,6 +4046,7 @@ static int virtnet_probe(struct virtio_device *vdev)
> > > > > > > vdev->priv = vi;
> > > > > > >
> > > > > > > INIT_WORK(&vi->config_work, virtnet_config_changed_work);
> > > > > > > + INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
> > > > > > > spin_lock_init(&vi->refill_lock);
> > > > > > >
> > > > > > > if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
> > > > > > > @@ -4110,6 +4155,8 @@ static int virtnet_probe(struct virtio_device *vdev)
> > > > > > > if (vi->has_rss || vi->has_rss_hash_report)
> > > > > > > virtnet_init_default_rss(vi);
> > > > > > >
> > > > > > > + enable_rx_mode_work(vi);
> > > > > > > +
> > > > > > > /* serialize netdev register + virtio_device_ready() with ndo_open() */
> > > > > > > rtnl_lock();
> > > > > > >
> > > > > > > @@ -4207,6 +4254,8 @@ static void virtnet_remove(struct virtio_device *vdev)
> > > > > > >
> > > > > > > /* Make sure no work handler is accessing the device. */
> > > > > > > flush_work(&vi->config_work);
> > > > > > > + disable_rx_mode_work(vi);
> > > > > > > + flush_work(&vi->rx_mode_work);
> > > > > > >
> > > > > > > unregister_netdev(vi->dev);
> > > > > > >
> > > > > > > --
> > > > > > > 2.25.1
> > > > > >
> > > >
> >
Powered by blists - more mailing lists