lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CACGkMEv4560JKv7UM6v6rADNMbhTv-86KJebAwYebFOkUndnPg@mail.gmail.com>
Date:   Fri, 11 Feb 2022 15:45:46 +0800
From:   Jason Wang <jasowang@...hat.com>
To:     Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
Cc:     "Michael S. Tsirkin" <mst@...hat.com>,
        virtualization <virtualization@...ts.linux-foundation.org>,
        linux-kernel <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v4 14/14] virtio_pci: queue_reset: support VIRTIO_F_RING_RESET

On Fri, Feb 11, 2022 at 3:24 PM Xuan Zhuo <xuanzhuo@...ux.alibaba.com> wrote:
>
> On Fri, 11 Feb 2022 15:05:40 +0800, Jason Wang <jasowang@...hat.com> wrote:
> >
> > 在 2022/2/9 下午8:29, Xuan Zhuo 写道:
> > > This patch implements virtio pci support for QUEUE RESET.
> > >
> > > Performing reset on a queue is divided into these steps:
> > >
> > > 1. reset_vq: reset one vq
> > > 2. recycle the buffer from vq by virtqueue_detach_unused_buf()
> > > 3. release the ring of the vq by vring_release_virtqueue()
> > > 4. enable_reset_vq: re-enable the reset queue
> > >
> > > This patch implements reset_vq, enable_reset_vq in the pci scenario
> > >
> > > Signed-off-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
> > > ---
> > >   drivers/virtio/virtio_pci_common.c |  8 +--
> > >   drivers/virtio/virtio_pci_modern.c | 80 ++++++++++++++++++++++++++++--
> > >   drivers/virtio/virtio_ring.c       |  2 +
> > >   include/linux/virtio.h             |  1 +
> > >   4 files changed, 85 insertions(+), 6 deletions(-)
> > >
> > > diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
> > > index cb01eb0cb2e4..303637ac4914 100644
> > > --- a/drivers/virtio/virtio_pci_common.c
> > > +++ b/drivers/virtio/virtio_pci_common.c
> > > @@ -255,9 +255,11 @@ static void vp_del_vq(struct virtqueue *vq)
> > >     struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
> > >     unsigned long flags;
> > >
> > > -   spin_lock_irqsave(&vp_dev->lock, flags);
> > > -   list_del(&info->node);
> > > -   spin_unlock_irqrestore(&vp_dev->lock, flags);
> > > +   if (!vq->reset) {
> > > +           spin_lock_irqsave(&vp_dev->lock, flags);
> > > +           list_del(&info->node);
> > > +           spin_unlock_irqrestore(&vp_dev->lock, flags);
> > > +   }
> > >
> > >     vp_dev->del_vq(info);
> > >     kfree(info);
> > > diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
> > > index d29d40bf0b45..cc45515eda50 100644
> > > --- a/drivers/virtio/virtio_pci_modern.c
> > > +++ b/drivers/virtio/virtio_pci_modern.c
> > > @@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
> > >     if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
> > >                     pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
> > >             __virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
> > > +
> > > +   if (features & BIT_ULL(VIRTIO_F_RING_RESET))
> > > +           __virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
> > >   }
> > >
> > >   /* virtio config->finalize_features() implementation */
> > > @@ -176,6 +179,70 @@ static void vp_reset(struct virtio_device *vdev)
> > >     vp_disable_cbs(vdev);
> > >   }
> > >
> > > +static int vp_modern_reset_vq(struct virtqueue *vq)
> > > +{
> > > +   struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> > > +   struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> > > +   struct virtio_pci_vq_info *info;
> > > +   unsigned long flags;
> > > +   u16 msix_vec;
> > > +
> > > +   if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
> > > +           return -ENOENT;
> > > +
> > > +   vp_modern_set_queue_reset(mdev, vq->index);
> > > +
> > > +   info = vp_dev->vqs[vq->index];
> > > +   msix_vec = info->msix_vector;
> > > +
> > > +   /* Disable VQ callback. */
> > > +   if (vp_dev->per_vq_vectors && msix_vec != VIRTIO_MSI_NO_VECTOR)
> > > +           disable_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec));
> >
> >
> > I think we need a comment to explain why per_vq_mode needs to be dealt
> > with differently.
>
> OK.
>
> >
> >
> > > +
> > > +   /* delete vq */
> > > +   spin_lock_irqsave(&vp_dev->lock, flags);
> > > +   list_del(&info->node);
> > > +   spin_unlock_irqrestore(&vp_dev->lock, flags);
> >
> >
> > So I don't see where vring is freed and vp_setup_vq() may try to
> > allocate new memory, won't it be a memory leak in this case?
>
> 1. reset_vq: reset one vq
> 2. recycle the buffer from vq by virtqueue_detach_unused_buf()
> 3. release the ring of the vq by vring_release_virtqueue()
> 4. enable_reset_vq: re-enable the reset queue
>
> vring_release_virtqueue() (#8 patch) will release the vring.
> That is called by the driver.
>
> I think I should add a check to vp_modern_enable_reset_vq() that
> vring_release_virtqueue() has already been called.

I wonder if we can have a better API.

Considering that we know there's a requirement for vring re-allocation, I
wonder about adding per-vq config ops like:

del_vq()
find_vq()

We can limit their use to the window after a virtqueue has been reset and
before it is re-enabled. We can do a full re-allocation of the resources,
e.g. the interrupt (if some code could be reused).

Then a driver can do
reset_vq()
detach_unused_buf()
del_vq
find_vq() /* with new parameters like ring_num and others like find_vqs() */
enable_reset_vq()

?

Thanks

>
> Thanks
>
> >
> > Thanks
> >
> >
> > > +
> > > +   vq->reset = true;
> > > +
> > > +   INIT_LIST_HEAD(&info->node);
> > > +
> > > +   return 0;
> > > +}
> > > +
> > > +static int vp_modern_enable_reset_vq(struct virtqueue *vq, u16 ring_num)
> > > +{
> > > +   struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> > > +   struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> > > +   struct virtio_pci_vq_info *info;
> > > +   struct virtqueue *_vq;
> > > +   u16 msix_vec;
> > > +
> > > +   if (!vq->reset)
> > > +           return -EPERM;
> > > +
> > > +   /* check queue reset status */
> > > +   if (vp_modern_get_queue_reset(mdev, vq->index) != 1)
> > > +           return -EBUSY;
> > > +
> > > +   info = vp_dev->vqs[vq->index];
> > > +   _vq = vp_setup_vq(vq->vdev, vq->index, NULL, NULL, NULL,
> > > +                    info->msix_vector, ring_num);
> > > +   if (IS_ERR(_vq)) {
> > > +           vq->reset = true;
> > > +           return PTR_ERR(_vq);
> > > +   }
> > > +
> > > +   vp_modern_set_queue_enable(&vp_dev->mdev, vq->index, true);
> > > +
> > > +   msix_vec = vp_dev->vqs[vq->index]->msix_vector;
> > > +   if (vp_dev->per_vq_vectors && msix_vec != VIRTIO_MSI_NO_VECTOR)
> > > +           enable_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec));
> > > +
> > > +   return 0;
> > > +}
> > > +
> > >   static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
> > >   {
> > >     return vp_modern_config_vector(&vp_dev->mdev, vector);
> > > @@ -231,10 +298,13 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
> > >                             virtqueue_get_avail_addr(vq),
> > >                             virtqueue_get_used_addr(vq));
> > >
> > > -   vq->priv = (void __force *)vp_modern_map_vq_notify(mdev, index, NULL);
> > >     if (!vq->priv) {
> > > -           err = -ENOMEM;
> > > -           goto err_map_notify;
> > > +           vq->priv = (void __force *)vp_modern_map_vq_notify(mdev, index,
> > > +                                                              NULL);
> > > +           if (!vq->priv) {
> > > +                   err = -ENOMEM;
> > > +                   goto err_map_notify;
> > > +           }
> >
> >
> > This seems unrelated or an artifact of previous patches?
> >
> > Thanks
> >
> >
> > >     }
> > >
> > >     if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
> > > @@ -402,6 +472,8 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
> > >     .set_vq_affinity = vp_set_vq_affinity,
> > >     .get_vq_affinity = vp_get_vq_affinity,
> > >     .get_shm_region  = vp_get_shm_region,
> > > +   .reset_vq        = vp_modern_reset_vq,
> > > +   .enable_reset_vq = vp_modern_enable_reset_vq,
> > >   };
> > >
> > >   static const struct virtio_config_ops virtio_pci_config_ops = {
> > > @@ -420,6 +492,8 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
> > >     .set_vq_affinity = vp_set_vq_affinity,
> > >     .get_vq_affinity = vp_get_vq_affinity,
> > >     .get_shm_region  = vp_get_shm_region,
> > > +   .reset_vq        = vp_modern_reset_vq,
> > > +   .enable_reset_vq = vp_modern_enable_reset_vq,
> > >   };
> > >
> > >   /* the PCI probing function */
> > > diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> > > index b8747df8dc1f..4f6028e1e2d9 100644
> > > --- a/drivers/virtio/virtio_ring.c
> > > +++ b/drivers/virtio/virtio_ring.c
> > > @@ -1731,6 +1731,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
> > >     vq->vq.vdev = vdev;
> > >     vq->vq.num_free = num;
> > >     vq->vq.index = index;
> > > +   vq->vq.reset = false;
> > >     vq->we_own_ring = true;
> > >     vq->notify = notify;
> > >     vq->weak_barriers = weak_barriers;
> > > @@ -2220,6 +2221,7 @@ static int __vring_init_virtqueue(struct virtqueue *_vq,
> > >     vq->vq.vdev = vdev;
> > >     vq->vq.num_free = vring.num;
> > >     vq->vq.index = index;
> > > +   vq->vq.reset = false;
> > >     vq->we_own_ring = false;
> > >     vq->notify = notify;
> > >     vq->weak_barriers = weak_barriers;
> > > diff --git a/include/linux/virtio.h b/include/linux/virtio.h
> > > index dd1657c3a488..5d4817d79f3f 100644
> > > --- a/include/linux/virtio.h
> > > +++ b/include/linux/virtio.h
> > > @@ -32,6 +32,7 @@ struct virtqueue {
> > >     unsigned int index;
> > >     unsigned int num_free;
> > >     void *priv;
> > > +   bool reset;
> > >   };
> > >
> > >   int virtqueue_add_outbuf(struct virtqueue *vq,
> >
>

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ