Message-ID: <1644386755.446152-1-xuanzhuo@linux.alibaba.com>
Date: Wed, 9 Feb 2022 14:05:55 +0800
From: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
To: Jason Wang <jasowang@...hat.com>
Cc: "Michael S. Tsirkin" <mst@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>, bpf@...r.kernel.org,
virtualization <virtualization@...ts.linux-foundation.org>,
netdev <netdev@...r.kernel.org>
Subject: Re: [PATCH v3 13/17] virtio_pci: queue_reset: support VIRTIO_F_RING_RESET

On Wed, 9 Feb 2022 13:44:06 +0800, Jason Wang <jasowang@...hat.com> wrote:
> On Tue, Feb 8, 2022 at 3:44 PM Xuan Zhuo <xuanzhuo@...ux.alibaba.com> wrote:
> >
> > On Tue, 8 Feb 2022 10:55:37 +0800, Jason Wang <jasowang@...hat.com> wrote:
> > > On Mon, Feb 7, 2022 at 4:19 PM Xuan Zhuo <xuanzhuo@...ux.alibaba.com> wrote:
> > > >
> > > > On Mon, 7 Feb 2022 14:57:13 +0800, Jason Wang <jasowang@...hat.com> wrote:
> > > > >
> > > > > 在 2022/1/26 下午3:35, Xuan Zhuo 写道:
> > > > > > This patch implements virtio pci support for QUEUE RESET.
> > > > > >
> > > > > > Performing a reset on a queue is divided into two steps:
> > > > > >
> > > > > > 1. reset_vq: reset one vq
> > > > > > 2. enable_reset_vq: re-enable the reset queue
> > > > > >
> > > > > > In the first step, these tasks will be completed:
> > > > > > 1. notify the hardware queue to reset
> > > > > > 2. recycle the buffer from vq
> > > > > > 3. release the ring of the vq
> > > > > >
> > > > > > The process of enabling the reset vq:
> > > > > > vp_modern_enable_reset_vq()
> > > > > > vp_enable_reset_vq()
> > > > > > __vp_setup_vq()
> > > > > > setup_vq()
> > > > > > vring_setup_virtqueue()
> > > > > >
> > > > > > In this process, we added two parameters, vq and num, and finally passed
> > > > > > them to vring_setup_virtqueue(), reusing the original info and vq.
> > > > > >
> > > > > > Signed-off-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
> > > > > > ---
> > > > > > drivers/virtio/virtio_pci_common.c | 36 +++++++----
> > > > > > drivers/virtio/virtio_pci_common.h | 5 ++
> > > > > > drivers/virtio/virtio_pci_modern.c | 100 +++++++++++++++++++++++++++++
> > > > > > 3 files changed, 128 insertions(+), 13 deletions(-)
> > > > > >
> > > > > > diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
> > > > > > index c02936d29a31..ad21638fbf66 100644
> > > > > > --- a/drivers/virtio/virtio_pci_common.c
> > > > > > +++ b/drivers/virtio/virtio_pci_common.c
> > > > > > @@ -205,23 +205,28 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
> > > > > > return err;
> > > > > > }
> > > > > >
> > > > > > -static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
> > > > > > - void (*callback)(struct virtqueue *vq),
> > > > > > - const char *name,
> > > > > > - bool ctx,
> > > > > > - u16 msix_vec, u16 num)
> > > > > > +struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
> > > > > > + void (*callback)(struct virtqueue *vq),
> > > > > > + const char *name,
> > > > > > + bool ctx,
> > > > > > + u16 msix_vec, u16 num)
> > > > > > {
> > > > > > struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> > > > > > - struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
> > > > > > + struct virtio_pci_vq_info *info;
> > > > > > struct virtqueue *vq;
> > > > > > unsigned long flags;
> > > > > >
> > > > > > - /* fill out our structure that represents an active queue */
> > > > > > - if (!info)
> > > > > > - return ERR_PTR(-ENOMEM);
> > > > > > + info = vp_dev->vqs[index];
> > > > > > + if (!info) {
> > > > > > + info = kzalloc(sizeof *info, GFP_KERNEL);
> > > > > > +
> > > > > > + /* fill out our structure that represents an active queue */
> > > > > > + if (!info)
> > > > > > + return ERR_PTR(-ENOMEM);
> > > > > > + }
> > > > > >
> > > > > > vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
> > > > > > - msix_vec, NULL, num);
> > > > > > + msix_vec, info->vq, num);
> > > > > > if (IS_ERR(vq))
> > > > > > goto out_info;
> > > > > >
> > > > > > @@ -238,6 +243,9 @@ static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
> > > > > > return vq;
> > > > > >
> > > > > > out_info:
> > > > > > + if (info->vq && info->vq->reset)
> > > > > > + return vq;
> > > > > > +
> > > > > > kfree(info);
> > > > > > return vq;
> > > > > > }
> > > > > > @@ -248,9 +256,11 @@ static void vp_del_vq(struct virtqueue *vq)
> > > > > > struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
> > > > > > unsigned long flags;
> > > > > >
> > > > > > - spin_lock_irqsave(&vp_dev->lock, flags);
> > > > > > - list_del(&info->node);
> > > > > > - spin_unlock_irqrestore(&vp_dev->lock, flags);
> > > > > > + if (!vq->reset) {
> > > > > > + spin_lock_irqsave(&vp_dev->lock, flags);
> > > > > > + list_del(&info->node);
> > > > > > + spin_unlock_irqrestore(&vp_dev->lock, flags);
> > > > > > + }
> > > > > >
> > > > > > vp_dev->del_vq(info);
> > > > > > kfree(info);
> > > > > > diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
> > > > > > index 65db92245e41..c1d15f7c0be4 100644
> > > > > > --- a/drivers/virtio/virtio_pci_common.h
> > > > > > +++ b/drivers/virtio/virtio_pci_common.h
> > > > > > @@ -119,6 +119,11 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
> > > > > > struct virtqueue *vqs[], vq_callback_t *callbacks[],
> > > > > > const char * const names[], const bool *ctx,
> > > > > > struct irq_affinity *desc);
> > > > > > +struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
> > > > > > + void (*callback)(struct virtqueue *vq),
> > > > > > + const char *name,
> > > > > > + bool ctx,
> > > > > > + u16 msix_vec, u16 num);
> > > > > > const char *vp_bus_name(struct virtio_device *vdev);
> > > > > >
> > > > > > /* Setup the affinity for a virtqueue:
> > > > > > diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
> > > > > > index 2ce58de549de..6789411169e4 100644
> > > > > > --- a/drivers/virtio/virtio_pci_modern.c
> > > > > > +++ b/drivers/virtio/virtio_pci_modern.c
> > > > > > @@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
> > > > > > if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
> > > > > > pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
> > > > > > __virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
> > > > > > +
> > > > > > + if (features & BIT_ULL(VIRTIO_F_RING_RESET))
> > > > > > + __virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
> > > > > > }
> > > > > >
> > > > > > /* virtio config->finalize_features() implementation */
> > > > > > @@ -176,6 +179,94 @@ static void vp_reset(struct virtio_device *vdev)
> > > > > > vp_disable_cbs(vdev);
> > > > > > }
> > > > > >
> > > > > > +static int vp_modern_reset_vq(struct virtio_reset_vq *param)
> > > > > > +{
> > > > > > + struct virtio_pci_device *vp_dev = to_vp_device(param->vdev);
> > > > > > + struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> > > > > > + struct virtio_pci_vq_info *info;
> > > > > > + u16 msix_vec, queue_index;
> > > > > > + unsigned long flags;
> > > > > > + void *buf;
> > > > > > +
> > > > > > + if (!virtio_has_feature(param->vdev, VIRTIO_F_RING_RESET))
> > > > > > + return -ENOENT;
> > > > > > +
> > > > > > + queue_index = param->queue_index;
> > > > > > +
> > > > > > + vp_modern_set_queue_reset(mdev, queue_index);
> > > > > > +
> > > > > > + /* After write 1 to queue reset, the driver MUST wait for a read of
> > > > > > + * queue reset to return 1.
> > > > > > + */
> > > > > > + while (vp_modern_get_queue_reset(mdev, queue_index) != 1)
> > > > > > + msleep(1);
> > > > >
> > > > >
> > > > > Is this better to move this logic into vp_modern_set_queue_reset()?
> > > > >
> > > >
> > > > OK.
> > > >
> > > > >
> > > > > > +
> > > > > > + info = vp_dev->vqs[queue_index];
> > > > > > + msix_vec = info->msix_vector;
> > > > > > +
> > > > > > + /* Disable VQ callback. */
> > > > > > + if (vp_dev->per_vq_vectors && msix_vec != VIRTIO_MSI_NO_VECTOR)
> > > > > > + disable_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec));
> > > > >
> > > > >
> > > > > How about the INTX case where irq is shared? I guess we need to disable
> > > > > and enable the irq as well.
> > > >
> > > > For the INTX scenario, I don't think we need to disable/enable the irq. But I did
> > > > make a mistake: I should move the following list_del(&info->node) here, so that
> > > > when the irq arrives, it will no longer operate on this vq.
> > > >
> > > > In fact, for the non-INTX case, the disable_irq here is not necessary; according to
> > > > the requirements of the spec, after reset the device should not generate irqs
> > > > anymore.
> > >
> > > I may be missing something, but I don't think INTX differs from MSI from the
> > > vq handler's perspective.
> > >
> > > > It's here just to prevent accidents.
> > >
> > > The problem is that if you don't disable/sync IRQ there could be a
> > > race between the vq irq handler and the virtqueue_detach_unused_buf()?
> > >
> > > >
> > > > >
> > > > >
> > > > > > +
> > > > > > + while ((buf = virtqueue_detach_unused_buf(info->vq)) != NULL)
> > > > > > + param->free_unused_cb(param, buf);
> > > > >
> > > > >
> > > > > Any reason that we can't leave this logic to driver? (Or is there any
> > > > > operations that must be done before the following operations?)
> > > >
> > > > As you commented before, we merged freeing the unused buffers and resetting the queue.
> > > >
> > > > I think it's a good point; otherwise, we're going to:
> > > >
> > > > 1. reset vq
> > > > 2. free unused buffers (by the driver)
> > > > 3. free the ring of the vq
> > >
> > > Rethinking about this, I'd prefer to leave it to the driver for consistency.
> > >
> > > E.g. virtqueue_detach_unused_buf() is called by each driver nowadays.
> >
> > In this case, I'll go back to my previous design and add three helpers:
> >
> > int (*reset_vq)();
> > int (*free_reset_vq)();
>
> So this is only needed if there are any transport-specific operations.
> But I don't see any.

Yes, you are right.

Performing a reset on a queue is divided into four steps (a rough sketch of
the intended driver-side flow follows the list):

1. virtio_config_ops->reset_vq(): reset one vq
2. recycle the buffers from the vq by virtqueue_detach_unused_buf()
3. release the ring of the vq by vring_release_virtqueue() (new function)
4. virtio_config_ops->enable_reset_vq(): re-enable the reset queue
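
Something like this from the driver side (just a rough sketch; the exact
signatures of reset_vq()/enable_reset_vq() and of the new
vring_release_virtqueue() are still to be decided, and the helper names
below are placeholders):

static int virtnet_reset_vq_sketch(struct virtio_device *vdev,
				   struct virtqueue *vq, u16 ring_num)
{
	void *buf;
	int err;

	/* 1. ask the transport to stop and reset this vq */
	err = vdev->config->reset_vq(vq);
	if (err)
		return err;

	/* 2. recycle the buffers still pending in the vq */
	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
		virtnet_free_unused_buf(buf);	/* driver-specific free */

	/* 3. release the old ring */
	vring_release_virtqueue(vq);

	/* 4. re-enable the queue, possibly with a new ring size */
	return vdev->config->enable_reset_vq(vq, ring_num);
}

That way only steps 1 and 4 need transport support; steps 2 and 3 stay in
the driver and the virtio core.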
Thanks.
>
> Thanks
>
> > int (*enable_reset_vq)();
> >
> > Thanks.
> >
> > >
> > > >
> > > >
> > > > >
> > > > >
> > > > > > +
> > > > > > + /* delete vq */
> > > > > > + spin_lock_irqsave(&vp_dev->lock, flags);
> > > > > > + list_del(&info->node);
> > > > > > + spin_unlock_irqrestore(&vp_dev->lock, flags);
> > > > > > +
> > > > > > + INIT_LIST_HEAD(&info->node);
> > > > > > +
> > > > > > + if (vp_dev->msix_enabled)
> > > > > > + vp_modern_queue_vector(mdev, info->vq->index,
> > > > > > + VIRTIO_MSI_NO_VECTOR);
> > > > >
> > > > >
> > > > > I wonder if this is a must.
> > > > >
> > > > >
> > > > > > +
> > > > > > + if (!mdev->notify_base)
> > > > > > + pci_iounmap(mdev->pci_dev,
> > > > > > + (void __force __iomem *)info->vq->priv);
> > > > >
> > > > >
> > > > > Is this a must? what happens if we simply don't do this?
> > > > >
> > > >
> > > > The purpose of these two operations is mainly to be symmetrical with del_vq().
> > >
> > > This is another question I want to ask. Any reason for calling
> > > del_vq()? Is it because you need to exclude a vq from the vq handler?
> > >
> > > In any case, the MSI and notification stuff seems unnecessary.
> > >
> > > Thanks
> > >
> > > >
> > > >
> > > > >
> > > > > > +
> > > > > > + vring_reset_virtqueue(info->vq);
> > > > > > +
> > > > > > + return 0;
> > > > > > +}
> > > > > > +
> > > > > > +static struct virtqueue *vp_modern_enable_reset_vq(struct virtio_reset_vq *param)
> > > > > > +{
> > > > > > + struct virtio_pci_device *vp_dev = to_vp_device(param->vdev);
> > > > > > + struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> > > > > > + struct virtio_pci_vq_info *info;
> > > > > > + u16 msix_vec, queue_index;
> > > > > > + struct virtqueue *vq;
> > > > > > +
> > > > > > + if (!virtio_has_feature(param->vdev, VIRTIO_F_RING_RESET))
> > > > > > + return ERR_PTR(-ENOENT);
> > > > > > +
> > > > > > + queue_index = param->queue_index;
> > > > > > +
> > > > > > + info = vp_dev->vqs[queue_index];
> > > > > > +
> > > > > > + if (!info->vq->reset)
> > > > > > + return ERR_PTR(-EPERM);
> > > > > > +
> > > > > > + /* check queue reset status */
> > > > > > + if (vp_modern_get_queue_reset(mdev, queue_index) != 1)
> > > > > > + return ERR_PTR(-EBUSY);
> > > > > > +
> > > > > > + vq = vp_setup_vq(param->vdev, queue_index, param->callback, param->name,
> > > > > > + param->ctx, info->msix_vector, param->ring_num);
> > > > > > + if (IS_ERR(vq))
> > > > > > + return vq;
> > > > > > +
> > > > > > + vp_modern_set_queue_enable(&vp_dev->mdev, vq->index, true);
> > > > > > +
> > > > > > + msix_vec = vp_dev->vqs[queue_index]->msix_vector;
> > > > > > + if (vp_dev->per_vq_vectors && msix_vec != VIRTIO_MSI_NO_VECTOR)
> > > > > > + enable_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec));
> > > > >
> > > > >
> > > > > How about the INT-X case?
> > > >
> > > > Explained above.
> > > >
> > > > Thanks.
> > > >
> > > > >
> > > > > Thanks
> > > > >
> > > > >
> > > > > > +
> > > > > > + return vq;
> > > > > > +}
> > > > > > +
> > > > > > static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
> > > > > > {
> > > > > > return vp_modern_config_vector(&vp_dev->mdev, vector);
> > > > > > @@ -284,6 +375,11 @@ static void del_vq(struct virtio_pci_vq_info *info)
> > > > > > struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> > > > > > struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> > > > > >
> > > > > > + if (vq->reset) {
> > > > > > + vring_del_virtqueue(vq);
> > > > > > + return;
> > > > > > + }
> > > > > > +
> > > > > > if (vp_dev->msix_enabled)
> > > > > > vp_modern_queue_vector(mdev, vq->index,
> > > > > > VIRTIO_MSI_NO_VECTOR);
> > > > > > @@ -403,6 +499,8 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
> > > > > > .set_vq_affinity = vp_set_vq_affinity,
> > > > > > .get_vq_affinity = vp_get_vq_affinity,
> > > > > > .get_shm_region = vp_get_shm_region,
> > > > > > + .reset_vq = vp_modern_reset_vq,
> > > > > > + .enable_reset_vq = vp_modern_enable_reset_vq,
> > > > > > };
> > > > > >
> > > > > > static const struct virtio_config_ops virtio_pci_config_ops = {
> > > > > > @@ -421,6 +519,8 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
> > > > > > .set_vq_affinity = vp_set_vq_affinity,
> > > > > > .get_vq_affinity = vp_get_vq_affinity,
> > > > > > .get_shm_region = vp_get_shm_region,
> > > > > > + .reset_vq = vp_modern_reset_vq,
> > > > > > + .enable_reset_vq = vp_modern_enable_reset_vq,
> > > > > > };
> > > > > >
> > > > > > /* the PCI probing function */
> > > > >
> > > >
> > >
> >
>