Message-ID: <20210913022257-mutt-send-email-mst@kernel.org>
Date: Mon, 13 Sep 2021 02:28:11 -0400
From: "Michael S. Tsirkin" <mst@...hat.com>
To: Jason Wang <jasowang@...hat.com>
Cc: virtualization <virtualization@...ts.linux-foundation.org>,
linux-kernel <linux-kernel@...r.kernel.org>,
"Hetzelt, Felicitas" <f.hetzelt@...berlin.de>,
"kaplan, david" <david.kaplan@....com>,
Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>,
pbonzini <pbonzini@...hat.com>, Andi Kleen <ak@...ux.intel.com>,
Dan Williams <dan.j.williams@...el.com>,
"Kuppuswamy, Sathyanarayanan"
<sathyanarayanan.kuppuswamy@...ux.intel.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Peter Zijlstra <peterz@...radead.org>,
Andy Lutomirski <luto@...nel.org>,
Bjorn Helgaas <bhelgaas@...gle.com>,
Richard Henderson <rth@...ddle.net>,
Thomas Bogendoerfer <tsbogend@...ha.franken.de>,
James E J Bottomley <James.Bottomley@...senpartnership.com>,
Helge Deller <deller@....de>,
"David S . Miller" <davem@...emloft.net>,
Arnd Bergmann <arnd@...db.de>,
Jonathan Corbet <corbet@....net>,
Peter H Anvin <hpa@...or.com>,
Dave Hansen <dave.hansen@...el.com>,
Tony Luck <tony.luck@...el.com>,
Kirill Shutemov <kirill.shutemov@...ux.intel.com>,
Sean Christopherson <seanjc@...gle.com>,
Kuppuswamy Sathyanarayanan <knsathya@...nel.org>,
X86 ML <x86@...nel.org>
Subject: Re: [PATCH 6/9] virtio_pci: harden MSI-X interrupts
On Mon, Sep 13, 2021 at 02:08:02PM +0800, Jason Wang wrote:
> On Mon, Sep 13, 2021 at 2:04 PM Michael S. Tsirkin <mst@...hat.com> wrote:
> >
> > On Mon, Sep 13, 2021 at 01:53:50PM +0800, Jason Wang wrote:
> > > We used to synchronize pending MSI-X irq handlers via
> > > synchronize_irq(). This may not work for an untrusted device, which
> > > may keep sending interrupts after reset and lead to unexpected
> > > results. Similarly, we should not enable MSI-X interrupts until the
> > > device is ready. So this patch fixes those two issues by:
> > >
> > > 1) switching to disable_irq() to prevent the virtio interrupt
> > > handlers from being called after the device is reset.
> > > 2) using IRQF_NO_AUTOEN and enabling the MSI-X irq during .ready()
> > >
> > > This makes sure the virtio interrupt handler won't be called before
> > > virtio_device_ready() or after reset.
> > >
> > > Signed-off-by: Jason Wang <jasowang@...hat.com>
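
[For readers not following the whole series: as I understand it, the new
->ready() config op is meant to be invoked from virtio_device_ready();
that wiring lives in another patch and is not shown here. Below is only
my sketch of the assumed call site, based on the commit message ("enable
the MSI-X irq during .ready()"); the actual ordering relative to setting
DRIVER_OK is defined elsewhere in the series, not in this patch.]

	static inline void virtio_device_ready(struct virtio_device *dev)
	{
		unsigned status = dev->config->get_status(dev);

		BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
		dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);

		/* assumed hook-up of the new op added by this series */
		if (dev->config->ready)
			dev->config->ready(dev);
	}
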
> >
> > I don't get the threat model here. Isn't disabling irqs done by the
> > hypervisor anyway? Is there a reason to trust disable_irq but not
> > device reset?
>
> My understanding is that e.g. in the case of SEV/TDX we don't trust the
> hypervisor. So the hypervisor can keep sending interrupts even if the
> device is reset. The guest can only trust its own software interrupt
> management logic to avoid calling the virtio callbacks in this case.
>
> Thanks
Hmm, but I don't see how these patches do this.
They call disable_irq but can't the hypervisor keep
sending interrupts after disable_irq, too?
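
For reference, the pattern the patch relies on, as I read it (a minimal
sketch with made-up names, not the actual driver code): IRQF_NO_AUTOEN
keeps the vector masked after request_irq(), enable_irq() unmasks it once
the device is supposedly ready, and disable_irq() masks it again and
waits for in-flight handlers on reset.

	#include <linux/interrupt.h>
	#include <linux/pci.h>

	static irqreturn_t my_vq_interrupt(int irq, void *data)
	{
		/* only reachable between enable_irq() and disable_irq() */
		return IRQ_HANDLED;
	}

	static int my_setup(struct pci_dev *pdev, void *data)
	{
		int irq = pci_irq_vector(pdev, 0);

		/* registered masked: the handler cannot run yet */
		return request_irq(irq, my_vq_interrupt, IRQF_NO_AUTOEN,
				   "my-dev", data);
	}

	static void my_ready(struct pci_dev *pdev)
	{
		/* device is (believed to be) ready: unmask the vector */
		enable_irq(pci_irq_vector(pdev, 0));
	}

	static void my_reset(struct pci_dev *pdev)
	{
		/*
		 * Mask the vector and wait for in-flight handlers; unlike
		 * synchronize_irq(), further assertions of the vector do
		 * not invoke the handler until enable_irq() is called.
		 */
		disable_irq(pci_irq_vector(pdev, 0));
	}
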
> >
> > Cc a bunch more people ...
> >
> >
> > > ---
> > > drivers/virtio/virtio_pci_common.c | 27 +++++++++++++++++++++------
> > > drivers/virtio/virtio_pci_common.h | 6 ++++--
> > > drivers/virtio/virtio_pci_legacy.c | 5 +++--
> > > drivers/virtio/virtio_pci_modern.c | 6 ++++--
> > > 4 files changed, 32 insertions(+), 12 deletions(-)
> > >
> > > diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
> > > index b35bb2d57f62..0b9523e6dd39 100644
> > > --- a/drivers/virtio/virtio_pci_common.c
> > > +++ b/drivers/virtio/virtio_pci_common.c
> > > @@ -24,8 +24,8 @@ MODULE_PARM_DESC(force_legacy,
> > > "Force legacy mode for transitional virtio 1 devices");
> > > #endif
> > >
> > > -/* wait for pending irq handlers */
> > > -void vp_synchronize_vectors(struct virtio_device *vdev)
> > > +/* disable irq handlers */
> > > +void vp_disable_vectors(struct virtio_device *vdev)
> > > {
> > > struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> > > int i;
> > > @@ -34,7 +34,20 @@ void vp_synchronize_vectors(struct virtio_device *vdev)
> > > synchronize_irq(vp_dev->pci_dev->irq);
> > >
> > > for (i = 0; i < vp_dev->msix_vectors; ++i)
> > > - synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
> > > + disable_irq(pci_irq_vector(vp_dev->pci_dev, i));
> > > +}
> > > +
> > > +/* enable irq handlers */
> > > +void vp_enable_vectors(struct virtio_device *vdev)
> > > +{
> > > + struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> > > + int i;
> > > +
> > > + if (vp_dev->intx_enabled)
> > > + return;
> > > +
> > > + for (i = 0; i < vp_dev->msix_vectors; ++i)
> > > + enable_irq(pci_irq_vector(vp_dev->pci_dev, i));
> > > }
> > >
> > > /* the notify function used when creating a virt queue */
> > > @@ -141,7 +154,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
> > > snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
> > > "%s-config", name);
> > > err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
> > > - vp_config_changed, 0, vp_dev->msix_names[v],
> > > + vp_config_changed, IRQF_NO_AUTOEN,
> > > + vp_dev->msix_names[v],
> > > vp_dev);
> > > if (err)
> > > goto error;
> > > @@ -160,7 +174,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
> > > snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
> > > "%s-virtqueues", name);
> > > err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
> > > - vp_vring_interrupt, 0, vp_dev->msix_names[v],
> > > + vp_vring_interrupt, IRQF_NO_AUTOEN,
> > > + vp_dev->msix_names[v],
> > > vp_dev);
> > > if (err)
> > > goto error;
> > > @@ -337,7 +352,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
> > > "%s-%s",
> > > dev_name(&vp_dev->vdev.dev), names[i]);
> > > err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
> > > - vring_interrupt, 0,
> > > + vring_interrupt, IRQF_NO_AUTOEN,
> > > vp_dev->msix_names[msix_vec],
> > > vqs[i]);
> > > if (err)
> > > diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
> > > index beec047a8f8d..a235ce9ff6a5 100644
> > > --- a/drivers/virtio/virtio_pci_common.h
> > > +++ b/drivers/virtio/virtio_pci_common.h
> > > @@ -102,8 +102,10 @@ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
> > > return container_of(vdev, struct virtio_pci_device, vdev);
> > > }
> > >
> > > -/* wait for pending irq handlers */
> > > -void vp_synchronize_vectors(struct virtio_device *vdev);
> > > +/* disable irq handlers */
> > > +void vp_disable_vectors(struct virtio_device *vdev);
> > > +/* enable irq handlers */
> > > +void vp_enable_vectors(struct virtio_device *vdev);
> > > /* the notify function used when creating a virt queue */
> > > bool vp_notify(struct virtqueue *vq);
> > > /* the config->del_vqs() implementation */
> > > diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
> > > index d62e9835aeec..bdf6bc667ab5 100644
> > > --- a/drivers/virtio/virtio_pci_legacy.c
> > > +++ b/drivers/virtio/virtio_pci_legacy.c
> > > @@ -97,8 +97,8 @@ static void vp_reset(struct virtio_device *vdev)
> > > /* Flush out the status write, and flush in device writes,
> > > * including MSi-X interrupts, if any. */
> > > ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
> > > - /* Flush pending VQ/configuration callbacks. */
> > > - vp_synchronize_vectors(vdev);
> > > + /* Disable VQ/configuration callbacks. */
> > > + vp_disable_vectors(vdev);
> > > }
> > >
> > > static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
> > > @@ -194,6 +194,7 @@ static void del_vq(struct virtio_pci_vq_info *info)
> > > }
> > >
> > > static const struct virtio_config_ops virtio_pci_config_ops = {
> > > + .ready = vp_enable_vectors,
> > > .get = vp_get,
> > > .set = vp_set,
> > > .get_status = vp_get_status,
> > > diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
> > > index 30654d3a0b41..acf0f6b6381d 100644
> > > --- a/drivers/virtio/virtio_pci_modern.c
> > > +++ b/drivers/virtio/virtio_pci_modern.c
> > > @@ -172,8 +172,8 @@ static void vp_reset(struct virtio_device *vdev)
> > > */
> > > while (vp_modern_get_status(mdev))
> > > msleep(1);
> > > - /* Flush pending VQ/configuration callbacks. */
> > > - vp_synchronize_vectors(vdev);
> > > + /* Disable VQ/configuration callbacks. */
> > > + vp_disable_vectors(vdev);
> > > }
> > >
> > > static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
> > > @@ -380,6 +380,7 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
> > > }
> > >
> > > static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
> > > + .ready = vp_enable_vectors,
> > > .get = NULL,
> > > .set = NULL,
> > > .generation = vp_generation,
> > > @@ -397,6 +398,7 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
> > > };
> > >
> > > static const struct virtio_config_ops virtio_pci_config_ops = {
> > > + .ready = vp_enable_vectors,
> > > .get = vp_get,
> > > .set = vp_set,
> > > .generation = vp_generation,
> > > --
> > > 2.25.1
> >