Message-ID: <A2975661238FB949B60364EF0F2C25743A21D6DF@SHSMSX104.ccr.corp.intel.com>
Date: Wed, 1 Apr 2020 07:49:24 +0000
From: "Liu, Yi L" <yi.l.liu@...el.com>
To: "Tian, Kevin" <kevin.tian@...el.com>,
"alex.williamson@...hat.com" <alex.williamson@...hat.com>,
"eric.auger@...hat.com" <eric.auger@...hat.com>
CC: "jacob.jun.pan@...ux.intel.com" <jacob.jun.pan@...ux.intel.com>,
"joro@...tes.org" <joro@...tes.org>,
"Raj, Ashok" <ashok.raj@...el.com>,
"Tian, Jun J" <jun.j.tian@...el.com>,
"Sun, Yi Y" <yi.y.sun@...el.com>,
"jean-philippe@...aro.org" <jean-philippe@...aro.org>,
"peterx@...hat.com" <peterx@...hat.com>,
"iommu@...ts.linux-foundation.org" <iommu@...ts.linux-foundation.org>,
"kvm@...r.kernel.org" <kvm@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"Wu, Hao" <hao.wu@...el.com>
Subject: RE: [PATCH v1 7/8] vfio/type1: Add VFIO_IOMMU_CACHE_INVALIDATE
> From: Tian, Kevin <kevin.tian@...el.com>
> Sent: Monday, March 30, 2020 8:58 PM
> To: Liu, Yi L <yi.l.liu@...el.com>; alex.williamson@...hat.com;
> Subject: RE: [PATCH v1 7/8] vfio/type1: Add VFIO_IOMMU_CACHE_INVALIDATE
>
> > From: Liu, Yi L <yi.l.liu@...el.com>
> > Sent: Sunday, March 22, 2020 8:32 PM
> >
> > From: Liu Yi L <yi.l.liu@...ux.intel.com>
> >
> > For VFIO IOMMUs of type VFIO_TYPE1_NESTING_IOMMU, the guest "owns" the
> > first-level/stage-1 translation structures, so the host IOMMU driver has
> > no knowledge of first-level/stage-1 cache updates unless the guest
> > invalidation requests are trapped and propagated to the host.
> >
> > This patch adds a new IOCTL, VFIO_IOMMU_CACHE_INVALIDATE, to propagate
> > guest first-level/stage-1 IOMMU cache invalidations to the host so that
> > IOMMU cache correctness is ensured.
> >
> > With this patch, vSVA (Virtual Shared Virtual Addressing) can be used
> > safely, as host IOMMU IOTLB correctness is ensured.
> >
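[ To make the usage model above concrete for reviewers, here is a rough
userspace-side sketch of how a VMM could forward a trapped guest stage-1
invalidation through this new ioctl. The helper name and the way the
caller builds cache_info are illustrative only and not part of this
series. ]

#include <string.h>
#include <sys/ioctl.h>
#include <linux/iommu.h>
#include <linux/vfio.h>

/*
 * Called after the vIOMMU emulation has decoded a guest invalidation
 * request into a struct iommu_cache_invalidate_info (with @version
 * already set by the caller).
 */
static int vfio_forward_cache_inv(int container_fd,
				  struct iommu_cache_invalidate_info *info)
{
	struct vfio_iommu_type1_cache_invalidate inv;

	memset(&inv, 0, sizeof(inv));
	inv.argsz = sizeof(inv);	/* argsz/flags plus embedded cache_info */
	inv.flags = 0;			/* no flags defined for now */
	inv.cache_info = *info;

	return ioctl(container_fd, VFIO_IOMMU_CACHE_INVALIDATE, &inv);
}

The ioctl is issued on the container fd, and only after VFIO_SET_IOMMU,
matching the availability note in the uapi comment below.
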
> > Cc: Kevin Tian <kevin.tian@...el.com>
> > CC: Jacob Pan <jacob.jun.pan@...ux.intel.com>
> > Cc: Alex Williamson <alex.williamson@...hat.com>
> > Cc: Eric Auger <eric.auger@...hat.com>
> > Cc: Jean-Philippe Brucker <jean-philippe@...aro.org>
> > Signed-off-by: Liu Yi L <yi.l.liu@...ux.intel.com>
> > Signed-off-by: Eric Auger <eric.auger@...hat.com>
> > Signed-off-by: Jacob Pan <jacob.jun.pan@...ux.intel.com>
> > ---
> > drivers/vfio/vfio_iommu_type1.c | 49 +++++++++++++++++++++++++++++++++++++++++
> > include/uapi/linux/vfio.h | 22 ++++++++++++++++++
> > 2 files changed, 71 insertions(+)
> >
> > diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> > index a877747..937ec3f 100644
> > --- a/drivers/vfio/vfio_iommu_type1.c
> > +++ b/drivers/vfio/vfio_iommu_type1.c
> > @@ -2423,6 +2423,15 @@ static long vfio_iommu_type1_unbind_gpasid(struct vfio_iommu *iommu,
> > return ret;
> > }
> >
> > +static int vfio_cache_inv_fn(struct device *dev, void *data)
>
> vfio_iommu_cache_inv_fn
got it.
> > +{
> > + struct domain_capsule *dc = (struct domain_capsule *)data;
> > + struct iommu_cache_invalidate_info *cache_inv_info =
> > + (struct iommu_cache_invalidate_info *) dc->data;
> > +
> > + return iommu_cache_invalidate(dc->domain, dev, cache_inv_info);
> > +}
> > +
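[ Side note for anyone reading this patch in isolation: dc is the
domain_capsule wrapper and vfio_iommu_for_each_dev() is the iteration
helper, both introduced earlier in this series. The sketch below is a
simplified paraphrase of that calling pattern, not a copy of the earlier
patches: ]

struct domain_capsule {
	struct iommu_domain *domain;	/* domain the group is attached to */
	void *data;			/* per-ioctl payload, here cache_inv_info */
};

/* Walks every device in the container and invokes fn() once per device. */
static int vfio_iommu_for_each_dev(struct vfio_iommu *iommu,
				   int (*fn)(struct device *, void *),
				   void *data)
{
	struct domain_capsule dc = { .data = data };
	struct vfio_domain *d;
	struct vfio_group *g;
	int ret = 0;

	list_for_each_entry(d, &iommu->domain_list, next) {
		dc.domain = d->domain;
		list_for_each_entry(g, &d->group_list, next) {
			ret = iommu_group_for_each_dev(g->iommu_group,
						       &dc, fn);
			if (ret)
				return ret;
		}
	}
	return ret;
}

So a single VFIO_IOMMU_CACHE_INVALIDATE call fans out to
iommu_cache_invalidate() for each device attached to the container.
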
> > static long vfio_iommu_type1_ioctl(void *iommu_data,
> > unsigned int cmd, unsigned long arg)
> > {
> > @@ -2629,6 +2638,46 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
> > }
> > kfree(gbind_data);
> > return ret;
> > + } else if (cmd == VFIO_IOMMU_CACHE_INVALIDATE) {
> > + struct vfio_iommu_type1_cache_invalidate cache_inv;
> > + u32 version;
> > + int info_size;
> > + void *cache_info;
> > + int ret;
> > +
> > + minsz = offsetofend(struct vfio_iommu_type1_cache_invalidate,
> > + flags);
> > +
> > + if (copy_from_user(&cache_inv, (void __user *)arg, minsz))
> > + return -EFAULT;
> > +
> > + if (cache_inv.argsz < minsz || cache_inv.flags)
> > + return -EINVAL;
> > +
> > + /* Get the version of struct iommu_cache_invalidate_info */
> > + if (copy_from_user(&version,
> > + (void __user *) (arg + minsz), sizeof(version)))
> > + return -EFAULT;
> > +
> > + info_size = iommu_uapi_get_data_size(
> > + IOMMU_UAPI_CACHE_INVAL, version);
> > +
> > + cache_info = kzalloc(info_size, GFP_KERNEL);
> > + if (!cache_info)
> > + return -ENOMEM;
> > +
> > + if (copy_from_user(cache_info,
> > + (void __user *) (arg + minsz), info_size)) {
> > + kfree(cache_info);
> > + return -EFAULT;
> > + }
> > +
> > + mutex_lock(&iommu->lock);
> > + ret = vfio_iommu_for_each_dev(iommu, vfio_cache_inv_fn,
> > + cache_info);
> > + mutex_unlock(&iommu->lock);
> > + kfree(cache_info);
> > + return ret;
> > }
> >
> > return -ENOTTY;
> > diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
> > index 2235bc6..62ca791 100644
> > --- a/include/uapi/linux/vfio.h
> > +++ b/include/uapi/linux/vfio.h
> > @@ -899,6 +899,28 @@ struct vfio_iommu_type1_bind {
> > */
> > #define VFIO_IOMMU_BIND _IO(VFIO_TYPE, VFIO_BASE + 23)
> >
> > +/**
> > + * VFIO_IOMMU_CACHE_INVALIDATE - _IOW(VFIO_TYPE, VFIO_BASE + 24,
> > + * struct vfio_iommu_type1_cache_invalidate)
> > + *
> > + * Propagate guest IOMMU cache invalidation to the host. The cache
> > + * invalidation information is conveyed by @cache_info, whose content
> > + * format is the structures defined in uapi/linux/iommu.h. Users
> > + * should be aware that struct iommu_cache_invalidate_info has a
> > + * @version field; VFIO needs to parse this field before getting
> > + * the data from userspace.
> > + *
> > + * Availability of this IOCTL is after VFIO_SET_IOMMU.
> > + *
> > + * returns: 0 on success, -errno on failure.
> > + */
> > +struct vfio_iommu_type1_cache_invalidate {
> > + __u32 argsz;
> > + __u32 flags;
> > + struct iommu_cache_invalidate_info cache_info;
> > +};
> > +#define VFIO_IOMMU_CACHE_INVALIDATE _IO(VFIO_TYPE, VFIO_BASE + 24)
> > +
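[ To make the @version note in the comment above concrete: building on
the illustrative vfio_forward_cache_inv() helper sketched earlier in this
mail, a first-level IOTLB invalidation scoped to one PASID would be
filled roughly as below. The macro names are the ones in the current
uapi/linux/iommu.h and may still shift depending on how the version
compatibility discussion concludes: ]

	struct iommu_cache_invalidate_info info;

	memset(&info, 0, sizeof(info));
	info.version = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1;
	info.cache = IOMMU_CACHE_INV_TYPE_IOTLB;	/* first-level/stage-1 IOTLB */
	info.granularity = IOMMU_INV_GRANU_PASID;	/* scope: one PASID */
	/* plus the granularity-specific pasid/addr payload from iommu.h */

	vfio_forward_cache_inv(container_fd, &info);
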
> > /* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
> >
> > /*
> > --
> > 2.7.4
>
> This patch looks good to me in general. But since there is still a major open
> question about version compatibility, I'll hold my r-b until that open is closed. 😊
>
thanks,
Regards,
Yi Liu