Message-ID: <A2975661238FB949B60364EF0F2C25743A220BA4@SHSMSX104.ccr.corp.intel.com>
Date: Fri, 3 Apr 2020 13:30:49 +0000
From: "Liu, Yi L" <yi.l.liu@...el.com>
To: Alex Williamson <alex.williamson@...hat.com>
CC: "eric.auger@...hat.com" <eric.auger@...hat.com>,
"Tian, Kevin" <kevin.tian@...el.com>,
"jacob.jun.pan@...ux.intel.com" <jacob.jun.pan@...ux.intel.com>,
"joro@...tes.org" <joro@...tes.org>,
"Raj, Ashok" <ashok.raj@...el.com>,
"Tian, Jun J" <jun.j.tian@...el.com>,
"Sun, Yi Y" <yi.y.sun@...el.com>,
"jean-philippe@...aro.org" <jean-philippe@...aro.org>,
"peterx@...hat.com" <peterx@...hat.com>,
"iommu@...ts.linux-foundation.org" <iommu@...ts.linux-foundation.org>,
"kvm@...r.kernel.org" <kvm@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"Wu, Hao" <hao.wu@...el.com>
Subject: RE: [PATCH v1 6/8] vfio/type1: Bind guest page tables to host
Hi Alex,
> From: Alex Williamson <alex.williamson@...hat.com>
> Sent: Friday, April 3, 2020 3:57 AM
> To: Liu, Yi L <yi.l.liu@...el.com>
>
> On Sun, 22 Mar 2020 05:32:03 -0700
> "Liu, Yi L" <yi.l.liu@...el.com> wrote:
>
> > From: Liu Yi L <yi.l.liu@...el.com>
> >
> > VFIO_TYPE1_NESTING_IOMMU is an IOMMU type which is backed by hardware
> > IOMMUs that support nesting DMA translation (a.k.a. dual-stage address
> > translation). For such hardware IOMMUs, there are two stages/levels of
> > address translation, and software may let userspace/the VM own the
> > first-level/stage-1 translation structures. An example of such usage is
> > vSVA (virtual Shared Virtual Addressing). The VM owns the first-
> > level/stage-1 translation structures and binds them to the host, after
> > which the hardware IOMMU uses nesting translation when doing DMA
> > translation for the devices behind it.
> >
> > This patch adds vfio support for binding guest translation (a.k.a.
> > stage-1) structures to the host iommu. For VFIO_TYPE1_NESTING_IOMMU,
> > binding the guest page table is not enough; an interface must also be
> > exposed to the guest for iommu cache invalidation when the guest
> > modifies the first-level/stage-1 translation structures, since hardware
> > needs to be notified to flush stale iotlbs. That is introduced in the
> > next patch.
> >
> > In this patch, guest page table bind and unbind are done using the
> > flags VFIO_IOMMU_BIND_GUEST_PGTBL and VFIO_IOMMU_UNBIND_GUEST_PGTBL
> > under the IOCTL VFIO_IOMMU_BIND; the bind/unbind data are conveyed by
> > struct iommu_gpasid_bind_data. Before binding a guest page table to the
> > host, the VM should have a PASID allocated by the host via
> > VFIO_IOMMU_PASID_REQUEST.
> >
> > Binding guest translation structures (here, the guest page table) to
> > the host is the first step to set up vSVA (Virtual Shared Virtual
> > Addressing).
> >
> > Cc: Kevin Tian <kevin.tian@...el.com>
> > CC: Jacob Pan <jacob.jun.pan@...ux.intel.com>
> > Cc: Alex Williamson <alex.williamson@...hat.com>
> > Cc: Eric Auger <eric.auger@...hat.com>
> > Cc: Jean-Philippe Brucker <jean-philippe@...aro.org>
> > Signed-off-by: Jean-Philippe Brucker <jean-philippe@...aro.com>
> > Signed-off-by: Liu Yi L <yi.l.liu@...el.com>
> > Signed-off-by: Jacob Pan <jacob.jun.pan@...ux.intel.com>
> > ---
> > drivers/vfio/vfio_iommu_type1.c | 158 ++++++++++++++++++++++++++++++++++++++++
> > include/uapi/linux/vfio.h | 46 ++++++++++++
> > 2 files changed, 204 insertions(+)
> >
> > diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> > index 82a9e0b..a877747 100644
> > --- a/drivers/vfio/vfio_iommu_type1.c
> > +++ b/drivers/vfio/vfio_iommu_type1.c
> > @@ -130,6 +130,33 @@ struct vfio_regions {
> > #define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu) \
> > (!list_empty(&iommu->domain_list))
> >
> > +struct domain_capsule {
> > + struct iommu_domain *domain;
> > + void *data;
> > +};
> > +
> > +/* iommu->lock must be held */
> > +static int vfio_iommu_for_each_dev(struct vfio_iommu *iommu,
> > + int (*fn)(struct device *dev, void *data),
> > + void *data)
> > +{
> > + struct domain_capsule dc = {.data = data};
> > + struct vfio_domain *d;
> > + struct vfio_group *g;
> > + int ret = 0;
> > +
> > + list_for_each_entry(d, &iommu->domain_list, next) {
> > + dc.domain = d->domain;
> > + list_for_each_entry(g, &d->group_list, next) {
> > + ret = iommu_group_for_each_dev(g->iommu_group,
> > + &dc, fn);
> > + if (ret)
> > + break;
> > + }
> > + }
> > + return ret;
> > +}
> > +
> > static int put_pfn(unsigned long pfn, int prot);
> >
> > /*
> > @@ -2314,6 +2341,88 @@ static int vfio_iommu_info_add_nesting_cap(struct vfio_iommu *iommu,
> > return 0;
> > }
> >
> > +static int vfio_bind_gpasid_fn(struct device *dev, void *data)
> > +{
> > + struct domain_capsule *dc = (struct domain_capsule *)data;
> > + struct iommu_gpasid_bind_data *gbind_data =
> > + (struct iommu_gpasid_bind_data *) dc->data;
> > +
> > + return iommu_sva_bind_gpasid(dc->domain, dev, gbind_data);
> > +}
> > +
> > +static int vfio_unbind_gpasid_fn(struct device *dev, void *data)
> > +{
> > + struct domain_capsule *dc = (struct domain_capsule *)data;
> > + struct iommu_gpasid_bind_data *gbind_data =
> > + (struct iommu_gpasid_bind_data *) dc->data;
> > +
> > + return iommu_sva_unbind_gpasid(dc->domain, dev,
> > + gbind_data->hpasid);
> > +}
> > +
> > +/**
> > + * Unbind a specific gpasid; the caller must hold
> > + * vfio_iommu->lock.
> > + */
> > +static long vfio_iommu_type1_do_guest_unbind(struct vfio_iommu *iommu,
> > + struct iommu_gpasid_bind_data *gbind_data)
> > +{
> > + return vfio_iommu_for_each_dev(iommu,
> > + vfio_unbind_gpasid_fn, gbind_data);
> > +}
> > +
> > +static long vfio_iommu_type1_bind_gpasid(struct vfio_iommu *iommu,
> > + struct iommu_gpasid_bind_data *gbind_data)
> > +{
> > + int ret = 0;
> > +
> > + mutex_lock(&iommu->lock);
> > + if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) {
> > + ret = -EINVAL;
> > + goto out_unlock;
> > + }
> > +
> > + ret = vfio_iommu_for_each_dev(iommu,
> > + vfio_bind_gpasid_fn, gbind_data);
> > + /*
> > + * If bind failed, it may not be a total failure. Some devices
> > + * within the iommu group may have been bound successfully.
> > + * Although we don't enable the pasid capability for non-singleton
> > + * iommu groups, an unbind operation is helpful to ensure there
> > + * is no partial binding left for an iommu group.
>
> Where was the non-singleton group restriction done? I missed that.
Hmm, it's missing. Thanks for spotting it. How about adding the check
in vfio_iommu_for_each_dev()? When the loop reaches a non-singleton
group, just skip it. The same applies to the cache_inv path.
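To illustrate, a minimal sketch of that check; vfio_dev_count_fn() is
an illustrative helper and not existing code, while the
iommu_group_for_each_dev() call mirrors the one already in this patch:

	static int vfio_dev_count_fn(struct device *dev, void *data)
	{
		(*(int *)data)++;	/* count devices in the group */
		return 0;
	}

	/* in vfio_iommu_for_each_dev(), before invoking fn on a group */
	int count = 0;

	iommu_group_for_each_dev(g->iommu_group, &count, vfio_dev_count_fn);
	if (count > 1)
		continue;	/* skip non-singleton groups */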
> > + */
> > + if (ret)
> > + /*
> > + * Undo all binds that already succeeded; no need to
> > + * check the return value here, since at least one device
> > + * within the group failed to bind by the time we reach
> > + * this point.
> > + */
> > + vfio_iommu_type1_do_guest_unbind(iommu, gbind_data);
>
> However, the for_each_dev function stops when the callback function
> returns an error; are we just assuming we stop at the same device we
> faulted on the first time, and that we traverse the same set of devices
> the second time? It seems strange to me that unbind should be able to
> fail.
Unbind can fail if a user attempts to unbind a pasid which doesn't
belong to it, or a pasid which has never been bound. Otherwise, I don't
see a reason for it to fail.
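To make that concrete, the ownership check could look roughly like the
sketch below; vfio_iommu_pasid_is_owned() is a hypothetical helper used
only for illustration, not an existing function:

	/* sketch: reject unbind of a pasid the caller does not own */
	if (!vfio_iommu_pasid_is_owned(iommu, gbind_data->hpasid)) {
		ret = -EINVAL;
		goto out_unlock;
	}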
> > +
> > +out_unlock:
> > + mutex_unlock(&iommu->lock);
> > + return ret;
> > +}
> > +
> > +static long vfio_iommu_type1_unbind_gpasid(struct vfio_iommu *iommu,
> > + struct iommu_gpasid_bind_data *gbind_data)
> > +{
> > + int ret = 0;
> > +
> > + mutex_lock(&iommu->lock);
> > + if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) {
> > + ret = -EINVAL;
> > + goto out_unlock;
> > + }
> > +
> > + ret = vfio_iommu_type1_do_guest_unbind(iommu, gbind_data);
>
> How is a user supposed to respond to their unbind failing?
It can only fail for a malicious or bogus unbind (e.g. unbinding a
pasid which has not yet been bound, or a pasid which doesn't belong to
the current user), so the user just gets an error back.
> > +
> > +out_unlock:
> > + mutex_unlock(&iommu->lock);
> > + return ret;
> > +}
> > +
> > static long vfio_iommu_type1_ioctl(void *iommu_data,
> > unsigned int cmd, unsigned long arg)
> > {
> > static long vfio_iommu_type1_ioctl(void *iommu_data,
> > default:
> > return -EINVAL;
> > }
> > +
> > + } else if (cmd == VFIO_IOMMU_BIND) {
> > + struct vfio_iommu_type1_bind bind;
> > + u32 version;
> > + int data_size;
> > + void *gbind_data;
> > + int ret;
> > +
> > + minsz = offsetofend(struct vfio_iommu_type1_bind, flags);
> > +
> > + if (copy_from_user(&bind, (void __user *)arg, minsz))
> > + return -EFAULT;
> > +
> > + if (bind.argsz < minsz)
> > + return -EINVAL;
> > +
> > + /* Get the version of struct iommu_gpasid_bind_data */
> > + if (copy_from_user(&version,
> > + (void __user *) (arg + minsz),
> > + sizeof(version)))
> > + return -EFAULT;
>
> Why are we copying things from beyond the size we've validated that the
> user has provided, again?
Let me wait for the outcome of Jacob's thread below; it looks like this
needs a decision from you and Joerg. If using argsz is acceptable, then
I guess we don't need the version-to-size mapping, right? The
version-to-size mapping was added to ensure vfio copies the data
correctly.
https://lkml.org/lkml/2020/4/2/876
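If argsz ends up being the contract, the copy could be bounded by it
along these lines (a sketch only, assuming the user sets argsz to cover
the whole payload; the surrounding variables are those in this patch):

	/* sketch: derive the payload size from what the user declared */
	data_size = bind.argsz - minsz;
	if (data_size < sizeof(version))
		return -EINVAL;

	gbind_data = kzalloc(data_size, GFP_KERNEL);
	if (!gbind_data)
		return -ENOMEM;

	if (copy_from_user(gbind_data, (void __user *)(arg + minsz),
			   data_size)) {
		kfree(gbind_data);
		return -EFAULT;
	}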
> > +
> > + data_size = iommu_uapi_get_data_size(
> > + IOMMU_UAPI_BIND_GPASID, version);
> > + gbind_data = kzalloc(data_size, GFP_KERNEL);
> > + if (!gbind_data)
> > + return -ENOMEM;
> > +
> > + if (copy_from_user(gbind_data,
> > + (void __user *) (arg + minsz), data_size)) {
> > + kfree(gbind_data);
> > + return -EFAULT;
> > + }
>
> And again. argsz isn't just for minsz.
>
> > +
> > + switch (bind.flags & VFIO_IOMMU_BIND_MASK) {
> > + case VFIO_IOMMU_BIND_GUEST_PGTBL:
> > + ret = vfio_iommu_type1_bind_gpasid(iommu,
> > + gbind_data);
> > + break;
> > + case VFIO_IOMMU_UNBIND_GUEST_PGTBL:
> > + ret = vfio_iommu_type1_unbind_gpasid(iommu,
> > + gbind_data);
> > + break;
> > + default:
> > + ret = -EINVAL;
> > + break;
> > + }
> > + kfree(gbind_data);
> > + return ret;
> > }
> >
> > return -ENOTTY;
> > diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
> > index ebeaf3e..2235bc6 100644
> > --- a/include/uapi/linux/vfio.h
> > +++ b/include/uapi/linux/vfio.h
> > @@ -14,6 +14,7 @@
> >
> > #include <linux/types.h>
> > #include <linux/ioctl.h>
> > +#include <linux/iommu.h>
> >
> > #define VFIO_API_VERSION 0
> >
> > @@ -853,6 +854,51 @@ struct vfio_iommu_type1_pasid_request {
> > */
> > #define VFIO_IOMMU_PASID_REQUEST _IO(VFIO_TYPE, VFIO_BASE + 22)
> >
> > +/**
> > + * Supported flags:
> > + * - VFIO_IOMMU_BIND_GUEST_PGTBL: bind guest page tables to host for
> > + *   nesting type IOMMUs. It takes struct iommu_gpasid_bind_data
> > + *   in the @data field.
> > + * - VFIO_IOMMU_UNBIND_GUEST_PGTBL: undo a bind guest page table
> > + *   operation invoked by VFIO_IOMMU_BIND_GUEST_PGTBL.
>
> This must require iommu_gpasid_bind_data in the data field as well,
> right?
Yes, it takes struct iommu_gpasid_bind_data in the @data field as well;
will make that explicit in the comment.
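For reference, a userspace caller of the bind path would look roughly
like the sketch below. This assumes the uapi structures from this
series; hpasid comes from VFIO_IOMMU_PASID_REQUEST, gpasid is the
guest's PASID, and the format-specific fields are elided:

	struct iommu_gpasid_bind_data gbind = {
		.version = IOMMU_GPASID_BIND_VERSION_1,
		.format  = IOMMU_PASID_FORMAT_INTEL_VTD,
		.hpasid  = hpasid,	/* allocated by the host */
		.gpasid  = gpasid,	/* the guest's PASID */
		/* ... format-specific fields elided ... */
	};
	size_t argsz = sizeof(struct vfio_iommu_type1_bind) + sizeof(gbind);
	struct vfio_iommu_type1_bind *bind = calloc(1, argsz);

	bind->argsz = argsz;
	bind->flags = VFIO_IOMMU_BIND_GUEST_PGTBL;
	memcpy(bind->data, &gbind, sizeof(gbind));

	if (ioctl(container_fd, VFIO_IOMMU_BIND, bind))
		perror("VFIO_IOMMU_BIND");
	free(bind);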
Regards,
Yi Liu