Date:   Sat, 7 May 2022 18:24:01 +0800
From:   Jason Wang <jasowang@...hat.com>
To:     Gautam Dawar <gdawar@...inx.com>
Cc:     "Michael S. Tsirkin" <mst@...hat.com>, kvm <kvm@...r.kernel.org>,
        virtualization <virtualization@...ts.linux-foundation.org>,
        netdev <netdev@...r.kernel.org>,
        linux-kernel <linux-kernel@...r.kernel.org>,
        Martin Petrus Hubertus Habets <martinh@...inx.com>,
        Harpreet Singh Anand <hanand@...inx.com>,
        Martin Porter <martinpo@...inx.com>,
        Pablo Cascon Katchadourian <pabloc@...inx.com>,
        Dinan Gunawardena <dinang@...inx.com>,
        "tanuj.kamde@....com" <tanuj.kamde@....com>,
        "habetsm.xilinx@...il.com" <habetsm.xilinx@...il.com>,
        "ecree.xilinx@...il.com" <ecree.xilinx@...il.com>,
        eperezma <eperezma@...hat.com>,
        Wu Zongyong <wuzongyong@...ux.alibaba.com>,
        Christophe JAILLET <christophe.jaillet@...adoo.fr>,
        Eli Cohen <elic@...dia.com>,
        Zhu Lingshan <lingshan.zhu@...el.com>,
        Stefano Garzarella <sgarzare@...hat.com>,
        Xie Yongji <xieyongji@...edance.com>,
        Si-Wei Liu <si-wei.liu@...cle.com>,
        Parav Pandit <parav@...dia.com>,
        Longpeng <longpeng2@...wei.com>,
        Dan Carpenter <dan.carpenter@...cle.com>,
        Zhang Min <zhang.min9@....com.cn>
Subject: Re: [PATCH v2 15/19] vhost-vdpa: support ASID based IOTLB API

On Thu, Apr 28, 2022 at 2:28 PM Gautam Dawar <gdawar@...inx.com> wrote:
>
> -----Original Message-----
> From: Jason Wang <jasowang@...hat.com>
> Sent: Friday, April 1, 2022 9:55 AM
> To: Gautam Dawar <gdawar@...inx.com>
> Cc: Michael S. Tsirkin <mst@...hat.com>; kvm <kvm@...r.kernel.org>; virtualization <virtualization@...ts.linux-foundation.org>; netdev <netdev@...r.kernel.org>; linux-kernel <linux-kernel@...r.kernel.org>; Martin Petrus Hubertus Habets <martinh@...inx.com>; Harpreet Singh Anand <hanand@...inx.com>; Martin Porter <martinpo@...inx.com>; Pablo Cascon <pabloc@...inx.com>; Dinan Gunawardena <dinang@...inx.com>; tanuj.kamde@....com; habetsm.xilinx@...il.com; ecree.xilinx@...il.com; eperezma <eperezma@...hat.com>; Gautam Dawar <gdawar@...inx.com>; Wu Zongyong <wuzongyong@...ux.alibaba.com>; Christophe JAILLET <christophe.jaillet@...adoo.fr>; Eli Cohen <elic@...dia.com>; Zhu Lingshan <lingshan.zhu@...el.com>; Stefano Garzarella <sgarzare@...hat.com>; Xie Yongji <xieyongji@...edance.com>; Si-Wei Liu <si-wei.liu@...cle.com>; Parav Pandit <parav@...dia.com>; Longpeng <longpeng2@...wei.com>; Dan Carpenter <dan.carpenter@...cle.com>; Zhang Min <zhang.min9@....com.cn>
> Subject: Re: [PATCH v2 15/19] vhost-vdpa: support ASID based IOTLB API
>
> On Thu, Mar 31, 2022 at 2:17 AM Gautam Dawar <gautam.dawar@...inx.com> wrote:
> >
> > This patch extends vhost-vdpa to support the ASID based IOTLB API. The
> > vhost-vdpa device will allocate multiple IOTLBs for vDPA devices that
> > support multiple address spaces. The IOTLBs and vDPA device memory
> > mappings are determined and maintained through the ASID.
> >
> > Note that we still don't support vDPA devices with more than one
> > address space that depend on the platform IOMMU. This work will be done
> > by moving the IOMMU logic from vhost-vDPA to the vDPA device driver.
> >
> > Signed-off-by: Jason Wang <jasowang@...hat.com>
> > Signed-off-by: Gautam Dawar <gdawar@...inx.com>
> > ---
> >  drivers/vhost/vdpa.c  | 109 ++++++++++++++++++++++++++++++++++--------
> >  drivers/vhost/vhost.c |   2 +-
> >  2 files changed, 91 insertions(+), 20 deletions(-)
> >
> > diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> > index 6c7ee0f18892..1f1d1c425573 100644
> > --- a/drivers/vhost/vdpa.c
> > +++ b/drivers/vhost/vdpa.c
> > @@ -28,7 +28,8 @@
> >  enum {
> >         VHOST_VDPA_BACKEND_FEATURES =
> >         (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
> > -       (1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
> > +       (1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
> > +       (1ULL << VHOST_BACKEND_F_IOTLB_ASID),
> >  };
> >
> >  #define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
> > @@ -57,12 +58,20 @@ struct vhost_vdpa {
> >         struct eventfd_ctx *config_ctx;
> >         int in_batch;
> >         struct vdpa_iova_range range;
> > +       u32 batch_asid;
> >  };
> >
> >  static DEFINE_IDA(vhost_vdpa_ida);
> >
> >  static dev_t vhost_vdpa_major;
> >
> > +static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
> > +{
> > +       struct vhost_vdpa_as *as = container_of(iotlb, struct
> > +                                               vhost_vdpa_as, iotlb);
> > +       return as->id;
> > +}
> > +
> >  static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
> >  {
> >         struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
> > @@ -75,6 +84,16 @@ static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
> >         return NULL;
> >  }
> >
> > +static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
> > +{
> > +       struct vhost_vdpa_as *as = asid_to_as(v, asid);
> > +
> > +       if (!as)
> > +               return NULL;
> > +
> > +       return &as->iotlb;
> > +}
> > +
> >  static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
> >  {
> >         struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
> > @@ -83,6 +102,9 @@ static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
> >         if (asid_to_as(v, asid))
> >                 return NULL;
> >
> > +       if (asid >= v->vdpa->nas)
> > +               return NULL;
> > +
> >         as = kmalloc(sizeof(*as), GFP_KERNEL);
> >         if (!as)
> >                 return NULL;
> > @@ -94,6 +116,17 @@ static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
> >         return as;
> >  }
> >
> > +static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
> > +                                                     u32 asid)
> > +{
> > +       struct vhost_vdpa_as *as = asid_to_as(v, asid);
> > +
> > +       if (as)
> > +               return as;
> > +
> > +       return vhost_vdpa_alloc_as(v, asid);
> > +}
> > +
> >  static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
> >  {
> >         struct vhost_vdpa_as *as = asid_to_as(v, asid);
> > @@ -692,6 +725,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
> >         struct vhost_dev *dev = &v->vdev;
> >         struct vdpa_device *vdpa = v->vdpa;
> >         const struct vdpa_config_ops *ops = vdpa->config;
> > +       u32 asid = iotlb_to_asid(iotlb);
> >         int r = 0;
> >
> >         r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
> > @@ -700,10 +734,10 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
> >                 return r;
> >
> >         if (ops->dma_map) {
> > -               r = ops->dma_map(vdpa, 0, iova, size, pa, perm, opaque);
> > +               r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
> >         } else if (ops->set_map) {
> >                 if (!v->in_batch)
> > -                       r = ops->set_map(vdpa, 0, iotlb);
> > +                       r = ops->set_map(vdpa, asid, iotlb);
> >         } else {
> >                 r = iommu_map(v->domain, iova, pa, size,
> >                               perm_to_iommu_flags(perm));
> > @@ -725,17 +759,24 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v,
> >  {
> >         struct vdpa_device *vdpa = v->vdpa;
> >         const struct vdpa_config_ops *ops = vdpa->config;
> > +       u32 asid = iotlb_to_asid(iotlb);
> >
> >         vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
> >
> >         if (ops->dma_map) {
> > -               ops->dma_unmap(vdpa, 0, iova, size);
> > +               ops->dma_unmap(vdpa, asid, iova, size);
> >         } else if (ops->set_map) {
> >                 if (!v->in_batch)
> > -                       ops->set_map(vdpa, 0, iotlb);
> > +                       ops->set_map(vdpa, asid, iotlb);
> >         } else {
> >                 iommu_unmap(v->domain, iova, size);
> >         }
> > +
> > +       /* If we are in the middle of batch processing, delay the free
> > +        * of AS until BATCH_END.
> > +        */
> > +       if (!v->in_batch && !iotlb->nmaps)
> > +               vhost_vdpa_remove_as(v, asid);
> >  }
> >
> >  static int vhost_vdpa_va_map(struct vhost_vdpa *v,
> > @@ -943,19 +984,38 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
> >         struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
> >         struct vdpa_device *vdpa = v->vdpa;
> >         const struct vdpa_config_ops *ops = vdpa->config;
> > -       struct vhost_vdpa_as *as = asid_to_as(v, 0);
> > -       struct vhost_iotlb *iotlb = &as->iotlb;
> > +       struct vhost_iotlb *iotlb = NULL;
> > +       struct vhost_vdpa_as *as = NULL;
> >         int r = 0;
> >
> > -       if (asid != 0)
> > -               return -EINVAL;
> > -
> >         mutex_lock(&dev->mutex);
> >
> >         r = vhost_dev_check_owner(dev);
> >         if (r)
> >                 goto unlock;
> >
> > +       if (msg->type == VHOST_IOTLB_UPDATE ||
> > +           msg->type == VHOST_IOTLB_BATCH_BEGIN) {
> > +               as = vhost_vdpa_find_alloc_as(v, asid);
>
> I wonder if it's better to mandate that the ASID be in [0, dev->nas); otherwise user space is free to use arbitrary IDs which may exceed the number of address spaces supported by the device.
> [GD>>] Isn't the following check in vhost_vdpa_alloc_as() sufficient to ensure the ASID's value is in the range [0, dev->nas)?
>         if (asid >= v->vdpa->nas)
>                 return NULL;

I think you're right.

So we are fine.
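
FWIW, for anyone wiring this up from userspace, here is a minimal
sketch of tagging a mapping with an ASID. It assumes the uAPI from
this series (the asid field in struct vhost_msg_v2 and the
VHOST_BACKEND_F_IOTLB_ASID backend feature negotiated through
VHOST_SET_BACKEND_FEATURES); the fd and the mapping values are made
up and error handling is omitted:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <linux/vhost_types.h>

/* Map [iova, iova + size) to the process memory starting at uaddr in
 * the address space identified by asid. vhost-vdpa consumes IOTLB
 * messages through write() on the device fd.
 */
static int vdpa_map_into_asid(int vdpa_fd, uint32_t asid, uint64_t iova,
			      uint64_t size, void *uaddr)
{
	struct vhost_msg_v2 msg;

	memset(&msg, 0, sizeof(msg));
	msg.type = VHOST_IOTLB_MSG_V2;
	msg.asid = asid;		/* selects the per-AS IOTLB */
	msg.iotlb.type = VHOST_IOTLB_UPDATE;
	msg.iotlb.iova = iova;
	msg.iotlb.size = size;
	msg.iotlb.uaddr = (uint64_t)(uintptr_t)uaddr;
	msg.iotlb.perm = VHOST_ACCESS_RW;

	return write(vdpa_fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}

With this, each address space the device exposes (v->vdpa->nas of them)
gets its own IOTLB on first use, which is what vhost_vdpa_find_alloc_as()
implements above.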

Thanks

>
> Thanks
>
> > +               if (!as) {
> > +                       dev_err(&v->dev, "can't find and alloc asid %d\n",
> > +                               asid);
> > +                       return -EINVAL;
> > +               }
> > +               iotlb = &as->iotlb;
> > +       } else
> > +               iotlb = asid_to_iotlb(v, asid);
> > +
> > +       if ((v->in_batch && v->batch_asid != asid) || !iotlb) {
> > +               if (v->in_batch && v->batch_asid != asid) {
> > +                       dev_info(&v->dev, "batch id %d asid %d\n",
> > +                                v->batch_asid, asid);
> > +               }
> > +               if (!iotlb)
> > +                       dev_err(&v->dev, "no iotlb for asid %d\n", asid);
> > +               return -EINVAL;
> > +       }
> > +
> >         switch (msg->type) {
> >         case VHOST_IOTLB_UPDATE:
> >                 r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
> > @@ -964,12 +1024,15 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
> >                 vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
> >                 break;
> >         case VHOST_IOTLB_BATCH_BEGIN:
> > +               v->batch_asid = asid;
> >                 v->in_batch = true;
> >                 break;
> >         case VHOST_IOTLB_BATCH_END:
> >                 if (v->in_batch && ops->set_map)
> > -                       ops->set_map(vdpa, 0, iotlb);
> > +                       ops->set_map(vdpa, asid, iotlb);
> >                 v->in_batch = false;
> > +               if (!iotlb->nmaps)
> > +                       vhost_vdpa_remove_as(v, asid);
> >                 break;
> >         default:
> >                 r = -EINVAL;
> > @@ -1057,9 +1120,17 @@ static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
> >
> >  static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
> >  {
> > +       struct vhost_vdpa_as *as;
> > +       u32 asid;
> > +
> >         vhost_dev_cleanup(&v->vdev);
> >         kfree(v->vdev.vqs);
> > -       vhost_vdpa_remove_as(v, 0);
> > +
> > +       for (asid = 0; asid < v->vdpa->nas; asid++) {
> > +               as = asid_to_as(v, asid);
> > +               if (as)
> > +                       vhost_vdpa_remove_as(v, asid);
> > +       }
> >  }
> >
> >  static int vhost_vdpa_open(struct inode *inode, struct file *filep)
> > @@ -1095,12 +1166,9 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
> >         vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
> >                        vhost_vdpa_process_iotlb_msg);
> >
> > -       if (!vhost_vdpa_alloc_as(v, 0))
> > -               goto err_alloc_as;
> > -
> >         r = vhost_vdpa_alloc_domain(v);
> >         if (r)
> > -               goto err_alloc_as;
> > +               goto err_alloc_domain;
> >
> >         vhost_vdpa_set_iova_range(v);
> >
> > @@ -1108,7 +1176,7 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
> >
> >         return 0;
> >
> > -err_alloc_as:
> > +err_alloc_domain:
> >         vhost_vdpa_cleanup(v);
> >  err:
> >         atomic_dec(&v->opened);
> > @@ -1233,8 +1301,11 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
> >         int minor;
> >         int i, r;
> >
> > -       /* Only support 1 address space and 1 groups */
> > -       if (vdpa->ngroups != 1 || vdpa->nas != 1)
> > +       /* We can't support platform IOMMU device with more than 1
> > +        * group or as
> > +        */
> > +       if (!ops->set_map && !ops->dma_map &&
> > +           (vdpa->ngroups > 1 || vdpa->nas > 1))
> >                 return -EOPNOTSUPP;
> >
> >         v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
> > diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> > index d1e58f976f6e..5022c648d9c0 100644
> > --- a/drivers/vhost/vhost.c
> > +++ b/drivers/vhost/vhost.c
> > @@ -1167,7 +1167,7 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
> >                                 ret = -EINVAL;
> >                                 goto done;
> >                         }
> > -                       offset = sizeof(__u16);
> > +                       offset = 0;
> >                 } else
> >                         offset = sizeof(__u32);
> >                 break;
> > --
> > 2.30.1
> >
>
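
To make the batch_asid handling above concrete, here is a sketch of
the batched flow a VMM would follow, under the same assumptions as the
earlier sketch (intermediate error handling dropped for brevity):
every message between BATCH_BEGIN and BATCH_END has to carry the same
ASID, otherwise vhost_vdpa_process_iotlb_msg() fails with -EINVAL, and
an address space whose IOTLB ends up empty is only freed once
BATCH_END arrives:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <linux/vhost_types.h>

static int vdpa_iotlb_msg(int vdpa_fd, uint32_t asid, uint8_t type,
			  uint64_t iova, uint64_t size, uint64_t uaddr)
{
	struct vhost_msg_v2 msg;

	memset(&msg, 0, sizeof(msg));
	msg.type = VHOST_IOTLB_MSG_V2;
	msg.asid = asid;
	msg.iotlb.type = type;
	msg.iotlb.iova = iova;
	msg.iotlb.size = size;
	msg.iotlb.uaddr = uaddr;
	msg.iotlb.perm = VHOST_ACCESS_RW;

	return write(vdpa_fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}

/* Replace one mapping in a single address space; with a set_map()
 * backend the device only sees one set_map(vdpa, asid, iotlb) call,
 * issued at BATCH_END.
 */
static int vdpa_remap_asid(int vdpa_fd, uint32_t asid, uint64_t iova,
			   uint64_t size, uint64_t new_uaddr)
{
	vdpa_iotlb_msg(vdpa_fd, asid, VHOST_IOTLB_BATCH_BEGIN, 0, 0, 0);
	vdpa_iotlb_msg(vdpa_fd, asid, VHOST_IOTLB_INVALIDATE, iova, size, 0);
	vdpa_iotlb_msg(vdpa_fd, asid, VHOST_IOTLB_UPDATE, iova, size, new_uaddr);
	return vdpa_iotlb_msg(vdpa_fd, asid, VHOST_IOTLB_BATCH_END, 0, 0, 0);
}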
