Message-ID: <20230316083825.wslrk7abt4nts4us@sgarzare-redhat>
Date: Thu, 16 Mar 2023 09:38:25 +0100
From: Stefano Garzarella <sgarzare@...hat.com>
To: Jason Wang <jasowang@...hat.com>
Cc: virtualization@...ts.linux-foundation.org,
Andrey Zhadchenko <andrey.zhadchenko@...tuozzo.com>,
eperezma@...hat.com, netdev@...r.kernel.org, stefanha@...hat.com,
linux-kernel@...r.kernel.org,
"Michael S. Tsirkin" <mst@...hat.com>, kvm@...r.kernel.org
Subject: Re: [PATCH v2 4/8] vringh: support VA with iotlb
On Tue, Mar 14, 2023 at 12:53:57PM +0800, Jason Wang wrote:
>On Thu, Mar 2, 2023 at 7:35 PM Stefano Garzarella <sgarzare@...hat.com> wrote:
>>
>> vDPA supports the possibility of using user VA in the iotlb messages.
>> So, let's add support for user VA in vringh so that the vDPA
>> simulators can use it.
>>
>> Signed-off-by: Stefano Garzarella <sgarzare@...hat.com>
>> ---
>>
>> Notes:
>> v2:
>> - replace kmap_atomic() with kmap_local_page() [see previous patch]
>> - fix cast warnings when build with W=1 C=1
>>
>> include/linux/vringh.h | 5 +-
>> drivers/vdpa/mlx5/net/mlx5_vnet.c | 2 +-
>> drivers/vdpa/vdpa_sim/vdpa_sim.c | 4 +-
>> drivers/vhost/vringh.c | 247 ++++++++++++++++++++++++------
>> 4 files changed, 205 insertions(+), 53 deletions(-)
>>
>> diff --git a/include/linux/vringh.h b/include/linux/vringh.h
>> index 1991a02c6431..d39b9f2dcba0 100644
>> --- a/include/linux/vringh.h
>> +++ b/include/linux/vringh.h
>> @@ -32,6 +32,9 @@ struct vringh {
>> /* Can we get away with weak barriers? */
>> bool weak_barriers;
>>
>> + /* Use user's VA */
>> + bool use_va;
>> +
>> /* Last available index we saw (ie. where we're up to). */
>> u16 last_avail_idx;
>>
>> @@ -279,7 +282,7 @@ void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
>> spinlock_t *iotlb_lock);
>>
>> int vringh_init_iotlb(struct vringh *vrh, u64 features,
>> - unsigned int num, bool weak_barriers,
>> + unsigned int num, bool weak_barriers, bool use_va,
>> struct vring_desc *desc,
>> struct vring_avail *avail,
>> struct vring_used *used);
>> diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
>> index 3a0e721aef05..babc8dd171a6 100644
>> --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
>> +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
>> @@ -2537,7 +2537,7 @@ static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev)
>>
>> if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
>> err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
>> - MLX5_CVQ_MAX_ENT, false,
>> + MLX5_CVQ_MAX_ENT, false, false,
>> (struct vring_desc *)(uintptr_t)cvq->desc_addr,
>> (struct vring_avail *)(uintptr_t)cvq->driver_addr,
>> (struct vring_used *)(uintptr_t)cvq->device_addr);
>> diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
>> index 6a0a65814626..481eb156658b 100644
>> --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
>> +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
>> @@ -60,7 +60,7 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
>> struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
>> uint16_t last_avail_idx = vq->vring.last_avail_idx;
>>
>> - vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, true,
>> + vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, true, false,
>> (struct vring_desc *)(uintptr_t)vq->desc_addr,
>> (struct vring_avail *)
>> (uintptr_t)vq->driver_addr,
>> @@ -81,7 +81,7 @@ static void vdpasim_vq_reset(struct vdpasim *vdpasim,
>> vq->cb = NULL;
>> vq->private = NULL;
>> vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
>> - VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);
>> + VDPASIM_QUEUE_MAX, false, false, NULL, NULL, NULL);
>>
>> vq->vring.notify = NULL;
>> }
>> diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
>> index 0ba3ef809e48..61c79cea44ca 100644
>> --- a/drivers/vhost/vringh.c
>> +++ b/drivers/vhost/vringh.c
>> @@ -1094,15 +1094,99 @@ EXPORT_SYMBOL(vringh_need_notify_kern);
>>
>> #if IS_REACHABLE(CONFIG_VHOST_IOTLB)
>>
>> -static int iotlb_translate(const struct vringh *vrh,
>> - u64 addr, u64 len, u64 *translated,
>> - struct bio_vec iov[],
>> - int iov_size, u32 perm)
>> +static int iotlb_translate_va(const struct vringh *vrh,
>> + u64 addr, u64 len, u64 *translated,
>> + struct iovec iov[],
>> + int iov_size, u32 perm)
>> {
>> struct vhost_iotlb_map *map;
>> struct vhost_iotlb *iotlb = vrh->iotlb;
>> + u64 s = 0, last = addr + len - 1;
>> int ret = 0;
>> +
>> + spin_lock(vrh->iotlb_lock);
>> +
>> + while (len > s) {
>> + u64 size;
>> +
>> + if (unlikely(ret >= iov_size)) {
>> + ret = -ENOBUFS;
>> + break;
>> + }
>> +
>> + map = vhost_iotlb_itree_first(iotlb, addr, last);
>> + if (!map || map->start > addr) {
>> + ret = -EINVAL;
>> + break;
>> + } else if (!(map->perm & perm)) {
>> + ret = -EPERM;
>> + break;
>> + }
>> +
>> + size = map->size - addr + map->start;
>> + iov[ret].iov_len = min(len - s, size);
>> + iov[ret].iov_base = (void __user *)(unsigned long)
>> + (map->addr + addr - map->start);
>> + s += size;
>> + addr += size;
>> + ++ret;
>> + }
>> +
>> + spin_unlock(vrh->iotlb_lock);
>> +
>> + if (translated)
>> + *translated = min(len, s);
>> +
>> + return ret;
>> +}
>> +
>> +static inline int copy_from_va(const struct vringh *vrh, void *dst, void *src,
>> + u64 len, u64 *translated)
>> +{
>> + struct iovec iov[16];
>> + struct iov_iter iter;
>> + int ret;
>> +
>> + ret = iotlb_translate_va(vrh, (u64)(uintptr_t)src, len, translated, iov,
>> + ARRAY_SIZE(iov), VHOST_MAP_RO);
>> + if (ret == -ENOBUFS)
>> + ret = ARRAY_SIZE(iov);
>> + else if (ret < 0)
>> + return ret;
>> +
>> + iov_iter_init(&iter, ITER_SOURCE, iov, ret, *translated);
>> +
>> + return copy_from_iter(dst, *translated, &iter);
>> +}
>> +
>> +static inline int copy_to_va(const struct vringh *vrh, void *dst, void *src,
>> + u64 len, u64 *translated)
>> +{
>> + struct iovec iov[16];
>> + struct iov_iter iter;
>> + int ret;
>> +
>> + ret = iotlb_translate_va(vrh, (u64)(uintptr_t)dst, len, translated, iov,
>> + ARRAY_SIZE(iov), VHOST_MAP_WO);
>> + if (ret == -ENOBUFS)
>> + ret = ARRAY_SIZE(iov);
>> + else if (ret < 0)
>> + return ret;
>> +
>> + iov_iter_init(&iter, ITER_DEST, iov, ret, *translated);
>> +
>> + return copy_to_iter(src, *translated, &iter);
>> +}
>> +
>> +static int iotlb_translate_pa(const struct vringh *vrh,
>> + u64 addr, u64 len, u64 *translated,
>> + struct bio_vec iov[],
>> + int iov_size, u32 perm)
>> +{
>> + struct vhost_iotlb_map *map;
>> + struct vhost_iotlb *iotlb = vrh->iotlb;
>> u64 s = 0, last = addr + len - 1;
>> + int ret = 0;
>>
>> spin_lock(vrh->iotlb_lock);
>>
>> @@ -1141,28 +1225,61 @@ static int iotlb_translate(const struct vringh *vrh,
>> return ret;
>> }
>>
>> +static inline int copy_from_pa(const struct vringh *vrh, void *dst, void *src,
>> + u64 len, u64 *translated)
>> +{
>> + struct bio_vec iov[16];
>> + struct iov_iter iter;
>> + int ret;
>> +
>> + ret = iotlb_translate_pa(vrh, (u64)(uintptr_t)src, len, translated, iov,
>> + ARRAY_SIZE(iov), VHOST_MAP_RO);
>> + if (ret == -ENOBUFS)
>> + ret = ARRAY_SIZE(iov);
>> + else if (ret < 0)
>> + return ret;
>> +
>> + iov_iter_bvec(&iter, ITER_SOURCE, iov, ret, *translated);
>> +
>> + return copy_from_iter(dst, *translated, &iter);
>> +}
>> +
>> +static inline int copy_to_pa(const struct vringh *vrh, void *dst, void *src,
>> + u64 len, u64 *translated)
>> +{
>> + struct bio_vec iov[16];
>> + struct iov_iter iter;
>> + int ret;
>> +
>> + ret = iotlb_translate_pa(vrh, (u64)(uintptr_t)dst, len, translated, iov,
>> + ARRAY_SIZE(iov), VHOST_MAP_WO);
>> + if (ret == -ENOBUFS)
>> + ret = ARRAY_SIZE(iov);
>> + else if (ret < 0)
>> + return ret;
>> +
>> + iov_iter_bvec(&iter, ITER_DEST, iov, ret, *translated);
>> +
>> + return copy_to_iter(src, *translated, &iter);
>> +}
>> +
>> static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
>> void *src, size_t len)
>> {
>> u64 total_translated = 0;
>>
>> while (total_translated < len) {
>> - struct bio_vec iov[16];
>> - struct iov_iter iter;
>> u64 translated;
>> int ret;
>>
>> - ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
>> - len - total_translated, &translated,
>> - iov, ARRAY_SIZE(iov), VHOST_MAP_RO);
>> - if (ret == -ENOBUFS)
>> - ret = ARRAY_SIZE(iov);
>> - else if (ret < 0)
>> - return ret;
>> -
>> - iov_iter_bvec(&iter, ITER_SOURCE, iov, ret, translated);
>> + if (vrh->use_va) {
>> + ret = copy_from_va(vrh, dst, src,
>> + len - total_translated, &translated);
>> + } else {
>> + ret = copy_from_pa(vrh, dst, src,
>> + len - total_translated, &translated);
>> + }
>>
>> - ret = copy_from_iter(dst, translated, &iter);
>> if (ret < 0)
>> return ret;
>>
>> @@ -1180,22 +1297,17 @@ static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
>> u64 total_translated = 0;
>>
>> while (total_translated < len) {
>> - struct bio_vec iov[16];
>> - struct iov_iter iter;
>> u64 translated;
>> int ret;
>>
>> - ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
>> - len - total_translated, &translated,
>> - iov, ARRAY_SIZE(iov), VHOST_MAP_WO);
>> - if (ret == -ENOBUFS)
>> - ret = ARRAY_SIZE(iov);
>> - else if (ret < 0)
>> - return ret;
>> -
>> - iov_iter_bvec(&iter, ITER_DEST, iov, ret, translated);
>> + if (vrh->use_va) {
>> + ret = copy_to_va(vrh, dst, src,
>> + len - total_translated, &translated);
>> + } else {
>> + ret = copy_to_pa(vrh, dst, src,
>> + len - total_translated, &translated);
>> + }
>>
>> - ret = copy_to_iter(src, translated, &iter);
>> if (ret < 0)
>> return ret;
>>
>> @@ -1210,20 +1322,37 @@ static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
>> static inline int getu16_iotlb(const struct vringh *vrh,
>> u16 *val, const __virtio16 *p)
>> {
>> - struct bio_vec iov;
>> - void *kaddr, *from;
>> int ret;
>>
>> /* Atomic read is needed for getu16 */
>> - ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), NULL,
>> - &iov, 1, VHOST_MAP_RO);
>> - if (ret < 0)
>> - return ret;
>> + if (vrh->use_va) {
>> + struct iovec iov;
>> + __virtio16 tmp;
>> +
>> + ret = iotlb_translate_va(vrh, (u64)(uintptr_t)p, sizeof(*p),
>> + NULL, &iov, 1, VHOST_MAP_RO);
>> + if (ret < 0)
>> + return ret;
>
>Nit: since we have copy_to_va/copy_to_pa variants, let's introduce
>getu16_iotlb_va/pa variants?
Yep!
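
Something like this, I guess (just a rough, untested sketch on top of the
hunks quoted above, to be cleaned up for v3):

static inline int getu16_iotlb_va(const struct vringh *vrh, u16 *val,
                                  const __virtio16 *p)
{
        struct iovec iov;
        __virtio16 tmp;
        int ret;

        /* Translate the user VA through the IOTLB first */
        ret = iotlb_translate_va(vrh, (u64)(uintptr_t)p, sizeof(*p),
                                 NULL, &iov, 1, VHOST_MAP_RO);
        if (ret < 0)
                return ret;

        /* Atomic read of the __virtio16 from the user VA */
        ret = __get_user(tmp, (__virtio16 __user *)iov.iov_base);
        if (ret)
                return ret;

        *val = vringh16_to_cpu(vrh, tmp);

        return 0;
}

static inline int getu16_iotlb_pa(const struct vringh *vrh, u16 *val,
                                  const __virtio16 *p)
{
        struct bio_vec iov;
        void *kaddr, *from;
        int ret;

        ret = iotlb_translate_pa(vrh, (u64)(uintptr_t)p, sizeof(*p),
                                 NULL, &iov, 1, VHOST_MAP_RO);
        if (ret < 0)
                return ret;

        /* Map the page locally to get a dereferenceable kernel address */
        kaddr = kmap_local_page(iov.bv_page);
        from = kaddr + iov.bv_offset;
        *val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
        kunmap_local(kaddr);

        return 0;
}

static inline int getu16_iotlb(const struct vringh *vrh,
                               u16 *val, const __virtio16 *p)
{
        if (vrh->use_va)
                return getu16_iotlb_va(vrh, val, p);

        return getu16_iotlb_pa(vrh, val, p);
}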
>
>>
>> - kaddr = kmap_local_page(iov.bv_page);
>> - from = kaddr + iov.bv_offset;
>> - *val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
>> - kunmap_local(kaddr);
>> + ret = __get_user(tmp, (__virtio16 __user *)iov.iov_base);
>> + if (ret)
>> + return ret;
>> +
>> + *val = vringh16_to_cpu(vrh, tmp);
>> + } else {
>> + struct bio_vec iov;
>> + void *kaddr, *from;
>> +
>> + ret = iotlb_translate_pa(vrh, (u64)(uintptr_t)p, sizeof(*p),
>> + NULL, &iov, 1, VHOST_MAP_RO);
>> + if (ret < 0)
>> + return ret;
>> +
>> + kaddr = kmap_local_page(iov.bv_page);
>
>If we decide to have a use_va switch, is kmap_local_page() still required here?
>
I think yes. This is related to the email where Fabio clarified this for
us, right?
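
In the PA case iotlb_translate_pa() only gives us back a struct page plus
an offset in the bio_vec, so IIUC we still need a temporary kernel mapping
(e.g. the page could be a highmem page without a permanent kernel mapping)
before we can dereference it, i.e. the pattern from the hunk above:

        /* PA path: iov.bv_page is just a struct page (possibly highmem),
         * so create a local kernel mapping before reading from it.
         */
        kaddr = kmap_local_page(iov.bv_page);
        from = kaddr + iov.bv_offset;
        *val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
        kunmap_local(kaddr);

In the VA case instead we have a user VA in iov.iov_base and go through
__get_user(), so no mapping is needed there.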
>The rest looks good.
Thanks,
Stefano