Message-ID: <CAJaqyWeMAVDD+vSNdtCb34Oeh8inAfzYDC4qqT4daqVttcowGw@mail.gmail.com>
Date: Wed, 13 Aug 2025 09:15:13 +0200
From: Eugenio Perez Martin <eperezma@...hat.com>
To: Jason Wang <jasowang@...hat.com>
Cc: mst@...hat.com, xuanzhuo@...ux.alibaba.com, virtualization@...ts.linux.dev,
linux-kernel@...r.kernel.org, hch@...radead.org,
Christoph Hellwig <hch@....de>
Subject: Re: [PATCH V5 2/9] virtio_ring: switch to use dma_{map|unmap}_page()
On Wed, Aug 13, 2025 at 7:48 AM Jason Wang <jasowang@...hat.com> wrote:
>
> This patch switches to dma_{map|unmap}_page() so that virtio_ring only
> relies on the page-based DMA mapping helpers. This will help with the
> following rework of the virtio map operations.
>
> Reviewed-by: Christoph Hellwig <hch@....de>
Reviewed-by: Eugenio Pérez <eperezma@...hat.com>
> Signed-off-by: Jason Wang <jasowang@...hat.com>
> ---
> drivers/virtio/virtio_ring.c | 55 +++++++++++++++---------------------
> 1 file changed, 23 insertions(+), 32 deletions(-)
>
> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index 103bad8cffff..75e5f6336c8d 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -405,8 +405,8 @@ static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
> if (!vq->use_dma_api)
> return (dma_addr_t)virt_to_phys(cpu_addr);
>
> - return dma_map_single(vring_dma_dev(vq),
> - cpu_addr, size, direction);
> + return virtqueue_dma_map_single_attrs(&vq->vq, cpu_addr,
> + size, direction, 0);
> }
>
> static int vring_mapping_error(const struct vring_virtqueue *vq,
> @@ -451,22 +451,14 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
> if (flags & VRING_DESC_F_INDIRECT) {
> if (!vq->use_dma_api)
> goto out;
> + } else if (!vring_need_unmap_buffer(vq, extra))
> + goto out;
>
> - dma_unmap_single(vring_dma_dev(vq),
> - extra->addr,
> - extra->len,
> - (flags & VRING_DESC_F_WRITE) ?
> - DMA_FROM_DEVICE : DMA_TO_DEVICE);
> - } else {
> - if (!vring_need_unmap_buffer(vq, extra))
> - goto out;
> -
> - dma_unmap_page(vring_dma_dev(vq),
> - extra->addr,
> - extra->len,
> - (flags & VRING_DESC_F_WRITE) ?
> - DMA_FROM_DEVICE : DMA_TO_DEVICE);
> - }
> + dma_unmap_page(vring_dma_dev(vq),
> + extra->addr,
> + extra->len,
> + (flags & VRING_DESC_F_WRITE) ?
> + DMA_FROM_DEVICE : DMA_TO_DEVICE);
>
> out:
> return extra->next;
> @@ -1276,20 +1268,13 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
> if (flags & VRING_DESC_F_INDIRECT) {
> if (!vq->use_dma_api)
> return;
> + } else if (!vring_need_unmap_buffer(vq, extra))
> + return;
>
> - dma_unmap_single(vring_dma_dev(vq),
> - extra->addr, extra->len,
> - (flags & VRING_DESC_F_WRITE) ?
> - DMA_FROM_DEVICE : DMA_TO_DEVICE);
> - } else {
> - if (!vring_need_unmap_buffer(vq, extra))
> - return;
> -
> - dma_unmap_page(vring_dma_dev(vq),
> - extra->addr, extra->len,
> - (flags & VRING_DESC_F_WRITE) ?
> - DMA_FROM_DEVICE : DMA_TO_DEVICE);
> - }
> + dma_unmap_page(vring_dma_dev(vq),
> + extra->addr, extra->len,
> + (flags & VRING_DESC_F_WRITE) ?
> + DMA_FROM_DEVICE : DMA_TO_DEVICE);
> }
>
> static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
> @@ -3161,7 +3146,13 @@ dma_addr_t virtqueue_dma_map_single_attrs(const struct virtqueue *_vq, void *ptr
> return (dma_addr_t)virt_to_phys(ptr);
> }
>
> - return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs);
> + /* DMA must never operate on areas that might be remapped. */
> + if (dev_WARN_ONCE(&_vq->vdev->dev, is_vmalloc_addr(ptr),
> + "rejecting DMA map of vmalloc memory\n"))
> + return DMA_MAPPING_ERROR;
> +
> + return dma_map_page_attrs(vring_dma_dev(vq), virt_to_page(ptr),
> + offset_in_page(ptr), size, dir, attrs);
> }
> EXPORT_SYMBOL_GPL(virtqueue_dma_map_single_attrs);
>
> @@ -3186,7 +3177,7 @@ void virtqueue_dma_unmap_single_attrs(const struct virtqueue *_vq,
> if (!vq->use_dma_api)
> return;
>
> - dma_unmap_single_attrs(vring_dma_dev(vq), addr, size, dir, attrs);
> + dma_unmap_page_attrs(vring_dma_dev(vq), addr, size, dir, attrs);
> }
> EXPORT_SYMBOL_GPL(virtqueue_dma_unmap_single_attrs);
>
> --
> 2.31.1
>
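
FWIW, unless I'm misreading include/linux/dma-mapping.h, the new body of
virtqueue_dma_map_single_attrs() mirrors what dma_map_single_attrs()
already does internally, so behaviour should be unchanged for valid
(linearly mapped) addresses. A minimal sketch of that equivalence,
illustrative only (map_linear_buf() is not part of the patch):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/*
 * For a linearly mapped (non-vmalloc) kernel address, mapping the
 * backing page at its in-page offset is equivalent to
 * dma_map_single(dev, ptr, size, dir).
 */
static dma_addr_t map_linear_buf(struct device *dev, void *ptr,
                                 size_t size,
                                 enum dma_data_direction dir)
{
        /* virt_to_page() is only valid for the linear mapping. */
        if (WARN_ON_ONCE(is_vmalloc_addr(ptr)))
                return DMA_MAPPING_ERROR;

        return dma_map_page(dev, virt_to_page(ptr),
                            offset_in_page(ptr), size, dir);
}

So callers keep checking the result with virtqueue_dma_mapping_error()
as before; a vmalloc address now just shows up as an ordinary mapping
failure.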