[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <cab2246a-5ed8-4651-999f-260c1c4275ad@amd.com>
Date: Mon, 29 Jan 2024 14:12:19 +0100
From: Christian König <christian.koenig@....com>
To: Julia Zhang <julia.zhang@....com>,
Gurchetan Singh <gurchetansingh@...omium.org>, Chia-I Wu
<olvaffe@...il.com>, David Airlie <airlied@...hat.com>,
Gerd Hoffmann <kraxel@...hat.com>, linux-kernel@...r.kernel.org,
dri-devel@...ts.freedesktop.org, amd-gfx@...ts.freedesktop.org,
virtualization@...ts.linux-foundation.org
Cc: Alex Deucher <alexander.deucher@....com>, Daniel Vetter
<daniel@...ll.ch>, David Airlie <airlied@...il.com>,
Erik Faye-Lund <kusmabite@...il.com>, Marek Olšák
<marek.olsak@....com>,
Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@....com>,
Honglei Huang <honglei1.huang@....com>, Chen Jiqian <Jiqian.Chen@....com>,
Huang Rui <ray.huang@....com>
Subject: Re: [PATCH v2 1/1] drm/virtio: Implement device_attach
Am 29.01.24 um 11:31 schrieb Julia Zhang:
> As vram objects don't have backing pages, they can't implement the
> drm_gem_object_funcs.get_sg_table callback. This removes the drm dma-buf
> callbacks in virtgpu_gem_map_dma_buf()/virtgpu_gem_unmap_dma_buf()
> and implements virtgpu-specific map/unmap/attach callbacks to support
> both shmem objects and vram objects.
>
> Signed-off-by: Julia Zhang <julia.zhang@....com>
I need to find more time to look into the code, but offhand I would say
that this is the correct solution.
Regards,
Christian.
> ---
> drivers/gpu/drm/virtio/virtgpu_prime.c | 40 +++++++++++++++++++++++---
> 1 file changed, 36 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
> index 44425f20d91a..b490a5343b06 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_prime.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
> @@ -49,11 +49,26 @@ virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
> {
> struct drm_gem_object *obj = attach->dmabuf->priv;
> struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
> + struct sg_table *sgt;
> + int ret;
>
> if (virtio_gpu_is_vram(bo))
> return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);
>
> - return drm_gem_map_dma_buf(attach, dir);
> + sgt = drm_prime_pages_to_sg(obj->dev,
> + to_drm_gem_shmem_obj(obj)->pages,
> + obj->size >> PAGE_SHIFT);
> + if (IS_ERR(sgt))
> + return sgt;
> +
> + ret = dma_map_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
> + if (ret) {
> + sg_free_table(sgt);
> + kfree(sgt);
> + return ERR_PTR(ret);
> + }
> +
> + return sgt;
> }
>
> static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
> @@ -63,12 +78,29 @@ static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
> struct drm_gem_object *obj = attach->dmabuf->priv;
> struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
>
> + if (!sgt)
> + return;
> +
> if (virtio_gpu_is_vram(bo)) {
> virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
> - return;
> + } else {
> + dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
> + sg_free_table(sgt);
> + kfree(sgt);
> }
> +}
> +
> +static int virtgpu_gem_device_attach(struct dma_buf *dma_buf,
> + struct dma_buf_attachment *attach)
> +{
> + struct drm_gem_object *obj = attach->dmabuf->priv;
> + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
> + int ret = 0;
> +
> + if (!virtio_gpu_is_vram(bo) && obj->funcs->pin)
> + ret = obj->funcs->pin(obj);
>
> - drm_gem_unmap_dma_buf(attach, sgt, dir);
> + return ret;
> }
>
> static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
> @@ -83,7 +115,7 @@ static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
> .vmap = drm_gem_dmabuf_vmap,
> .vunmap = drm_gem_dmabuf_vunmap,
> },
> - .device_attach = drm_gem_map_attach,
> + .device_attach = virtgpu_gem_device_attach,
> .get_uuid = virtgpu_virtio_get_uuid,
> };
>
Powered by blists - more mailing lists