Message-ID: <1488fe4b-011c-8be4-5fca-b7fd4e418085@amd.com>
Date: Tue, 16 Sep 2025 09:55:15 -0700
From: Lizhi Hou <lizhi.hou@....com>
To: "Falkowski, Maciej" <maciej.falkowski@...ux.intel.com>,
<ogabbay@...nel.org>, <quic_jhugo@...cinc.com>,
<dri-devel@...ts.freedesktop.org>
CC: <linux-kernel@...r.kernel.org>, <max.zhen@....com>,
<sonal.santan@....com>, <mario.limonciello@....com>
Subject: Re: [RESEND PATCH V1] accel/amdxdna: Call dma_buf_vmap_unlocked() for imported object
On 9/16/25 08:55, Falkowski, Maciej wrote:
> On 9/15/2025 6:10 PM, Lizhi Hou wrote:
>
>> In amdxdna_gem_obj_vmap(), calling dma_buf_vmap() triggers a kernel
>> warning if LOCKDEP is enabled. So for an imported object, use
>> dma_buf_vmap_unlocked(), and use drm_gem_vmap() for other objects.
>> A similar change applies to the vunmap code.
>>
>> Fixes: bd72d4acda10 ("accel/amdxdna: Support user space allocated buffer")
>> Signed-off-by: Lizhi Hou <lizhi.hou@....com>
>> ---
>> drivers/accel/amdxdna/amdxdna_gem.c | 38 +++++++++++------------------
>> 1 file changed, 14 insertions(+), 24 deletions(-)
>>
>> diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
>> index d407a36eb412..50950be189ae 100644
>> --- a/drivers/accel/amdxdna/amdxdna_gem.c
>> +++ b/drivers/accel/amdxdna/amdxdna_gem.c
>> @@ -392,35 +392,25 @@ static const struct dma_buf_ops amdxdna_dmabuf_ops = {
>> .vunmap = drm_gem_dmabuf_vunmap,
>> };
>> -static int amdxdna_gem_obj_vmap(struct drm_gem_object *obj, struct iosys_map *map)
>> +static int amdxdna_gem_obj_vmap(struct amdxdna_gem_obj *abo, struct iosys_map *map)
>> {
>> - struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
>> -
>> iosys_map_clear(map);
>> - dma_resv_assert_held(obj->resv);
>> -
>> if (is_import_bo(abo))
>> - dma_buf_vmap(abo->dma_buf, map);
>> - else
>> - drm_gem_shmem_object_vmap(obj, map);
>> -
>> - if (!map->vaddr)
>> - return -ENOMEM;
>> + return dma_buf_vmap_unlocked(abo->dma_buf, map);
> Hi,
>
> dma_buf_vmap_unlocked() calls iosys_map_clear() at its start, so in
> this case it will be called twice. It will probably be optimized out,
> but it may be better to omit it here.
OK. I will remove iosys_map_clear(). The callers already pass in a
cleared map, even though drm_gem_vmap() might not clear it.
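Something like this (untested sketch, using the helpers already in
amdxdna_gem.c):

static int amdxdna_gem_obj_vmap(struct amdxdna_gem_obj *abo, struct iosys_map *map)
{
        /*
         * No iosys_map_clear() here: dma_buf_vmap_unlocked() clears the
         * map itself, and the callers pass in a cleared map anyway.
         */
        if (is_import_bo(abo))
                return dma_buf_vmap_unlocked(abo->dma_buf, map);

        return drm_gem_vmap(to_gobj(abo), map);
}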
>> - return 0;
>> + return drm_gem_vmap(to_gobj(abo), map);
>> }
>> -static void amdxdna_gem_obj_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
>> +static void amdxdna_gem_obj_vunmap(struct amdxdna_gem_obj *abo, struct iosys_map *map)
>> {
>> - struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
>> -
>> - dma_resv_assert_held(obj->resv);
>> + if (iosys_map_is_null(map))
>> + return;
>> if (is_import_bo(abo))
>> - dma_buf_vunmap(abo->dma_buf, map);
>> - else
>> - drm_gem_shmem_object_vunmap(obj, map);
>> + return dma_buf_vunmap_unlocked(abo->dma_buf, map);
> I also wonder what the convention is here on clearing the iosys_map
> when returning. drm_gem_vunmap() will clear the map for callers,
> while the other function does not. I think at least a comment
> explaining the logic is necessary.
The input map will not be reused, so it does not matter whether it is
cleared when returning. I will add a comment.
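For example (untested sketch):

static void amdxdna_gem_obj_vunmap(struct amdxdna_gem_obj *abo, struct iosys_map *map)
{
        /*
         * The map is not reused after this call, so it does not matter
         * that dma_buf_vunmap_unlocked() leaves it set while
         * drm_gem_vunmap() clears it for the caller.
         */
        if (iosys_map_is_null(map))
                return;

        if (is_import_bo(abo))
                return dma_buf_vunmap_unlocked(abo->dma_buf, map);

        return drm_gem_vunmap(to_gobj(abo), map);
}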
Thanks,
Lizhi
>
> Best regards,
> Maciej
>
>> +
>> + return drm_gem_vunmap(to_gobj(abo), map);
>> }
>> static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
>> @@ -468,7 +458,7 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
>> if (abo->type == AMDXDNA_BO_DEV_HEAP)
>> drm_mm_takedown(&abo->mm);
>> - drm_gem_vunmap(gobj, &map);
>> + amdxdna_gem_obj_vunmap(abo, &map);
>> mutex_destroy(&abo->lock);
>> if (is_import_bo(abo)) {
>> @@ -489,8 +479,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
>> .pin = drm_gem_shmem_object_pin,
>> .unpin = drm_gem_shmem_object_unpin,
>> .get_sg_table = drm_gem_shmem_object_get_sg_table,
>> - .vmap = amdxdna_gem_obj_vmap,
>> - .vunmap = amdxdna_gem_obj_vunmap,
>> + .vmap = drm_gem_shmem_object_vmap,
>> + .vunmap = drm_gem_shmem_object_vunmap,
>> .mmap = amdxdna_gem_obj_mmap,
>> .vm_ops = &drm_gem_shmem_vm_ops,
>> .export = amdxdna_gem_prime_export,
>> @@ -692,7 +682,7 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
>> abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
>> drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);
>> - ret = drm_gem_vmap(to_gobj(abo), &map);
>> + ret = amdxdna_gem_obj_vmap(abo, &map);
>> if (ret) {
>> XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret);
>> goto release_obj;
>> @@ -770,7 +760,7 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
>> abo->type = AMDXDNA_BO_CMD;
>> abo->client = filp->driver_priv;
>> - ret = drm_gem_vmap(to_gobj(abo), &map);
>> + ret = amdxdna_gem_obj_vmap(abo, &map);
>> if (ret) {
>> XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
>> goto release_obj;