Message-ID: <20250905194136.2644350-1-lizhi.hou@amd.com>
Date: Fri, 5 Sep 2025 12:41:36 -0700
From: Lizhi Hou <lizhi.hou@....com>
To: <ogabbay@...nel.org>, <quic_jhugo@...cinc.com>,
<jacek.lawrynowicz@...ux.intel.com>, <dri-devel@...ts.freedesktop.org>
CC: Lizhi Hou <lizhi.hou@....com>, <linux-kernel@...r.kernel.org>,
<max.zhen@....com>, <sonal.santan@....com>, <mario.limonciello@....com>
Subject: [PATCH V1] accel/amdxdna: Call dma_buf_vmap_unlocked() for imported object
In amdxdna_gem_obj_vmap(), calling dma_buf_vmap() triggers a kernel
warning when LOCKDEP is enabled. So for imported objects, call
dma_buf_vmap_unlocked() instead, and use drm_gem_vmap() for all other
objects. Apply the same change to the vunmap path.
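For reference, dma_buf_vmap_unlocked() takes the dma-buf reservation
lock around the locked call itself, roughly like the sketch below
(simplified from drivers/dma-buf/dma-buf.c; exact code may differ
between kernel versions):

  int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
  {
          int ret;

          /* take the reservation lock so the caller does not have to hold it */
          dma_resv_lock(dmabuf->resv, NULL);
          ret = dma_buf_vmap(dmabuf, map);
          dma_resv_unlock(dmabuf->resv);

          return ret;
  }

That is also why the dma_resv_assert_held() calls in the old helpers
are no longer needed.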
Fixes: bd72d4acda10 ("accel/amdxdna: Support user space allocated buffer")
Signed-off-by: Lizhi Hou <lizhi.hou@....com>
---
drivers/accel/amdxdna/amdxdna_gem.c | 38 +++++++++++------------------
1 file changed, 14 insertions(+), 24 deletions(-)
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
index d407a36eb412..50950be189ae 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.c
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -392,35 +392,25 @@ static const struct dma_buf_ops amdxdna_dmabuf_ops = {
.vunmap = drm_gem_dmabuf_vunmap,
};
-static int amdxdna_gem_obj_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+static int amdxdna_gem_obj_vmap(struct amdxdna_gem_obj *abo, struct iosys_map *map)
{
- struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
-
iosys_map_clear(map);
- dma_resv_assert_held(obj->resv);
-
if (is_import_bo(abo))
- dma_buf_vmap(abo->dma_buf, map);
- else
- drm_gem_shmem_object_vmap(obj, map);
-
- if (!map->vaddr)
- return -ENOMEM;
+ return dma_buf_vmap_unlocked(abo->dma_buf, map);
- return 0;
+ return drm_gem_vmap(to_gobj(abo), map);
}
-static void amdxdna_gem_obj_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+static void amdxdna_gem_obj_vunmap(struct amdxdna_gem_obj *abo, struct iosys_map *map)
{
- struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
-
- dma_resv_assert_held(obj->resv);
+ if (iosys_map_is_null(map))
+ return;
if (is_import_bo(abo))
- dma_buf_vunmap(abo->dma_buf, map);
- else
- drm_gem_shmem_object_vunmap(obj, map);
+ return dma_buf_vunmap_unlocked(abo->dma_buf, map);
+
+ return drm_gem_vunmap(to_gobj(abo), map);
}
static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
@@ -468,7 +458,7 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
if (abo->type == AMDXDNA_BO_DEV_HEAP)
drm_mm_takedown(&abo->mm);
- drm_gem_vunmap(gobj, &map);
+ amdxdna_gem_obj_vunmap(abo, &map);
mutex_destroy(&abo->lock);
if (is_import_bo(abo)) {
@@ -489,8 +479,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
.pin = drm_gem_shmem_object_pin,
.unpin = drm_gem_shmem_object_unpin,
.get_sg_table = drm_gem_shmem_object_get_sg_table,
- .vmap = amdxdna_gem_obj_vmap,
- .vunmap = amdxdna_gem_obj_vunmap,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
.mmap = amdxdna_gem_obj_mmap,
.vm_ops = &drm_gem_shmem_vm_ops,
.export = amdxdna_gem_prime_export,
@@ -692,7 +682,7 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);
- ret = drm_gem_vmap(to_gobj(abo), &map);
+ ret = amdxdna_gem_obj_vmap(abo, &map);
if (ret) {
XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret);
goto release_obj;
@@ -770,7 +760,7 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
abo->type = AMDXDNA_BO_CMD;
abo->client = filp->driver_priv;
- ret = drm_gem_vmap(to_gobj(abo), &map);
+ ret = amdxdna_gem_obj_vmap(abo, &map);
if (ret) {
XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
goto release_obj;
--
2.34.1