Message-ID: <20250814-gpuva-mutex-in-gem-v1-2-e202cbfe6d77@google.com>
Date: Thu, 14 Aug 2025 13:53:15 +0000
From: Alice Ryhl <aliceryhl@...gle.com>
To: Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>, Maxime Ripard <mripard@...nel.org>,
Thomas Zimmermann <tzimmermann@...e.de>, David Airlie <airlied@...il.com>, Simona Vetter <simona@...ll.ch>,
Danilo Krummrich <dakr@...nel.org>, Boris Brezillon <boris.brezillon@...labora.com>,
Daniel Almeida <daniel.almeida@...labora.com>, Steven Price <steven.price@....com>,
Liviu Dudau <liviu.dudau@....com>, Rob Clark <robin.clark@....qualcomm.com>,
Rob Herring <robh@...nel.org>
Cc: Miguel Ojeda <ojeda@...nel.org>, Boqun Feng <boqun.feng@...il.com>, Gary Guo <gary@...yguo.net>,
"Björn Roy Baron" <bjorn3_gh@...tonmail.com>, Benno Lossin <lossin@...nel.org>,
Andreas Hindborg <a.hindborg@...nel.org>, Trevor Gross <tmgross@...ch.edu>,
dri-devel@...ts.freedesktop.org, linux-kernel@...r.kernel.org,
rust-for-linux@...r.kernel.org, Alice Ryhl <aliceryhl@...gle.com>
Subject: [PATCH 2/2] panthor: use drm_gem_object.gpuva.lock instead of gpuva_list_lock

Now that drm_gem_object has a dedicated mutex for the gpuva list that is
intended to be used in cases that must be fence signalling safe, use it
in Panthor.
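
As an illustration (not part of this patch), the pattern now looks
roughly like the sketch below. The helper name is hypothetical, and the
obj->gpuva.lock field is assumed from patch 1/2 of this series:

  /*
   * Illustration only -- not part of this patch. A driver can link a
   * VMA from a dma-fence signalling path under the common per-GEM
   * mutex (assumed here as obj->gpuva.lock, added by patch 1/2)
   * instead of a driver-private lock or the resv lock, which must not
   * be taken on that path.
   */
  #include <drm/drm_gem.h>
  #include <drm/drm_gpuvm.h>

  static void example_vma_link(struct drm_gpuva *va,
  			       struct drm_gpuvm_bo *vm_bo)
  {
  	struct drm_gem_object *obj = va->gem.obj;

  	mutex_lock(&obj->gpuva.lock);	/* plain mutex: fence signalling safe */
  	drm_gpuva_link(va, vm_bo);
  	mutex_unlock(&obj->gpuva.lock);
  }
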
Signed-off-by: Alice Ryhl <aliceryhl@...gle.com>
---
 drivers/gpu/drm/panthor/panthor_gem.c | 4 +---
 drivers/gpu/drm/panthor/panthor_gem.h | 12 ------------
 drivers/gpu/drm/panthor/panthor_mmu.c | 16 ++++++++--------
 3 files changed, 9 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index a123bc740ba1460f96882206f598b148b64dc5f6..c654a3377903cf7e067becdb481fb14895a4eaa5 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -74,7 +74,6 @@ static void panthor_gem_free_object(struct drm_gem_object *obj)
 	mutex_destroy(&bo->label.lock);
 
 	drm_gem_free_mmap_offset(&bo->base.base);
-	mutex_destroy(&bo->gpuva_list_lock);
 	drm_gem_shmem_free(&bo->base);
 	drm_gem_object_put(vm_root_gem);
 }
@@ -246,8 +245,7 @@ struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t
 	obj->base.base.funcs = &panthor_gem_funcs;
 	obj->base.map_wc = !ptdev->coherent;
 
-	mutex_init(&obj->gpuva_list_lock);
-	drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock);
+	drm_gem_gpuva_set_lock(&obj->base.base, &obj->base.base.gpuva.lock);
 	mutex_init(&obj->label.lock);
 
 	panthor_gem_debugfs_bo_init(obj);
diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
index 8fc7215e9b900ed162e03aebeae999fda00eeb7a..80c6e24112d0bd0f1561ae4d2224842afb735a59 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.h
+++ b/drivers/gpu/drm/panthor/panthor_gem.h
@@ -79,18 +79,6 @@ struct panthor_gem_object {
 	 */
 	struct drm_gem_object *exclusive_vm_root_gem;
 
-	/**
-	 * @gpuva_list_lock: Custom GPUVA lock.
-	 *
-	 * Used to protect insertion of drm_gpuva elements to the
-	 * drm_gem_object.gpuva.list list.
-	 *
-	 * We can't use the GEM resv for that, because drm_gpuva_link() is
-	 * called in a dma-signaling path, where we're not allowed to take
-	 * resv locks.
-	 */
-	struct mutex gpuva_list_lock;
-
 	/** @flags: Combination of drm_panthor_bo_flags flags. */
 	u32 flags;
 
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 4140f697ba5af5769492d3bbb378e18aec8ade98..49ca416c7c2c5a01ab0513029697ba9c7a35832d 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -1074,9 +1074,9 @@ static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
 	 * GEM vm_bo list.
 	 */
 	dma_resv_lock(drm_gpuvm_resv(vm), NULL);
-	mutex_lock(&bo->gpuva_list_lock);
+	mutex_lock(&bo->base.base.gpuva.lock);
 	unpin = drm_gpuvm_bo_put(vm_bo);
-	mutex_unlock(&bo->gpuva_list_lock);
+	mutex_unlock(&bo->base.base.gpuva.lock);
 	dma_resv_unlock(drm_gpuvm_resv(vm));
 
 	/* If the vm_bo object was destroyed, release the pin reference that
@@ -1249,9 +1249,9 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
 	 * calling this function.
 	 */
 	dma_resv_lock(panthor_vm_resv(vm), NULL);
-	mutex_lock(&bo->gpuva_list_lock);
+	mutex_lock(&bo->base.base.gpuva.lock);
 	op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
-	mutex_unlock(&bo->gpuva_list_lock);
+	mutex_unlock(&bo->base.base.gpuva.lock);
 	dma_resv_unlock(panthor_vm_resv(vm));
 
 	/* If the a vm_bo for this <VM,BO> combination exists, it already
@@ -2003,10 +2003,10 @@ static void panthor_vma_link(struct panthor_vm *vm,
 {
 	struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
 
-	mutex_lock(&bo->gpuva_list_lock);
+	mutex_lock(&bo->base.base.gpuva.lock);
 	drm_gpuva_link(&vma->base, vm_bo);
 	drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo));
-	mutex_unlock(&bo->gpuva_list_lock);
+	mutex_unlock(&bo->base.base.gpuva.lock);
 }
 
 static void panthor_vma_unlink(struct panthor_vm *vm,
@@ -2015,9 +2015,9 @@ static void panthor_vma_unlink(struct panthor_vm *vm,
 	struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
 	struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo);
 
-	mutex_lock(&bo->gpuva_list_lock);
+	mutex_lock(&bo->base.base.gpuva.lock);
 	drm_gpuva_unlink(&vma->base);
-	mutex_unlock(&bo->gpuva_list_lock);
+	mutex_unlock(&bo->base.base.gpuva.lock);
 
 	/* drm_gpuva_unlink() release the vm_bo, but we manually retained it
 	 * when entering this function, so we can implement deferred VMA
--
2.51.0.rc0.215.g125493bb4a-goog