lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <20250821160545.70ca8d02@fedora>
Date: Thu, 21 Aug 2025 16:05:45 +0200
From: Boris Brezillon <boris.brezillon@...labora.com>
To: Alice Ryhl <aliceryhl@...gle.com>
Cc: Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>, Maxime Ripard
 <mripard@...nel.org>, Thomas Zimmermann <tzimmermann@...e.de>, David Airlie
 <airlied@...il.com>, Simona Vetter <simona@...ll.ch>, Danilo Krummrich
 <dakr@...nel.org>, Daniel Almeida <daniel.almeida@...labora.com>, Steven
 Price <steven.price@....com>, Liviu Dudau <liviu.dudau@....com>, Rob Clark
 <robin.clark@....qualcomm.com>, Rob Herring <robh@...nel.org>, Miguel Ojeda
 <ojeda@...nel.org>, Boqun Feng <boqun.feng@...il.com>, Gary Guo
 <gary@...yguo.net>, "Björn Roy Baron"
 <bjorn3_gh@...tonmail.com>, Benno Lossin <lossin@...nel.org>, Andreas
 Hindborg <a.hindborg@...nel.org>, Trevor Gross <tmgross@...ch.edu>,
 dri-devel@...ts.freedesktop.org, linux-kernel@...r.kernel.org,
 rust-for-linux@...r.kernel.org
Subject: Re: [PATCH 2/2] panthor: use drm_gem_object.gpuva.lock instead of
 gpuva_list_lock

On Thu, 14 Aug 2025 13:53:15 +0000
Alice Ryhl <aliceryhl@...gle.com> wrote:

> Now that drm_gem_object has a dedicated mutex for the gpuva list that is
> intended to be used in cases that must be fence signalling safe, use it
> in Panthor.
> 
> Signed-off-by: Alice Ryhl <aliceryhl@...gle.com>
> ---
>  drivers/gpu/drm/panthor/panthor_gem.c |  4 +---
>  drivers/gpu/drm/panthor/panthor_gem.h | 12 ------------
>  drivers/gpu/drm/panthor/panthor_mmu.c | 16 ++++++++--------
>  3 files changed, 9 insertions(+), 23 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
> index a123bc740ba1460f96882206f598b148b64dc5f6..c654a3377903cf7e067becdb481fb14895a4eaa5 100644
> --- a/drivers/gpu/drm/panthor/panthor_gem.c
> +++ b/drivers/gpu/drm/panthor/panthor_gem.c
> @@ -74,7 +74,6 @@ static void panthor_gem_free_object(struct drm_gem_object *obj)
>  	mutex_destroy(&bo->label.lock);
>  
>  	drm_gem_free_mmap_offset(&bo->base.base);
> -	mutex_destroy(&bo->gpuva_list_lock);
>  	drm_gem_shmem_free(&bo->base);
>  	drm_gem_object_put(vm_root_gem);
>  }
> @@ -246,8 +245,7 @@ struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t
>  
>  	obj->base.base.funcs = &panthor_gem_funcs;
>  	obj->base.map_wc = !ptdev->coherent;
> -	mutex_init(&obj->gpuva_list_lock);
> -	drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock);
> +	drm_gem_gpuva_set_lock(&obj->base.base, &obj->base.base.gpuva.lock);

I guess this will go away in the previous patch if you follow Danilo's
advice to get rid of drm_gem_gpuva_set_lock(). The rest looks good to
me, so feel free to add

Reviewed-by: Boris Brezillon <boris.brezillon@...labora.com>

on the next version.

>  	mutex_init(&obj->label.lock);
>  
>  	panthor_gem_debugfs_bo_init(obj);
> diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
> index 8fc7215e9b900ed162e03aebeae999fda00eeb7a..80c6e24112d0bd0f1561ae4d2224842afb735a59 100644
> --- a/drivers/gpu/drm/panthor/panthor_gem.h
> +++ b/drivers/gpu/drm/panthor/panthor_gem.h
> @@ -79,18 +79,6 @@ struct panthor_gem_object {
>  	 */
>  	struct drm_gem_object *exclusive_vm_root_gem;
>  
> -	/**
> -	 * @gpuva_list_lock: Custom GPUVA lock.
> -	 *
> -	 * Used to protect insertion of drm_gpuva elements to the
> -	 * drm_gem_object.gpuva.list list.
> -	 *
> -	 * We can't use the GEM resv for that, because drm_gpuva_link() is
> -	 * called in a dma-signaling path, where we're not allowed to take
> -	 * resv locks.
> -	 */
> -	struct mutex gpuva_list_lock;
> -
>  	/** @flags: Combination of drm_panthor_bo_flags flags. */
>  	u32 flags;
>  
> diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
> index 4140f697ba5af5769492d3bbb378e18aec8ade98..49ca416c7c2c5a01ab0513029697ba9c7a35832d 100644
> --- a/drivers/gpu/drm/panthor/panthor_mmu.c
> +++ b/drivers/gpu/drm/panthor/panthor_mmu.c
> @@ -1074,9 +1074,9 @@ static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
>  	 * GEM vm_bo list.
>  	 */
>  	dma_resv_lock(drm_gpuvm_resv(vm), NULL);
> -	mutex_lock(&bo->gpuva_list_lock);
> +	mutex_lock(&bo->base.base.gpuva.lock);
>  	unpin = drm_gpuvm_bo_put(vm_bo);
> -	mutex_unlock(&bo->gpuva_list_lock);
> +	mutex_unlock(&bo->base.base.gpuva.lock);
>  	dma_resv_unlock(drm_gpuvm_resv(vm));
>  
>  	/* If the vm_bo object was destroyed, release the pin reference that
> @@ -1249,9 +1249,9 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
>  	 * calling this function.
>  	 */
>  	dma_resv_lock(panthor_vm_resv(vm), NULL);
> -	mutex_lock(&bo->gpuva_list_lock);
> +	mutex_lock(&bo->base.base.gpuva.lock);
>  	op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
> -	mutex_unlock(&bo->gpuva_list_lock);
> +	mutex_unlock(&bo->base.base.gpuva.lock);
>  	dma_resv_unlock(panthor_vm_resv(vm));
>  
>  	/* If the a vm_bo for this <VM,BO> combination exists, it already
> @@ -2003,10 +2003,10 @@ static void panthor_vma_link(struct panthor_vm *vm,
>  {
>  	struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
>  
> -	mutex_lock(&bo->gpuva_list_lock);
> +	mutex_lock(&bo->base.base.gpuva.lock);
>  	drm_gpuva_link(&vma->base, vm_bo);
>  	drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo));
> -	mutex_unlock(&bo->gpuva_list_lock);
> +	mutex_unlock(&bo->base.base.gpuva.lock);
>  }
>  
>  static void panthor_vma_unlink(struct panthor_vm *vm,
> @@ -2015,9 +2015,9 @@ static void panthor_vma_unlink(struct panthor_vm *vm,
>  	struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
>  	struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo);
>  
> -	mutex_lock(&bo->gpuva_list_lock);
> +	mutex_lock(&bo->base.base.gpuva.lock);
>  	drm_gpuva_unlink(&vma->base);
> -	mutex_unlock(&bo->gpuva_list_lock);
> +	mutex_unlock(&bo->base.base.gpuva.lock);
>  
>  	/* drm_gpuva_unlink() release the vm_bo, but we manually retained it
>  	 * when entering this function, so we can implement deferred VMA
> 


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ