Message-ID: <be93d9ef-3d3e-4262-a280-d2922b983ca1@amd.com>
Date:   Fri, 3 Nov 2023 08:18:35 +0100
From:   Christian König <christian.koenig@....com>
To:     Danilo Krummrich <dakr@...hat.com>, airlied@...il.com,
        daniel@...ll.ch, matthew.brost@...el.com,
        thomas.hellstrom@...ux.intel.com, sarah.walker@...tec.com,
        donald.robson@...tec.com, boris.brezillon@...labora.com,
        faith@...strand.net
Cc:     dri-devel@...ts.freedesktop.org, nouveau@...ts.freedesktop.org,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH drm-misc-next v8 09/12] drm/gpuvm: reference count
 drm_gpuvm structures

On 02.11.23 at 00:31, Danilo Krummrich wrote:
> Implement reference counting for struct drm_gpuvm.

From the design point of view, what is that good for?

The background is that in the most common use case I see, this object is
embedded into something else, and a reference count is then not really a
good idea.
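
To make that concrete, the embedded case (which the nouveau change below
also follows) looks roughly like this; struct my_gpu_vm and my_vm_free are
hypothetical names used only for illustration:

    #include <linux/container_of.h>
    #include <linux/slab.h>
    #include <drm/drm_gpuvm.h>

    /* Hypothetical driver object embedding a drm_gpuvm. */
    struct my_gpu_vm {
            struct drm_gpuvm base;  /* embedded, not a pointer */
            /* ... driver-private state ... */
    };

    /* With the vm_free callback introduced below, dropping the last
     * reference frees the containing object via container_of().
     */
    static void my_vm_free(struct drm_gpuvm *gpuvm)
    {
            struct my_gpu_vm *vm = container_of(gpuvm, struct my_gpu_vm,
                                                base);

            kfree(vm);
    }

    static const struct drm_gpuvm_ops my_vm_ops = {
            .vm_free = my_vm_free,
    };

The consequence is that the lifetime of the containing object is then
driven by the embedded kref rather than directly by the driver.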

Thanks,
Christian.

>
> Signed-off-by: Danilo Krummrich <dakr@...hat.com>
> ---
>   drivers/gpu/drm/drm_gpuvm.c            | 44 +++++++++++++++++++-------
>   drivers/gpu/drm/nouveau/nouveau_uvmm.c | 20 +++++++++---
>   include/drm/drm_gpuvm.h                | 31 +++++++++++++++++-
>   3 files changed, 78 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
> index 53e2c406fb04..6a88eafc5229 100644
> --- a/drivers/gpu/drm/drm_gpuvm.c
> +++ b/drivers/gpu/drm/drm_gpuvm.c
> @@ -746,6 +746,8 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
>   	gpuvm->rb.tree = RB_ROOT_CACHED;
>   	INIT_LIST_HEAD(&gpuvm->rb.list);
>   
> +	kref_init(&gpuvm->kref);
> +
>   	gpuvm->name = name ? name : "unknown";
>   	gpuvm->flags = flags;
>   	gpuvm->ops = ops;
> @@ -770,15 +772,8 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
>   }
>   EXPORT_SYMBOL_GPL(drm_gpuvm_init);
>   
> -/**
> - * drm_gpuvm_destroy() - cleanup a &drm_gpuvm
> - * @gpuvm: pointer to the &drm_gpuvm to clean up
> - *
> - * Note that it is a bug to call this function on a manager that still
> - * holds GPU VA mappings.
> - */
> -void
> -drm_gpuvm_destroy(struct drm_gpuvm *gpuvm)
> +static void
> +drm_gpuvm_fini(struct drm_gpuvm *gpuvm)
>   {
>   	gpuvm->name = NULL;
>   
> @@ -790,7 +785,33 @@ drm_gpuvm_destroy(struct drm_gpuvm *gpuvm)
>   
>   	drm_gem_object_put(gpuvm->r_obj);
>   }
> -EXPORT_SYMBOL_GPL(drm_gpuvm_destroy);
> +
> +static void
> +drm_gpuvm_free(struct kref *kref)
> +{
> +	struct drm_gpuvm *gpuvm = container_of(kref, struct drm_gpuvm, kref);
> +
> +	if (drm_WARN_ON(gpuvm->drm, !gpuvm->ops->vm_free))
> +		return;
> +
> +	drm_gpuvm_fini(gpuvm);
> +
> +	gpuvm->ops->vm_free(gpuvm);
> +}
> +
> +/**
> + * drm_gpuvm_put() - drop a struct drm_gpuvm reference
> + * @gpuvm: the &drm_gpuvm to release the reference of
> + *
> + * This releases a reference to @gpuvm.
> + */
> +void
> +drm_gpuvm_put(struct drm_gpuvm *gpuvm)
> +{
> +	if (gpuvm)
> +		kref_put(&gpuvm->kref, drm_gpuvm_free);
> +}
> +EXPORT_SYMBOL_GPL(drm_gpuvm_put);
>   
>   static int
>   __drm_gpuva_insert(struct drm_gpuvm *gpuvm,
> @@ -843,7 +864,7 @@ drm_gpuva_insert(struct drm_gpuvm *gpuvm,
>   	if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range)))
>   		return -EINVAL;
>   
> -	return __drm_gpuva_insert(gpuvm, va);
> +	return __drm_gpuva_insert(drm_gpuvm_get(gpuvm), va);
>   }
>   EXPORT_SYMBOL_GPL(drm_gpuva_insert);
>   
> @@ -876,6 +897,7 @@ drm_gpuva_remove(struct drm_gpuva *va)
>   	}
>   
>   	__drm_gpuva_remove(va);
> +	drm_gpuvm_put(va->vm);
>   }
>   EXPORT_SYMBOL_GPL(drm_gpuva_remove);
>   
> diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> index 54be12c1272f..cb2f06565c46 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
> @@ -1780,6 +1780,18 @@ nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo)
>   	}
>   }
>   
> +static void
> +nouveau_uvmm_free(struct drm_gpuvm *gpuvm)
> +{
> +	struct nouveau_uvmm *uvmm = uvmm_from_gpuvm(gpuvm);
> +
> +	kfree(uvmm);
> +}
> +
> +static const struct drm_gpuvm_ops gpuvm_ops = {
> +	.vm_free = nouveau_uvmm_free,
> +};
> +
>   int
>   nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
>   			   void *data,
> @@ -1830,7 +1842,7 @@ nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
>   		       NOUVEAU_VA_SPACE_END,
>   		       init->kernel_managed_addr,
>   		       init->kernel_managed_size,
> -		       NULL);
> +		       &gpuvm_ops);
>   	/* GPUVM takes care from here on. */
>   	drm_gem_object_put(r_obj);
>   
> @@ -1849,8 +1861,7 @@ nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
>   	return 0;
>   
>   out_gpuvm_fini:
> -	drm_gpuvm_destroy(&uvmm->base);
> -	kfree(uvmm);
> +	drm_gpuvm_put(&uvmm->base);
>   out_unlock:
>   	mutex_unlock(&cli->mutex);
>   	return ret;
> @@ -1902,7 +1913,6 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
>   
>   	mutex_lock(&cli->mutex);
>   	nouveau_vmm_fini(&uvmm->vmm);
> -	drm_gpuvm_destroy(&uvmm->base);
> -	kfree(uvmm);
> +	drm_gpuvm_put(&uvmm->base);
>   	mutex_unlock(&cli->mutex);
>   }
> diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
> index 0c2e24155a93..4e6e1fd3485a 100644
> --- a/include/drm/drm_gpuvm.h
> +++ b/include/drm/drm_gpuvm.h
> @@ -247,6 +247,11 @@ struct drm_gpuvm {
>   		struct list_head list;
>   	} rb;
>   
> +	/**
> +	 * @kref: reference count of this object
> +	 */
> +	struct kref kref;
> +
>   	/**
>   	 * @kernel_alloc_node:
>   	 *
> @@ -273,7 +278,23 @@ void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
>   		    u64 start_offset, u64 range,
>   		    u64 reserve_offset, u64 reserve_range,
>   		    const struct drm_gpuvm_ops *ops);
> -void drm_gpuvm_destroy(struct drm_gpuvm *gpuvm);
> +
> +/**
> + * drm_gpuvm_get() - acquire a struct drm_gpuvm reference
> + * @gpuvm: the &drm_gpuvm to acquire the reference of
> + *
> + * This function acquires an additional reference to @gpuvm. It is illegal to
> + * call this without already holding a reference. No locks required.
> + */
> +static inline struct drm_gpuvm *
> +drm_gpuvm_get(struct drm_gpuvm *gpuvm)
> +{
> +	kref_get(&gpuvm->kref);
> +
> +	return gpuvm;
> +}
> +
> +void drm_gpuvm_put(struct drm_gpuvm *gpuvm);
>   
>   bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
>   bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
> @@ -673,6 +694,14 @@ static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
>    * operations to drivers.
>    */
>   struct drm_gpuvm_ops {
> +	/**
> +	 * @vm_free: called when the last reference of a struct drm_gpuvm is
> +	 * dropped
> +	 *
> +	 * This callback is mandatory.
> +	 */
> +	void (*vm_free)(struct drm_gpuvm *gpuvm);
> +
>   	/**
>   	 * @op_alloc: called when the &drm_gpuvm allocates
>   	 * a struct drm_gpuva_op
