lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Thu, 5 Apr 2018 14:33:48 +0200
From:   Christian König <christian.koenig@....com>
To:     Eric Anholt <eric@...olt.net>, dri-devel@...ts.freedesktop.org
Cc:     linux-kernel@...r.kernel.org, Lucas Stach <l.stach@...gutronix.de>,
        Alex Deucher <alexander.deucher@....com>
Subject: Re: [PATCH] drm/sched: Extend the documentation.

Am 05.04.2018 um 00:32 schrieb Eric Anholt:
> These comments answer all the questions I had for myself when
> implementing a driver using the GPU scheduler.
>
> Signed-off-by: Eric Anholt <eric@...olt.net>

Reviewed-by: Christian König <christian.koenig@....com>

Already pushed to amd-staging-drm-next as well.

Thanks,
Christian.

> ---
>   include/drm/gpu_scheduler.h | 46 +++++++++++++++++++++++++++++++++----
>   1 file changed, 42 insertions(+), 4 deletions(-)
>
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index dfd54fb94e10..c053a32341bf 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -43,10 +43,12 @@ enum drm_sched_priority {
>   };
>   
>   /**
> - * A scheduler entity is a wrapper around a job queue or a group
> - * of other entities. Entities take turns emitting jobs from their
> - * job queues to corresponding hardware ring based on scheduling
> - * policy.
> + * drm_sched_entity - A wrapper around a job queue (typically attached
> + * to the DRM file_priv).
> + *
> + * Entities will emit jobs in order to their corresponding hardware
> + * ring, and the scheduler will alternate between entities based on
> + * scheduling policy.
>   */
>   struct drm_sched_entity {
>   	struct list_head		list;
> @@ -78,7 +80,18 @@ struct drm_sched_rq {
>   
>   struct drm_sched_fence {
>   	struct dma_fence		scheduled;
> +
> +	/* This fence is what will be signaled by the scheduler when
> +	 * the job is completed.
> +	 *
> +	 * When setting up an out fence for the job, you should use
> +	 * this, since it's available immediately upon
> +	 * drm_sched_job_init(), and the fence returned by the driver
> +	 * from run_job() won't be created until the dependencies have
> +	 * resolved.
> +	 */
>   	struct dma_fence		finished;
> +
>   	struct dma_fence_cb		cb;
>   	struct dma_fence		*parent;
>   	struct drm_gpu_scheduler	*sched;
> @@ -88,6 +101,13 @@ struct drm_sched_fence {
>   
>   struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
>   
> +/**
> + * drm_sched_job - A job to be run by an entity.
> + *
> + * A job is created by the driver using drm_sched_job_init(), and
> + * the driver should call drm_sched_entity_push_job() once it wants
> + * the scheduler to schedule the job.
> + */
>   struct drm_sched_job {
>   	struct spsc_node		queue_node;
>   	struct drm_gpu_scheduler	*sched;
> @@ -112,10 +132,28 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
>    * these functions should be implemented in driver side
>   */
>   struct drm_sched_backend_ops {
> +	/* Called when the scheduler is considering scheduling this
> +	 * job next, to get another struct dma_fence for this job to
> +	 * block on.  Once it returns NULL, run_job() may be called.
> +	 */
>   	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
>   					struct drm_sched_entity *s_entity);
> +
> +	/* Called to execute the job once all of the dependencies have
> +	 * been resolved.  This may be called multiple times, if
> +	 * timedout_job() has happened and drm_sched_job_recovery()
> +	 * decides to try it again.
> +	 */
>   	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
> +
> +	/* Called when a job has taken too long to execute, to trigger
> +	 * GPU recovery.
> +	 */
>   	void (*timedout_job)(struct drm_sched_job *sched_job);
> +
> +	/* Called once the job's finished fence has been signaled and
> +	 * it's time to clean it up.
> +	 */
>   	void (*free_job)(struct drm_sched_job *sched_job);
>   };
>   

Powered by blists - more mailing lists