Message-ID: <a3f41155-3c32-40dd-b750-1023d94e2923@amd.com>
Date: Wed, 29 Oct 2025 14:05:57 +0100
From: Christian König <christian.koenig@....com>
To: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@....com>,
Matthew Brost <matthew.brost@...el.com>, Danilo Krummrich <dakr@...nel.org>,
Philipp Stanner <phasta@...nel.org>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Maxime Ripard <mripard@...nel.org>, Thomas Zimmermann <tzimmermann@...e.de>,
David Airlie <airlied@...il.com>, Simona Vetter <simona@...ll.ch>,
Sumit Semwal <sumit.semwal@...aro.org>
Cc: Christian König <christian.koenig@....com>,
dri-devel@...ts.freedesktop.org, linux-kernel@...r.kernel.org,
linux-media@...r.kernel.org, linaro-mm-sig@...ts.linaro.org
Subject: Re: [PATCH v1] drm/sched: fix deadlock in
drm_sched_entity_kill_jobs_cb
On 10/29/25 10:11, Pierre-Eric Pelloux-Prayer wrote:
> https://gitlab.freedesktop.org/mesa/mesa/-/issues/13908 pointed out
> a possible deadlock:
>
> [ 1231.611031] Possible interrupt unsafe locking scenario:
>
> [ 1231.611033]        CPU0                    CPU1
> [ 1231.611034]        ----                    ----
> [ 1231.611035]   lock(&xa->xa_lock#17);
> [ 1231.611038]                                local_irq_disable();
> [ 1231.611039]                                lock(&fence->lock);
> [ 1231.611041]                                lock(&xa->xa_lock#17);
> [ 1231.611044]   <Interrupt>
> [ 1231.611045]     lock(&fence->lock);
> [ 1231.611047]
>                 *** DEADLOCK ***
>
> My initial fix was to replace xa_erase by xa_erase_irq, but Christian
> pointed out that calling dma_fence_add_callback from a callback can
> also deadlock if the signalling fence and the one passed to
> dma_fence_add_callback share the same lock.
>
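Side note for anyone who hasn't hit the second issue before: fence callbacks are
invoked with the signalling fence's spinlock held (dma_fence_signal_locked()
walks the callback list under the lock), and dma_fence_add_callback() takes the
lock of the fence it is armed on. The old drm_sched_entity_kill_jobs_cb()
re-armed itself on the next dependency from inside the callback, so if the two
fences happened to share one lock that recursed on the lock. Roughly, with
made-up names (shared_lock, fence_a/fence_b, first_cb/second_cb are only for
illustration, this is not the scheduler code):

#include <linux/dma-fence.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(shared_lock);      /* one lock backing both fences */
static struct dma_fence fence_a, fence_b; /* both dma_fence_init()ed with &shared_lock */
static struct dma_fence_cb cb_a, cb_b;    /* cb_a is what first_cb was registered with */

static void second_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
}

/* Registered on fence_a; called by dma_fence_signal_locked(&fence_a),
 * i.e. with shared_lock already held.
 */
static void first_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	/*
	 * dma_fence_add_callback() takes fence_b->lock, which is the very
	 * same shared_lock held around this callback: self-deadlock.
	 */
	dma_fence_add_callback(&fence_b, &cb_b, second_cb);
}
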
> To fix both issues, the code iterating over the dependencies and re-arming
> the callback is moved out to drm_sched_entity_kill_jobs_work: the work item
> runs in process context without holding any fence lock, so it can safely
> take the xarray lock and call dma_fence_add_callback there.
>
> Suggested-by: Christian König <christian.koenig@....com>
> Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@....com>
Reviewed-by: Christian König <christian.koenig@....com>
> ---
> drivers/gpu/drm/scheduler/sched_entity.c | 34 +++++++++++++-----------
> 1 file changed, 19 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
> index c8e949f4a568..fe174a4857be 100644
> --- a/drivers/gpu/drm/scheduler/sched_entity.c
> +++ b/drivers/gpu/drm/scheduler/sched_entity.c
> @@ -173,26 +173,15 @@ int drm_sched_entity_error(struct drm_sched_entity *entity)
>  }
>  EXPORT_SYMBOL(drm_sched_entity_error);
>
> +static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
> +					  struct dma_fence_cb *cb);
> +
>  static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
>  {
>  	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
> -
> -	drm_sched_fence_scheduled(job->s_fence, NULL);
> -	drm_sched_fence_finished(job->s_fence, -ESRCH);
> -	WARN_ON(job->s_fence->parent);
> -	job->sched->ops->free_job(job);
> -}
> -
> -/* Signal the scheduler finished fence when the entity in question is killed. */
> -static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
> -					  struct dma_fence_cb *cb)
> -{
> -	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
> -						 finish_cb);
> +	struct dma_fence *f;
>  	unsigned long index;
>
> -	dma_fence_put(f);
> -
>  	/* Wait for all dependencies to avoid data corruptions */
>  	xa_for_each(&job->dependencies, index, f) {
>  		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
> @@ -220,6 +209,21 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
>  		dma_fence_put(f);
>  	}
>
> +	drm_sched_fence_scheduled(job->s_fence, NULL);
> +	drm_sched_fence_finished(job->s_fence, -ESRCH);
> +	WARN_ON(job->s_fence->parent);
> +	job->sched->ops->free_job(job);
> +}
> +
> +/* Signal the scheduler finished fence when the entity in question is killed. */
> +static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
> +					  struct dma_fence_cb *cb)
> +{
> +	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
> +						 finish_cb);
> +
> +	dma_fence_put(f);
> +
>  	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
>  	schedule_work(&job->work);
>  }