Message-ID: <936703a3-15b6-4ade-b9e5-5554dffc4430@arm.com>
Date: Thu, 9 Oct 2025 16:17:21 +0100
From: Steven Price <steven.price@....com>
To: Adrián Larumbe <adrian.larumbe@...labora.com>,
linux-kernel@...r.kernel.org
Cc: dri-devel@...ts.freedesktop.org,
Boris Brezillon <boris.brezillon@...labora.com>, kernel@...labora.com,
Rob Herring <robh@...nel.org>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Maxime Ripard <mripard@...nel.org>, Thomas Zimmermann <tzimmermann@...e.de>,
David Airlie <airlied@...il.com>, Simona Vetter <simona@...ll.ch>
Subject: Re: [PATCH v5 03/12] drm/panfrost: Handle job HW submit errors
On 07/10/2025 16:01, Adrián Larumbe wrote:
> Avoid waiting for the DRM scheduler's timedout_job handler; instead, let
> the DRM scheduler core signal the error fence immediately when HW job
> submission fails.
>
> That means we must also decrement the runtime-PM refcount for the device,
> because the job will never be enqueued or in flight.
>
> Reviewed-by: Boris Brezillon <boris.brezillon@...labora.com>
> Signed-off-by: Adrián Larumbe <adrian.larumbe@...labora.com>
Reviewed-by: Steven Price <steven.price@....com>
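
For the record, since this relies on scheduler core behaviour: when
->run_job() returns an ERR_PTR, drm_sched propagates that error to the
job's finished fence and signals it right away, so the failure is
visible without the timeout ever firing. Roughly (a paraphrased sketch
from my reading of drm/scheduler, not the literal code):

	fence = sched->ops->run_job(sched_job);
	if (IS_ERR(fence)) {
		/* No HW fence was produced; fail the job immediately. */
		dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
		dma_fence_signal(&s_fence->finished);
	}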
> ---
> drivers/gpu/drm/panfrost/panfrost_job.c | 24 ++++++++++++++++++------
> 1 file changed, 18 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
> index a0123d0a1b7d..ea3f2878fd15 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_job.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
> @@ -196,7 +196,7 @@ panfrost_enqueue_job(struct panfrost_device *pfdev, int slot,
> return 1;
> }
>
> -static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
> +static int panfrost_job_hw_submit(struct panfrost_job *job, int js)
> {
> struct panfrost_device *pfdev = job->pfdev;
> unsigned int subslot;
> @@ -204,18 +204,19 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
> u64 jc_head = job->jc;
> int ret;
>
> - panfrost_devfreq_record_busy(&pfdev->pfdevfreq);
> -
> ret = pm_runtime_get_sync(pfdev->base.dev);
> if (ret < 0)
> - return;
> + goto err_hwsubmit;
>
> if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
> - return;
> + ret = -EINVAL;
> + goto err_hwsubmit;
> }
>
> cfg = panfrost_mmu_as_get(pfdev, job->mmu);
>
> + panfrost_devfreq_record_busy(&pfdev->pfdevfreq);
> +
> job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
> job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));
>
> @@ -262,6 +263,12 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
> job, js, subslot, jc_head, cfg & 0xf);
> }
> spin_unlock(&pfdev->js->job_lock);
> +
> + return 0;
> +
> +err_hwsubmit:
> + pm_runtime_put_autosuspend(pfdev->base.dev);
> + return ret;
> }
>
> static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
> @@ -384,6 +391,7 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
> struct panfrost_device *pfdev = job->pfdev;
> int slot = panfrost_job_get_slot(job);
> struct dma_fence *fence = NULL;
> + int ret;
>
> if (job->ctx->destroyed)
> return ERR_PTR(-ECANCELED);
> @@ -405,7 +413,11 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
> dma_fence_put(job->done_fence);
> job->done_fence = dma_fence_get(fence);
>
> - panfrost_job_hw_submit(job, slot);
> + ret = panfrost_job_hw_submit(job, slot);
> + if (ret) {
> + dma_fence_put(fence);
> + return ERR_PTR(ret);
> + }
>
> return fence;
> }
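
One subtlety worth spelling out for the archive: pm_runtime_get_sync()
increments the usage counter even when it fails, so the err_hwsubmit
label has to drop the reference on that path as well. A minimal sketch
of the pattern (hypothetical 'dev', not the panfrost code):

	/*
	 * pm_runtime_get_sync() bumps the usage counter even on
	 * failure, so the error path must put the reference too.
	 */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(dev);
		return ret;
	}
	/* ... talk to the device ... */
	pm_runtime_put_autosuspend(dev);

(pm_runtime_resume_and_get() exists precisely to avoid this trap, but
sticking with pm_runtime_get_sync() matches what the function already
uses.)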