Message-ID: <20241014233758.994861-4-adrian.larumbe@collabora.com>
Date: Tue, 15 Oct 2024 00:31:39 +0100
From: Adrián Larumbe <adrian.larumbe@...labora.com>
To: Boris Brezillon <boris.brezillon@...labora.com>,
Rob Herring <robh@...nel.org>,
Steven Price <steven.price@....com>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Maxime Ripard <mripard@...nel.org>,
Thomas Zimmermann <tzimmermann@...e.de>,
David Airlie <airlied@...il.com>,
Simona Vetter <simona@...ll.ch>,
Philipp Zabel <p.zabel@...gutronix.de>
Cc: kernel@...labora.com,
Adrián Larumbe <adrian.larumbe@...labora.com>,
dri-devel@...ts.freedesktop.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 4/9] drm/panfrost: handle job hw submit errors
When HW job submission fails, avoid waiting for the DRM scheduler job
timeout handler; instead, return the error from the run_job() hook so the
DRM scheduler core signals the error fence immediately.
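
For reviewers' context (paraphrased, not part of this patch): the DRM
scheduler core already treats an ERR_PTR() return from ->run_job() as an
immediate failure, roughly along these lines (the exact code and internal
helper names differ between kernel versions, so treat this as a sketch
only):

	fence = sched->ops->run_job(sched_job);
	if (IS_ERR_OR_NULL(fence)) {
		if (IS_ERR(fence))
			dma_fence_set_error(&sched_job->s_fence->finished,
					    PTR_ERR(fence));
		/* the finished fence is completed right away with the
		 * error; the job timeout handler is never involved */
	} else {
		/* otherwise the returned HW fence is tracked until it
		 * signals */
	}

So returning ERR_PTR(ret) from panfrost_job_run() below propagates the
submit error to the finished fence without waiting for a timeout.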
Signed-off-by: Adrián Larumbe <adrian.larumbe@...labora.com>
---
drivers/gpu/drm/panfrost/panfrost_job.c | 17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index f0a4690bcdf9..52ec9dc2397c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -195,7 +195,7 @@ panfrost_enqueue_job(struct panfrost_device *pfdev, int slot,
return 1;
}
-static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
+static int panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
struct panfrost_device *pfdev = job->pfdev;
unsigned int subslot;
@@ -207,15 +207,15 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
ret = pm_runtime_get_sync(pfdev->base.dev);
if (ret < 0)
- return;
+ return ret;
if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
- return;
+ return -EINVAL;
}
ret = panfrost_mmu_as_get(pfdev, job->mmu, &cfg);
if (ret)
- return;
+ return ret;
job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));
@@ -263,6 +263,8 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
job, js, subslot, jc_head, cfg & 0xf);
}
spin_unlock(&pfdev->js->job_lock);
+
+ return 0;
}
static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
@@ -384,6 +386,7 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
struct panfrost_device *pfdev = job->pfdev;
int slot = panfrost_job_get_slot(job);
struct dma_fence *fence = NULL;
+ int ret;
if (unlikely(job->base.s_fence->finished.error))
return NULL;
@@ -402,7 +405,11 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
dma_fence_put(job->done_fence);
job->done_fence = dma_fence_get(fence);
- panfrost_job_hw_submit(job, slot);
+ ret = panfrost_job_hw_submit(job, slot);
+ if (ret) {
+ dma_fence_put(job->done_fence);
+ return ERR_PTR(ret);
+ }
return fence;
}
--
2.46.2