Message-Id: <20220411215849.297838-3-robdclark@gmail.com>
Date: Mon, 11 Apr 2022 14:58:31 -0700
From: Rob Clark <robdclark@...il.com>
To: dri-devel@...ts.freedesktop.org
Cc: linux-arm-msm@...r.kernel.org, freedreno@...ts.freedesktop.org,
Dmitry Baryshkov <dmitry.baryshkov@...aro.org>,
Dmitry Osipenko <dmitry.osipenko@...labora.com>,
Rob Clark <robdclark@...omium.org>,
Rob Clark <robdclark@...il.com>, Sean Paul <sean@...rly.run>,
Abhinav Kumar <quic_abhinavk@...cinc.com>,
David Airlie <airlied@...ux.ie>,
Daniel Vetter <daniel@...ll.ch>,
Jordan Crouse <jordan@...micpenguin.net>,
Akhil P Oommen <quic_akhilpo@...cinc.com>,
Vladimir Lypak <vladimir.lypak@...il.com>,
Viresh Kumar <viresh.kumar@...aro.org>,
Jonathan Marek <jonathan@...ek.ca>,
Yangtao Li <tiny.windzz@...il.com>,
Emma Anholt <emma@...olt.net>,
Christian König <christian.koenig@....com>,
Dan Carpenter <dan.carpenter@...cle.com>,
linux-kernel@...r.kernel.org (open list)
Subject: [PATCH v4 02/10] drm/msm/gpu: Drop duplicate fence counter
From: Rob Clark <robdclark@...omium.org>
The ring seqno counter duplicates the fence-context last_fence counter.
Both are incremented in lock-step, on the same scheduler thread, so the
split only makes things less obvious.
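
As a side note for archive readers, here is a minimal, self-contained
userspace sketch of the reasoning above. The struct and function names
are toy stand-ins, not the driver's actual msm_ringbuffer /
msm_fence_context code; the point is only that both counters are bumped
on the same path, so they can never diverge and the ring-local copy is
pure duplication:

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Toy stand-ins for the fence context and ringbuffer (names hypothetical). */
  struct toy_fence_ctx { uint32_t last_fence; };
  struct toy_ring      { uint32_t seqno; struct toy_fence_ctx *fctx; };

  /* Before the patch: both counters bumped in lock-step on submit. */
  static uint32_t submit_old(struct toy_ring *ring)
  {
          ring->fctx->last_fence++;       /* fence-context counter */
          return ++ring->seqno;           /* duplicate ring-local copy */
  }

  /* After the patch: the submit seqno just comes from the fence context. */
  static uint32_t submit_new(struct toy_ring *ring)
  {
          return ++ring->fctx->last_fence;
  }

  int main(void)
  {
          struct toy_fence_ctx fctx = { 0 };
          struct toy_ring ring = { 0, &fctx };

          for (int i = 0; i < 8; i++) {
                  uint32_t s = submit_old(&ring);
                  /* Lock-step: the ring-local copy can never disagree. */
                  assert(s == ring.fctx->last_fence);
          }

          /* Dropping ring->seqno changes nothing the submit path can observe. */
          uint32_t next = submit_new(&ring);
          printf("old path seqno %u, last_fence %u, next submit %u\n",
                 ring.seqno, fctx.last_fence, next);
          return 0;
  }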
Signed-off-by: Rob Clark <robdclark@...omium.org>
---
drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 2 +-
drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 2 +-
drivers/gpu/drm/msm/adreno/adreno_gpu.c | 4 ++--
drivers/gpu/drm/msm/msm_gpu.c | 8 ++++----
drivers/gpu/drm/msm/msm_gpu.h | 2 +-
drivers/gpu/drm/msm/msm_ringbuffer.h | 1 -
6 files changed, 9 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 407f50a15faa..d31aa87c6c8d 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1235,7 +1235,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
return;
DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
- ring ? ring->id : -1, ring ? ring->seqno : 0,
+ ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 83c31b2ad865..17de46fc4bf2 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1390,7 +1390,7 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
DRM_DEV_ERROR(&gpu->pdev->dev,
"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
- ring ? ring->id : -1, ring ? ring->seqno : 0,
+ ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
gpu_read(gpu, REG_A6XX_RBBM_STATUS),
gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 45f2c6084aa7..6385ab06632f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -578,7 +578,7 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
state->ring[i].fence = gpu->rb[i]->memptrs->fence;
state->ring[i].iova = gpu->rb[i]->iova;
- state->ring[i].seqno = gpu->rb[i]->seqno;
+ state->ring[i].seqno = gpu->rb[i]->fctx->last_fence;
state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
state->ring[i].wptr = get_wptr(gpu->rb[i]);
@@ -828,7 +828,7 @@ void adreno_dump_info(struct msm_gpu *gpu)
printk("rb %d: fence: %d/%d\n", i,
ring->memptrs->fence,
- ring->seqno);
+ ring->fctx->last_fence);
printk("rptr: %d\n", get_rptr(adreno_gpu, ring));
printk("rb wptr: %d\n", get_wptr(ring));
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 747b89aa9d13..9480bdf875db 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -534,7 +534,7 @@ static void hangcheck_handler(struct timer_list *t)
if (fence != ring->hangcheck_fence) {
/* some progress has been made.. ya! */
ring->hangcheck_fence = fence;
- } else if (fence_before(fence, ring->seqno)) {
+ } else if (fence_before(fence, ring->fctx->last_fence)) {
/* no progress and not done.. hung! */
ring->hangcheck_fence = fence;
DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
@@ -542,13 +542,13 @@ static void hangcheck_handler(struct timer_list *t)
DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
- gpu->name, ring->seqno);
+ gpu->name, ring->fctx->last_fence);
kthread_queue_work(gpu->worker, &gpu->recover_work);
}
/* if still more pending work, reset the hangcheck timer: */
- if (fence_after(ring->seqno, ring->hangcheck_fence))
+ if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
hangcheck_timer_reset(gpu);
/* workaround for missing irq: */
@@ -770,7 +770,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
msm_gpu_hw_init(gpu);
- submit->seqno = ++ring->seqno;
+ submit->seqno = submit->hw_fence->seqno;
msm_rd_dump_submit(priv->rd, submit, NULL);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 2c0203fd6ce3..e47a42b1244a 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -291,7 +291,7 @@ static inline bool msm_gpu_active(struct msm_gpu *gpu)
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
- if (fence_after(ring->seqno, ring->memptrs->fence))
+ if (fence_after(ring->fctx->last_fence, ring->memptrs->fence))
return true;
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index d8c63df4e9ca..2a5045abe46e 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -59,7 +59,6 @@ struct msm_ringbuffer {
spinlock_t submit_lock;
uint64_t iova;
- uint32_t seqno;
uint32_t hangcheck_fence;
struct msm_rbmemptrs *memptrs;
uint64_t memptrs_iova;
--
2.35.1