Message-ID: <afdf991d-0012-609b-b0e6-232cc8e9f3f0@codeaurora.org>
Date: Mon, 4 Oct 2021 21:48:03 +0530
From: Akhil P Oommen <akhilpo@...eaurora.org>
To: Rob Clark <robdclark@...il.com>, dri-devel@...ts.freedesktop.org
Cc: freedreno@...ts.freedesktop.org, linux-arm-msm@...r.kernel.org,
Dmitry Baryshkov <dmitry.baryshkov@...aro.org>,
Rob Clark <robdclark@...omium.org>,
Sean Paul <sean@...rly.run>, David Airlie <airlied@...ux.ie>,
Daniel Vetter <daniel@...ll.ch>,
Jonathan Marek <jonathan@...ek.ca>,
Eric Anholt <eric@...olt.net>,
Jordan Crouse <jordan@...micpenguin.net>,
Viresh Kumar <viresh.kumar@...aro.org>,
Sai Prakash Ranjan <saiprakash.ranjan@...eaurora.org>,
Sharat Masetty <smasetty@...eaurora.org>,
Douglas Anderson <dianders@...omium.org>,
open list <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH] drm/msm/a6xx: Serialize GMU communication
On 10/2/2021 1:02 AM, Rob Clark wrote:
> From: Rob Clark <robdclark@...omium.org>
>
> I've seen some crashes in our crash reporting that *look* like multiple
> threads stomping on each other while communicating with GMU. So wrap
> all those paths in a lock.
>
> Signed-off-by: Rob Clark <robdclark@...omium.org>
> ---
> drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 6 ++++
> drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 3 ++
> drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 40 +++++++++++++++++++++++----
> 3 files changed, 43 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
> index a7c58018959f..8b73f70766a4 100644
> --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
> +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
> @@ -296,6 +296,8 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
> u32 val;
> int request, ack;
>
> + WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
> +
> if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
> return -EINVAL;
>
> @@ -337,6 +339,8 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
> {
> int bit;
>
> + WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
> +
> if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
> return;
>
> @@ -1482,6 +1486,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
> if (!pdev)
> return -ENODEV;
>
> + mutex_init(&gmu->lock);
> +
> gmu->dev = &pdev->dev;
>
> of_dma_configure(gmu->dev, node, true);
> diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
> index 3c74f64e3126..84bd516f01e8 100644
> --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
> +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
> @@ -44,6 +44,9 @@ struct a6xx_gmu_bo {
> struct a6xx_gmu {
> struct device *dev;
>
> + /* For serializing communication with the GMU: */
> + struct mutex lock;
> +
> struct msm_gem_address_space *aspace;
>
> void * __iomem mmio;
> diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
> index f6a4dbef796b..bd7bdeff5d6f 100644
> --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
> +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
> @@ -881,7 +881,7 @@ static int a6xx_zap_shader_init(struct msm_gpu *gpu)
> A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
> A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
>
> -static int a6xx_hw_init(struct msm_gpu *gpu)
> +static int hw_init(struct msm_gpu *gpu)
> {
> struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> @@ -1135,6 +1135,19 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
> return ret;
> }
>
> +static int a6xx_hw_init(struct msm_gpu *gpu)
> +{
> + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> + int ret;
> +
> + mutex_lock(&a6xx_gpu->gmu.lock);
> + ret = hw_init(gpu);
> + mutex_unlock(&a6xx_gpu->gmu.lock);
> +
> + return ret;
> +}
> +
> static void a6xx_dump(struct msm_gpu *gpu)
> {
> DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
> @@ -1509,7 +1522,9 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
>
> trace_msm_gpu_resume(0);
>
> + mutex_lock(&a6xx_gpu->gmu.lock);
> ret = a6xx_gmu_resume(a6xx_gpu);
> + mutex_unlock(&a6xx_gpu->gmu.lock);
> if (ret)
> return ret;
>
> @@ -1532,7 +1547,9 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
>
> msm_devfreq_suspend(gpu);
>
> + mutex_lock(&a6xx_gpu->gmu.lock);
> ret = a6xx_gmu_stop(a6xx_gpu);
> + mutex_unlock(&a6xx_gpu->gmu.lock);
> if (ret)
> return ret;
>
> @@ -1547,18 +1564,19 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
> {
> struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> - static DEFINE_MUTEX(perfcounter_oob);
>
> - mutex_lock(&perfcounter_oob);
> + mutex_lock(&a6xx_gpu->gmu.lock);
>
> /* Force the GPU power on so we can read this register */
> a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
>
> *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
> - REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
> + REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
>
> a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
> - mutex_unlock(&perfcounter_oob);
> +
> + mutex_unlock(&a6xx_gpu->gmu.lock);
> +
> return 0;
> }
>
> @@ -1622,6 +1640,16 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
> return (unsigned long)busy_time;
> }
>
> +void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
> +{
> + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> +
> + mutex_lock(&a6xx_gpu->gmu.lock);
> + a6xx_gmu_set_freq(gpu, opp);
> + mutex_unlock(&a6xx_gpu->gmu.lock);
> +}
> +
> static struct msm_gem_address_space *
> a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
> {
> @@ -1766,7 +1794,7 @@ static const struct adreno_gpu_funcs funcs = {
> #endif
> .gpu_busy = a6xx_gpu_busy,
> .gpu_get_freq = a6xx_gmu_get_freq,
> - .gpu_set_freq = a6xx_gmu_set_freq,
> + .gpu_set_freq = a6xx_gpu_set_freq,
> #if defined(CONFIG_DRM_MSM_GPU_STATE)
> .gpu_state_get = a6xx_gpu_state_get,
> .gpu_state_put = a6xx_gpu_state_put,
>
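[Editorial note: for readers skimming the patch, here is a minimal, self-contained
sketch of the locking pattern it applies: the driver entry points take gmu->lock
around each complete GMU conversation, and the low-level OOB helpers assert that
the lock is already held. The demo_* names below are hypothetical illustrations,
not symbols from the msm driver.]

    /* Sketch only; assumes standard kernel mutex/WARN helpers. */
    #include <linux/mutex.h>
    #include <linux/bug.h>

    struct demo_gmu {
            /* Serializes all communication with the GMU */
            struct mutex lock;
    };

    static void demo_gmu_init(struct demo_gmu *gmu)
    {
            mutex_init(&gmu->lock);
    }

    /* Low-level helper: callers must already hold gmu->lock. */
    static int demo_set_oob(struct demo_gmu *gmu)
    {
            WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
            /* ... poke GMU registers and wait for the ack ... */
            return 0;
    }

    /* Entry point: holds the lock across the whole GMU exchange. */
    static int demo_hw_init(struct demo_gmu *gmu)
    {
            int ret;

            mutex_lock(&gmu->lock);
            ret = demo_set_oob(gmu);
            mutex_unlock(&gmu->lock);

            return ret;
    }

The same shape shows up in a6xx_hw_init()/hw_init(), a6xx_pm_resume()/suspend(),
a6xx_get_timestamp() and a6xx_gpu_set_freq() above: the outer wrapper owns the
lock, the GMU helpers only assert it.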
I think I overlooked this because every hw access is serialized in the
downstream driver.
Reviewed-by: Akhil P Oommen <akhilpo@...eaurora.org>
-Akhil