[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CADnq5_PbJrJUiJ0h+ORZKoeRjP=xY70_UpKHdTE7uQUHpX1dkA@mail.gmail.com>
Date: Tue, 3 Jul 2018 16:45:02 -0400
From: Alex Deucher <alexdeucher@...il.com>
To: Eric Anholt <eric@...olt.net>
Cc: Maling list - DRI developers <dri-devel@...ts.freedesktop.org>,
LKML <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH 1/4] drm/v3d: Delay the scheduler timeout if we're still
making progress.
On Tue, Jul 3, 2018 at 1:05 PM, Eric Anholt <eric@...olt.net> wrote:
> GTF-GLES2.gtf.GL.acos.acos_float_vert_xvary submits jobs that take 4
> seconds at maximum resolution, but we still want to reset quickly if a
> job is really hung. Sample the CL's current address and the return
> address (since we call into tile lists repeatedly), and if either has
> changed, assume we've made progress.
>
> Signed-off-by: Eric Anholt <eric@...olt.net>
> Cc: Lucas Stach <l.stach@...gutronix.de>
Series is:
Reviewed-by: Alex Deucher <alexander.deucher@....com>
> ---
> drivers/gpu/drm/v3d/v3d_drv.h | 2 ++
> drivers/gpu/drm/v3d/v3d_regs.h | 1 +
> drivers/gpu/drm/v3d/v3d_sched.c | 18 ++++++++++++++++++
> 3 files changed, 21 insertions(+)
>
> diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
> index f546e0ab9562..a5d96d823416 100644
> --- a/drivers/gpu/drm/v3d/v3d_drv.h
> +++ b/drivers/gpu/drm/v3d/v3d_drv.h
> @@ -189,6 +189,8 @@ struct v3d_job {
>
> /* GPU virtual addresses of the start/end of the CL job. */
> u32 start, end;
> +
> + u32 timedout_ctca, timedout_ctra;
> };
>
> struct v3d_exec_info {
> diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
> index fc13282dfc2f..854046565989 100644
> --- a/drivers/gpu/drm/v3d/v3d_regs.h
> +++ b/drivers/gpu/drm/v3d/v3d_regs.h
> @@ -222,6 +222,7 @@
> #define V3D_CLE_CTNCA(n) (V3D_CLE_CT0CA + 4 * n)
> #define V3D_CLE_CT0RA 0x00118
> #define V3D_CLE_CT1RA 0x0011c
> +#define V3D_CLE_CTNRA(n) (V3D_CLE_CT0RA + 4 * n)
> #define V3D_CLE_CT0LC 0x00120
> #define V3D_CLE_CT1LC 0x00124
> #define V3D_CLE_CT0PC 0x00128
> diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
> index 808bc901f567..00667c733dca 100644
> --- a/drivers/gpu/drm/v3d/v3d_sched.c
> +++ b/drivers/gpu/drm/v3d/v3d_sched.c
> @@ -153,7 +153,25 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
> struct v3d_job *job = to_v3d_job(sched_job);
> struct v3d_exec_info *exec = job->exec;
> struct v3d_dev *v3d = exec->v3d;
> + enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
> enum v3d_queue q;
> + u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
> + u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));
> +
> + /* If the current address or return address have changed, then
> + * the GPU has probably made progress and we should delay the
> + * reset. This could fail if the GPU got in an infinite loop
> + * in the CL, but that is pretty unlikely outside of an i-g-t
> + * testcase.
> + */
> + if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
> + job->timedout_ctca = ctca;
> + job->timedout_ctra = ctra;
> +
> + schedule_delayed_work(&job->base.work_tdr,
> + job->base.sched->timeout);
> + return;
> + }
>
> mutex_lock(&v3d->reset_lock);
>
> --
> 2.18.0
>
> _______________________________________________
> dri-devel mailing list
> dri-devel@...ts.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
Powered by blists - more mailing lists