Message-ID: <c6d5c64b-11ea-453a-b7fe-4071dd11d7ea@ursulin.net>
Date: Thu, 20 Mar 2025 10:49:41 +0000
From: Tvrtko Ursulin <tursulin@...ulin.net>
To: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@....com>,
Matthew Brost <matthew.brost@...el.com>, Danilo Krummrich <dakr@...nel.org>,
Philipp Stanner <phasta@...nel.org>,
Christian König <ckoenig.leichtzumerken@...il.com>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Maxime Ripard <mripard@...nel.org>, Thomas Zimmermann <tzimmermann@...e.de>,
David Airlie <airlied@...il.com>, Simona Vetter <simona@...ll.ch>
Cc: dri-devel@...ts.freedesktop.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v8 07/10] drm/sched: cleanup event names
On 20/03/2025 09:58, Pierre-Eric Pelloux-Prayer wrote:
> All events now start with the same prefix (drm_sched_job_).
>
> drm_sched_job_wait_dep was misleading because it didn't wait at all:
> it was emitted for every dependency, even ones the job never had to
> wait on. It's replaced by trace_drm_sched_job_unschedulable, which is
> only emitted when the job cannot be scheduled yet because a dependency
> is still unsignaled. For moot (already signaled) dependencies, nothing
> is traced.
>
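For readers following along: gpu_scheduler_trace.h declares the record
layout once with DECLARE_EVENT_CLASS() and each event reuses it via
DEFINE_EVENT(), which is why these renames are one-line changes that
don't touch the record layout. A minimal sketch of the pattern, with
the TP_STRUCT__entry()/TP_fast_assign()/TP_printk() details elided:

	/* The layout and arguments are declared once for the class. */
	DECLARE_EVENT_CLASS(drm_sched_job,
		TP_PROTO(struct drm_sched_job *sched_job,
			 struct drm_sched_entity *entity),
		TP_ARGS(sched_job, entity),
		/* TP_STRUCT__entry()/TP_fast_assign()/TP_printk() elided */
	);

	/* Each named event instantiates the shared class, so renaming
	 * an event only touches its DEFINE_EVENT() line. */
	DEFINE_EVENT(drm_sched_job, drm_sched_job_queue,
		TP_PROTO(struct drm_sched_job *sched_job,
			 struct drm_sched_entity *entity),
		TP_ARGS(sched_job, entity)
	);
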
> Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@....com>
> ---
> drivers/gpu/drm/scheduler/gpu_scheduler_trace.h | 8 ++++----
> drivers/gpu/drm/scheduler/sched_entity.c | 8 ++++----
> drivers/gpu/drm/scheduler/sched_main.c | 4 ++--
> 3 files changed, 10 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> index 38cdd659a286..4ce53e493fef 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> @@ -63,17 +63,17 @@ DECLARE_EVENT_CLASS(drm_sched_job,
> __entry->job_count, __entry->hw_job_count, __entry->client_id)
> );
>
> -DEFINE_EVENT(drm_sched_job, drm_sched_job,
> +DEFINE_EVENT(drm_sched_job, drm_sched_job_queue,
> TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
> TP_ARGS(sched_job, entity)
> );
>
> -DEFINE_EVENT(drm_sched_job, drm_run_job,
> +DEFINE_EVENT(drm_sched_job, drm_sched_job_run,
> TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
> TP_ARGS(sched_job, entity)
> );
>
> -TRACE_EVENT(drm_sched_process_job,
> +TRACE_EVENT(drm_sched_job_done,
> TP_PROTO(struct drm_sched_fence *fence),
> TP_ARGS(fence),
> TP_STRUCT__entry(
> @@ -112,7 +112,7 @@ TRACE_EVENT(drm_sched_job_add_dep,
> __entry->ctx, __entry->seqno)
> );
>
> -TRACE_EVENT(drm_sched_job_wait_dep,
> +TRACE_EVENT(drm_sched_job_unschedulable,
> TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
> TP_ARGS(sched_job, fence),
> TP_STRUCT__entry(
> diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
> index 047e42cfb129..0bad247f783e 100644
> --- a/drivers/gpu/drm/scheduler/sched_entity.c
> +++ b/drivers/gpu/drm/scheduler/sched_entity.c
> @@ -470,10 +470,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
>
> while ((entity->dependency =
> drm_sched_job_dependency(sched_job, entity))) {
> - trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
> -
> - if (drm_sched_entity_add_dependency_cb(entity))
> + if (drm_sched_entity_add_dependency_cb(entity)) {
> + trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
> return NULL;
> + }
> }
>
> /* skip jobs from entity that marked guilty */
> @@ -579,7 +579,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
> bool first;
> ktime_t submit_ts;
>
> - trace_drm_sched_job(sched_job, entity);
> + trace_drm_sched_job_queue(sched_job, entity);
>
> if (trace_drm_sched_job_add_dep_enabled()) {
> struct dma_fence *entry;
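Side note on the context above: trace_drm_sched_job_add_dep_enabled()
is the per-event helper the tracepoint machinery generates for every
event; it compiles down to a static branch, so the dependency walk it
guards costs nothing while the event is disabled. Roughly (the loop
body is my reconstruction of the surrounding context, not part of this
patch):

	if (trace_drm_sched_job_add_dep_enabled()) {
		struct dma_fence *entry;
		unsigned long index;

		/* Emit one add_dep event per dependency fence. */
		xa_for_each(&sched_job->dependencies, index, entry)
			trace_drm_sched_job_add_dep(sched_job, entry);
	}
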
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 501eae13acb3..85c2111e5500 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -401,7 +401,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
> atomic_sub(s_job->credits, &sched->credit_count);
> atomic_dec(sched->score);
>
> - trace_drm_sched_process_job(s_fence);
> + trace_drm_sched_job_done(s_fence);
>
> dma_fence_get(&s_fence->finished);
> drm_sched_fence_finished(s_fence, result);
> @@ -1221,7 +1221,7 @@ static void drm_sched_run_job_work(struct work_struct *w)
> atomic_add(sched_job->credits, &sched->credit_count);
> drm_sched_job_begin(sched_job);
>
> - trace_drm_run_job(sched_job, entity);
> + trace_drm_sched_job_run(sched_job, entity);
> fence = sched->ops->run_job(sched_job);
> complete_all(&entity->entity_idle);
> drm_sched_fence_scheduled(s_fence, fence);
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@...lia.com>
Regards,
Tvrtko