Message-ID: <x5ziofhtmnqt22b5nlmnsomyc7qp4bvrhjrlnif6r3mw4xp3xm@ke75kmsh3i3w>
Date: Tue, 4 Feb 2025 09:40:32 -0600
From: Lucas De Marchi <lucas.demarchi@...el.com>
To: Maarten Lankhorst <dev@...khorst.se>
CC: <intel-xe@...ts.freedesktop.org>, <dri-devel@...ts.freedesktop.org>,
<linux-kernel@...r.kernel.org>, Ingo Molnar <mingo@...nel.org>, David Lechner
<dlechner@...libre.com>, Peter Zijlstra <peterz@...radead.org>, Will Deacon
<will@...nel.org>, Waiman Long <longman@...hat.com>, Boqun Feng
<boqun.feng@...il.com>
Subject: Re: [PATCH-resent-to-correct-ml 5/8] drm/xe/coredump: Use guard
helpers for xe_force_wake.
On Tue, Feb 04, 2025 at 02:22:34PM +0100, Maarten Lankhorst wrote:
>---
> drivers/gpu/drm/xe/xe_devcoredump.c | 36 ++++++++++++++---------------
> 1 file changed, 17 insertions(+), 19 deletions(-)
>
>diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
>index 39fe485d20858..afe229fba8a9c 100644
>--- a/drivers/gpu/drm/xe/xe_devcoredump.c
>+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
>@@ -233,7 +233,6 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
> struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
> struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
> struct xe_device *xe = coredump_to_xe(coredump);
>- unsigned int fw_ref;
>
> /*
> * NB: Despite passing a GFP_ flags parameter here, more allocations are done
>@@ -247,12 +246,13 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
> xe_pm_runtime_get(xe);
>
> /* keep going if fw fails as we still want to save the memory and SW data */
>- fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
>- if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
>- xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
>- xe_vm_snapshot_capture_delayed(ss->vm);
>- xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
>- xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);
>+	scoped_guard(xe_force_wake, gt_to_fw(ss->gt), XE_FORCEWAKE_ALL) {
>+ if (!xe_force_wake_scope_has_domain(XE_FORCEWAKE_ALL))
>+ xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
Not sure why we emit an xe_gt_info() to the kernel log and then just let
the two capture calls in this block add garbage to the devcoredump -
whoever processes the devcoredump later may have no clue about that log
message. But I'm also not seeing why we need XE_FORCEWAKE_ALL for those
calls. Aren't they just reading memory?
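
As an aside, for anyone following along who hasn't used the cleanup.h
helpers yet: scoped_guard() declares a local guard object whose destructor
runs on every exit from the block, so the forcewake put can't be skipped by
an early return. The xe_force_wake guard class and
xe_force_wake_scope_has_domain() aren't in this hunk - I'm assuming they are
introduced earlier in the series and wrap xe_force_wake_get()/
xe_force_wake_put() plus an xe_force_wake_ref_has_domain() check on the
stored ref. A minimal sketch of the construct using the stock mutex guard
(the lock and function names below are made up for illustration only):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(snap_lock);		/* stand-in lock, illustration only */
static int snap_count;

static int example_capture(bool ready)
{
	/* mutex_lock() runs on entering the scope */
	scoped_guard(mutex, &snap_lock) {
		if (!ready)
			return -EAGAIN;	/* mutex_unlock() still runs here */
		snap_count++;
	}				/* ...and here, at normal scope exit */

	return 0;
}
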
Lucas De Marchi
>+
>+ xe_vm_snapshot_capture_delayed(ss->vm);
>+ xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
>+ }
>
> xe_pm_runtime_put(xe);
>
>@@ -277,7 +277,6 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
> u32 width_mask = (0x1 << q->width) - 1;
> const char *process_name = "no process";
>
>- unsigned int fw_ref;
> bool cookie;
> int i;
>
>@@ -305,20 +304,19 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
> }
>
> /* keep going if fw fails as we still want to save the memory and SW data */
>- fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
>-
>- ss->guc.log = xe_guc_log_snapshot_capture(&guc->log, true);
>- ss->guc.ct = xe_guc_ct_snapshot_capture(&guc->ct);
>- ss->ge = xe_guc_exec_queue_snapshot_capture(q);
>- if (job)
>- ss->job = xe_sched_job_snapshot_capture(job);
>- ss->vm = xe_vm_snapshot_capture(q->vm);
>-
>- xe_engine_snapshot_capture_for_queue(q);
>+ scoped_guard(xe_force_wake, gt_to_fw(ss->gt), XE_FORCEWAKE_ALL) {
>+ ss->guc.log = xe_guc_log_snapshot_capture(&guc->log, true);
>+ ss->guc.ct = xe_guc_ct_snapshot_capture(&guc->ct);
>+ ss->ge = xe_guc_exec_queue_snapshot_capture(q);
>+ if (job)
>+ ss->job = xe_sched_job_snapshot_capture(job);
>+ ss->vm = xe_vm_snapshot_capture(q->vm);
>+
>+ xe_engine_snapshot_capture_for_queue(q);
>+ }
>
> queue_work(system_unbound_wq, &ss->work);
>
>- xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
> dma_fence_end_signalling(cookie);
> }
>
>--
>2.47.1
>