Message-ID: <20240813210555.607641-6-daniel.almeida@collabora.com>
Date: Tue, 13 Aug 2024 18:05:47 -0300
From: Daniel Almeida <daniel.almeida@...labora.com>
To: liviu.dudau@....com,
	steven.price@....com,
	carsten.haitzler@....com,
	boris.brezillon@...labora.com,
	robh@...nel.org,
	faith.ekstrand@...labora.com
Cc: Daniel Almeida <daniel.almeida@...labora.com>,
	linux-kernel@...r.kernel.org
Subject: [PATCH v2 4/5] drm: panthor: add debugfs knob to dump successful jobs

It can be advantageous for userspace to have access to dumps of
successful jobs.

Allow this as an opt-in through a debugfs file.
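
For example, assuming the panthor device is DRM minor 0 (the actual
minor number may differ on a given system), the knob can be enabled
with:

  # echo 1 > /sys/kernel/debug/dri/0/dump_successful_jobs

Once enabled, queue_run_job() attempts a coredump for every submitted
job, not just failed ones.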

Note that the devcoredump infrastructure will discard new dumps if a
previous dump hasn't been read yet. A future patch will add support for
multi-job dumps, which will work around this limitation.
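
Until then, each dump must be read or dismissed before a new one can be
captured. As a rough sketch, userspace might consume a dump through the
standard devcoredump sysfs interface (the devcd instance number below
is illustrative):

  # cat /sys/class/devcoredump/devcd1/data > panthor.devcore
  # echo 1 > /sys/class/devcoredump/devcd1/data

Writing anything to the data file releases the dump, making room for
the next one.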

Signed-off-by: Daniel Almeida <daniel.almeida@...labora.com>
---
 drivers/gpu/drm/panthor/panthor_sched.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index afd644c7d9f1..ea2696c1075a 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -10,6 +10,7 @@
 
 #include <linux/build_bug.h>
 #include <linux/clk.h>
+#include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-resv.h>
@@ -317,6 +318,9 @@ struct panthor_scheduler {
 		 */
 		struct list_head stopped_groups;
 	} reset;
+
+	/** @dump_successful_jobs: whether to dump successful jobs through coredumpv */
+	bool dump_successful_jobs;
 };
 
 /**
@@ -2946,6 +2950,16 @@ queue_run_job(struct drm_sched_job *sched_job)
 	queue->iface.input->extract = queue->iface.output->extract;
 	queue->iface.input->insert = job->ringbuf.end;
 
+	if (sched->dump_successful_jobs) {
+		struct panthor_core_dump_args core_dump_args = {
+			.ptdev = ptdev,
+			.group_vm = job->group->vm,
+			.group = job->group,
+		};
+
+		panthor_core_dump(&core_dump_args);
+	}
+
 	if (group->csg_id < 0) {
 		/* If the queue is blocked, we want to keep the timeout running, so we
 		 * can detect unbounded waits and kill the group when that happens.
@@ -3609,5 +3623,8 @@ void panthor_sched_debugfs_init(struct drm_minor *minor)
 	struct panthor_device *ptdev =
 		container_of(minor->dev, struct panthor_device, base);
 	struct panthor_scheduler *sched = ptdev->scheduler;
+
+	debugfs_create_bool("dump_successful_jobs", 0644, minor->debugfs_root,
+			    &sched->dump_successful_jobs);
 }
 #endif /* CONFIG_DEBUG_FS */
-- 
2.45.2

