[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250210124203.124191-1-adrian.larumbe@collabora.com>
Date: Mon, 10 Feb 2025 12:41:59 +0000
From: Adrián Larumbe <adrian.larumbe@...labora.com>
To: Boris Brezillon <boris.brezillon@...labora.com>,
Steven Price <steven.price@....com>,
Liviu Dudau <liviu.dudau@....com>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Maxime Ripard <mripard@...nel.org>,
Thomas Zimmermann <tzimmermann@...e.de>,
David Airlie <airlied@...il.com>,
Simona Vetter <simona@...ll.ch>,
Adrián Larumbe <adrian.larumbe@...labora.com>
Cc: kernel@...labora.com,
dri-devel@...ts.freedesktop.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 1/2] drm/panthor: Replace sleep locks with spinlocks in fdinfo path
Panthor's fdinfo handler is routed through the /proc file system, which
executes in an atomic context. That means we cannot use mutexes because
they might sleep.
This bug was uncovered by enabling some of the kernel's mutex-debugging
features:
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_MUTEXES=y
Replace Panthor's group fdinfo data mutex with a spinlock, taken through the
guard()/scoped_guard() cleanup helpers from <linux/cleanup.h>.
Signed-off-by: Adrián Larumbe <adrian.larumbe@...labora.com>
Fixes: e16635d88fa0 ("drm/panthor: add DRM fdinfo support")
---
drivers/gpu/drm/panthor/panthor_sched.c | 26 ++++++++++++-------------
1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 0b93bf83a9b2..7a267d1efeb6 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -9,6 +9,7 @@
#include <drm/panthor_drm.h>
#include <linux/build_bug.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -631,10 +632,10 @@ struct panthor_group {
struct panthor_gpu_usage data;
/**
- * @lock: Mutex to govern concurrent access from drm file's fdinfo callback
- * and job post-completion processing function
+ * @fdinfo.lock: Spinlock to govern concurrent access from drm file's fdinfo
+ * callback and job post-completion processing function
*/
- struct mutex lock;
+ spinlock_t lock;
/** @fdinfo.kbo_sizes: Aggregate size of private kernel BO's held by the group. */
size_t kbo_sizes;
@@ -910,8 +911,6 @@ static void group_release_work(struct work_struct *work)
release_work);
u32 i;
- mutex_destroy(&group->fdinfo.lock);
-
for (i = 0; i < group->queue_count; i++)
group_free_queue(group, group->queues[i]);
@@ -2861,12 +2860,12 @@ static void update_fdinfo_stats(struct panthor_job *job)
struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap;
struct panthor_job_profiling_data *data = &slots[job->profiling.slot];
- mutex_lock(&group->fdinfo.lock);
- if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
- fdinfo->cycles += data->cycles.after - data->cycles.before;
- if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
- fdinfo->time += data->time.after - data->time.before;
- mutex_unlock(&group->fdinfo.lock);
+ scoped_guard(spinlock, &group->fdinfo.lock) {
+ if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
+ fdinfo->cycles += data->cycles.after - data->cycles.before;
+ if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
+ fdinfo->time += data->time.after - data->time.before;
+ }
}
void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
@@ -2880,12 +2879,11 @@ void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
xa_lock(&gpool->xa);
xa_for_each(&gpool->xa, i, group) {
- mutex_lock(&group->fdinfo.lock);
+ guard(spinlock)(&group->fdinfo.lock);
pfile->stats.cycles += group->fdinfo.data.cycles;
pfile->stats.time += group->fdinfo.data.time;
group->fdinfo.data.cycles = 0;
group->fdinfo.data.time = 0;
- mutex_unlock(&group->fdinfo.lock);
}
xa_unlock(&gpool->xa);
}
@@ -3531,7 +3529,7 @@ int panthor_group_create(struct panthor_file *pfile,
mutex_unlock(&sched->reset.lock);
add_group_kbo_sizes(group->ptdev, group);
- mutex_init(&group->fdinfo.lock);
+ spin_lock_init(&group->fdinfo.lock);
return gid;
base-commit: 2eca617f12586abff62038db1c14cb3aa60a15aa
prerequisite-patch-id: 7e787ce5973b5fc7e9f69f26aa4d7e5ec03d5caa
prerequisite-patch-id: 03a619b8c741444b28435850e23d9ec463171c13
prerequisite-patch-id: 290e1053f8bf4a8b80fb037a87cae7e096b5aa96
prerequisite-patch-id: bc49bb8c29905650fb4788acc528bb44013c0240
prerequisite-patch-id: 46cab4c980824c03e5164afc43085fec23e1cba5
--
2.47.1
Powered by blists - more mailing lists