Message-ID: <6218ded2-dd8b-4761-b4f3-975107a1f7c4@arm.com>
Date: Fri, 28 Feb 2025 16:53:17 +0000
From: Steven Price <steven.price@....com>
To: Adrián Larumbe <adrian.larumbe@...labora.com>,
Boris Brezillon <boris.brezillon@...labora.com>,
Liviu Dudau <liviu.dudau@....com>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Maxime Ripard <mripard@...nel.org>, Thomas Zimmermann <tzimmermann@...e.de>,
David Airlie <airlied@...il.com>, Simona Vetter <simona@...ll.ch>
Cc: kernel@...labora.com, dri-devel@...ts.freedesktop.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3 1/2] drm/panthor: Replace sleep locks with spinlocks in
fdinfo path
On 27/02/2025 23:16, Adrián Larumbe wrote:
> Commit 0590c94c3596 ("drm/panthor: Fix race condition when gathering fdinfo
> group samples") introduced an xarray lock to deal with potential
> use-after-free errors when accessing groups' fdinfo figures. However, holding
> that lock puts the kernel in atomic context, so taking a nested mutex (a
> sleeping lock) inside it raises a warning when the kernel is compiled with
> mutex debug options:
>
> CONFIG_DEBUG_RT_MUTEXES=y
> CONFIG_DEBUG_MUTEXES=y
>
> Replace Panthor's group fdinfo data mutex with a guarded spinlock.
>
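
To spell out the failure mode for anyone reading along (a condensed sketch
of the pre-patch pattern described above, not new code): xa_lock() takes the
xarray's internal spinlock, so everything up to the matching xa_unlock()
runs in atomic context, and a sleeping lock such as a mutex taken in between
trips the mutex/atomic-sleep debug checks, while a spinlock nests fine:

  xa_lock(&gpool->xa);                  /* enters atomic context */
  xa_for_each(&gpool->xa, i, group) {
          mutex_lock(&group->fdinfo.lock);   /* sleeping lock -> splat */
          /* ... accumulate fdinfo stats ... */
          mutex_unlock(&group->fdinfo.lock);
  }
  xa_unlock(&gpool->xa);                /* leaves atomic context */
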
> Signed-off-by: Adrián Larumbe <adrian.larumbe@...labora.com>
> 0590c94c3596 ("drm/panthor: Fix race condition when gathering fdinfo group samples")

Missing "Fixes:" prefix

> Reviewed-by: Liviu Dudau <liviu.dudau@....com>
> Reviewed-by: Boris Brezillon <boris.brezillon@...labora.com>

Otherwise

Reviewed-by: Steven Price <steven.price@....com>

> ---
> drivers/gpu/drm/panthor/panthor_sched.c | 26 ++++++++++++-------------
> 1 file changed, 12 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
> index 1a276db095ff..4d31d1967716 100644
> --- a/drivers/gpu/drm/panthor/panthor_sched.c
> +++ b/drivers/gpu/drm/panthor/panthor_sched.c
> @@ -9,6 +9,7 @@
> #include <drm/panthor_drm.h>
>
> #include <linux/build_bug.h>
> +#include <linux/cleanup.h>
> #include <linux/clk.h>
> #include <linux/delay.h>
> #include <linux/dma-mapping.h>
> @@ -631,10 +632,10 @@ struct panthor_group {
> struct panthor_gpu_usage data;
>
> /**
> - * @lock: Mutex to govern concurrent access from drm file's fdinfo callback
> - * and job post-completion processing function
> + * @fdinfo.lock: Spinlock to govern concurrent access from drm file's fdinfo
> + * callback and job post-completion processing function
> */
> - struct mutex lock;
> + spinlock_t lock;
>
> /** @fdinfo.kbo_sizes: Aggregate size of private kernel BO's held by the group. */
> size_t kbo_sizes;
> @@ -910,8 +911,6 @@ static void group_release_work(struct work_struct *work)
> release_work);
> u32 i;
>
> - mutex_destroy(&group->fdinfo.lock);
> -
> for (i = 0; i < group->queue_count; i++)
> group_free_queue(group, group->queues[i]);
>
> @@ -2861,12 +2860,12 @@ static void update_fdinfo_stats(struct panthor_job *job)
> struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap;
> struct panthor_job_profiling_data *data = &slots[job->profiling.slot];
>
> - mutex_lock(&group->fdinfo.lock);
> - if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
> - fdinfo->cycles += data->cycles.after - data->cycles.before;
> - if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
> - fdinfo->time += data->time.after - data->time.before;
> - mutex_unlock(&group->fdinfo.lock);
> + scoped_guard(spinlock, &group->fdinfo.lock) {
> + if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
> + fdinfo->cycles += data->cycles.after - data->cycles.before;
> + if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
> + fdinfo->time += data->time.after - data->time.before;
> + }
> }
>
> void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
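
For readers not yet used to <linux/cleanup.h>: scoped_guard(spinlock, ...)
takes the lock for exactly the braced block and drops it automatically at
the closing brace, so the hunk above is equivalent to the open-coded pair it
replaces. Roughly:

  scoped_guard(spinlock, &group->fdinfo.lock) {
          fdinfo->cycles += data->cycles.after - data->cycles.before;
  }

  /* behaves like: */
  spin_lock(&group->fdinfo.lock);
  fdinfo->cycles += data->cycles.after - data->cycles.before;
  spin_unlock(&group->fdinfo.lock);
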
> @@ -2880,12 +2879,11 @@ void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
>
> xa_lock(&gpool->xa);
> xa_for_each(&gpool->xa, i, group) {
> - mutex_lock(&group->fdinfo.lock);
> + guard(spinlock)(&group->fdinfo.lock);
> pfile->stats.cycles += group->fdinfo.data.cycles;
> pfile->stats.time += group->fdinfo.data.time;
> group->fdinfo.data.cycles = 0;
> group->fdinfo.data.time = 0;
> - mutex_unlock(&group->fdinfo.lock);
> }
> xa_unlock(&gpool->xa);
> }
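
Worth noting (just an observation, the code above is fine): guard(spinlock)()
releases the lock when the enclosing scope ends, which here is the body of
each xa_for_each() iteration, so the locking window matches the old
mutex_lock()/mutex_unlock() pair:

  xa_for_each(&gpool->xa, i, group) {
          guard(spinlock)(&group->fdinfo.lock);
          pfile->stats.cycles += group->fdinfo.data.cycles;
          /* ... */
  }       /* lock released at the end of every iteration */
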
> @@ -3537,7 +3535,7 @@ int panthor_group_create(struct panthor_file *pfile,
> mutex_unlock(&sched->reset.lock);
>
> add_group_kbo_sizes(group->ptdev, group);
> - mutex_init(&group->fdinfo.lock);
> + spin_lock_init(&group->fdinfo.lock);
>
> return gid;
>