Message-ID: <Y9kYKBMBEa7j+O3n@hirez.programming.kicks-ass.net>
Date: Tue, 31 Jan 2023 14:31:20 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: James Clark <james.clark@....com>
Cc: linux-perf-users@...r.kernel.org, ravi.bangoria@....com,
syzbot+697196bc0265049822bd@...kaller.appspotmail.com,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/1] perf: Fix warning from concurrent read/write of
perf_event_pmu_context
On Fri, Jan 27, 2023 at 02:31:41PM +0000, James Clark wrote:
> @@ -4897,32 +4895,32 @@ static void free_epc_rcu(struct rcu_head *head)
>  static void put_pmu_ctx(struct perf_event_pmu_context *epc)
>  {
>  	unsigned long flags;
> +	struct perf_event_context *ctx = epc->ctx;
>
> -	if (!atomic_dec_and_test(&epc->refcount))
> +	/*
> +	 * XXX
> +	 *
> +	 * lockdep_assert_held(&ctx->mutex);
> +	 *
> +	 * can't because of the call-site in _free_event()/put_event()
> +	 * which isn't always called under ctx->mutex.
> +	 */
> +	raw_spin_lock_irqsave(&ctx->lock, flags);
> +	if (!atomic_dec_and_test(&epc->refcount)) {
> +		raw_spin_unlock_irqrestore(&ctx->lock, flags);
>  		return;
> +	}
This is a bit of an anti-pattern and is better done using dec_and_lock. As
such, I'll fold the below into it.
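
For reference, the usual dec_and_lock pattern keeps the lock off the common
path entirely: the lock is only taken when the count actually drops to zero.
A rough sketch, where 'struct obj' and 'obj_free()' are made-up names for
illustration, not anything from the patch:

	static void put_obj(struct obj *obj)
	{
		/* 'struct obj' and 'obj_free()' are illustrative names only. */

		/* Fast path: count was > 1; it is decremented, no lock taken. */
		if (!atomic_dec_and_lock(&obj->refcount, &obj->lock))
			return;

		/* Count hit zero: obj->lock is held here and we must drop it. */
		list_del(&obj->entry);
		spin_unlock(&obj->lock);
		obj_free(obj);
	}
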
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -476,6 +476,15 @@ extern int _atomic_dec_and_lock_irqsave(
 #define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
 		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
 
+extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock);
+#define atomic_dec_and_raw_lock(atomic, lock) \
+		__cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock))
+
+extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
+					    unsigned long *flags);
+#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \
+		__cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags)))
+
 int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
 			     size_t max_size, unsigned int cpu_mult,
 			     gfp_t gfp, const char *name,
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4901,11 +4901,8 @@ static void put_pmu_ctx(struct perf_even
 	 * can't because of the call-site in _free_event()/put_event()
 	 * which isn't always called under ctx->mutex.
 	 */
-	raw_spin_lock_irqsave(&ctx->lock, flags);
-	if (!atomic_dec_and_test(&epc->refcount)) {
-		raw_spin_unlock_irqrestore(&ctx->lock, flags);
+	if (!atomic_dec_and_raw_lock_irqsave(&epc->refcount, &ctx->lock, flags))
 		return;
-	}
 	WARN_ON_ONCE(list_empty(&epc->pmu_ctx_entry));
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -49,3 +49,34 @@ int _atomic_dec_and_lock_irqsave(atomic_
 	return 0;
 }
 EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
+
+int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
+{
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+
+	/* Otherwise do it the slow way */
+	raw_spin_lock(lock);
+	if (atomic_dec_and_test(atomic))
+		return 1;
+	raw_spin_unlock(lock);
+	return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_raw_lock);
+
+int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
+				     unsigned long *flags)
+{
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+
+	/* Otherwise do it the slow way */
+	raw_spin_lock_irqsave(lock, *flags);
+	if (atomic_dec_and_test(atomic))
+		return 1;
+	raw_spin_unlock_irqrestore(lock, *flags);
+	return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_raw_lock_irqsave);
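
Like the existing _atomic_dec_and_lock*() helpers, the raw variants return
non-zero with the lock held (and, for the _irqsave one, interrupts disabled)
only when the count drops to zero; otherwise they return zero without
touching the lock. The caller in put_pmu_ctx() then ends up looking roughly
like this (sketch only, teardown details elided):

	unsigned long flags;

	/* Fast path: refcount was > 1; no lock taken, nothing more to do. */
	if (!atomic_dec_and_raw_lock_irqsave(&epc->refcount, &ctx->lock, flags))
		return;

	/* refcount hit zero: ctx->lock is held and IRQs are disabled here. */
	WARN_ON_ONCE(list_empty(&epc->pmu_ctx_entry));
	/* ... epc teardown ... */
	raw_spin_unlock_irqrestore(&ctx->lock, flags);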