Message-ID: <a5d842e2-a877-14fa-77a3-2d991da105ee@arm.com>
Date:   Wed, 1 Feb 2023 10:49:22 +0000
From:   James Clark <james.clark@....com>
To:     Peter Zijlstra <peterz@...radead.org>
Cc:     linux-perf-users@...r.kernel.org, ravi.bangoria@....com,
        syzbot+697196bc0265049822bd@...kaller.appspotmail.com,
        Ingo Molnar <mingo@...hat.com>,
        Arnaldo Carvalho de Melo <acme@...nel.org>,
        Mark Rutland <mark.rutland@....com>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Jiri Olsa <jolsa@...nel.org>,
        Namhyung Kim <namhyung@...nel.org>,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/1] perf: Fix warning from concurrent read/write of
 perf_event_pmu_context



On 31/01/2023 13:31, Peter Zijlstra wrote:
> On Fri, Jan 27, 2023 at 02:31:41PM +0000, James Clark wrote:
> 
>> @@ -4897,32 +4895,32 @@ static void free_epc_rcu(struct rcu_head *head)
>>  static void put_pmu_ctx(struct perf_event_pmu_context *epc)
>>  {
>>  	unsigned long flags;
>> +	struct perf_event_context *ctx = epc->ctx;
>>  
>> -	if (!atomic_dec_and_test(&epc->refcount))
>> +	/*
>> +	 * XXX
>> +	 *
>> +	 * lockdep_assert_held(&ctx->mutex);
>> +	 *
>> +	 * can't because of the call-site in _free_event()/put_event()
>> +	 * which isn't always called under ctx->mutex.
>> +	 */
>> +	raw_spin_lock_irqsave(&ctx->lock, flags);
>> +	if (!atomic_dec_and_test(&epc->refcount)) {
>> +		raw_spin_unlock_irqrestore(&ctx->lock, flags);
>>  		return;
>> +	}
> 
> This is a bit of an anti-pattern and better done using dec_and_lock. As
> such, I'll fold the below into it.
> 

Yep that looks better. Thanks Peter.
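
For anyone following along, the dec_and_lock idiom Peter mentions avoids
taking the lock at all on the common path where the refcount doesn't drop
to zero. A minimal sketch, using a made-up refcounted object (struct foo,
foo_put() and friends are hypothetical names, not anything in the perf
code):

#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical refcounted object, used only to illustrate the idiom. */
struct foo {
	atomic_t	refcount;
	spinlock_t	lock;	/* protects whatever list foo sits on */
};

/* Open-coded shape (like the hunk above): the lock is taken
 * unconditionally, even when this isn't the last reference. */
static void foo_put_open_coded(struct foo *f)
{
	unsigned long flags;

	spin_lock_irqsave(&f->lock, flags);
	if (!atomic_dec_and_test(&f->refcount)) {
		spin_unlock_irqrestore(&f->lock, flags);
		return;
	}
	/* last reference: tear down under the lock, then free */
	spin_unlock_irqrestore(&f->lock, flags);
	kfree(f);
}

/* dec_and_lock shape: the lock is only taken when the count is about
 * to reach zero; otherwise the decrement stays lock-free. */
static void foo_put(struct foo *f)
{
	unsigned long flags;

	if (!atomic_dec_and_lock_irqsave(&f->refcount, &f->lock, flags))
		return;
	/* count hit zero and f->lock is held here */
	spin_unlock_irqrestore(&f->lock, flags);
	kfree(f);
}

The raw_spinlock_t helpers added in the patch below follow the same
pattern, since ctx->lock is a raw spinlock.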

> --- a/include/linux/spinlock.h
> +++ b/include/linux/spinlock.h
> @@ -476,6 +476,15 @@ extern int _atomic_dec_and_lock_irqsave(
>  #define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
>  		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
>  
> +extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock);
> +#define atomic_dec_and_raw_lock(atomic, lock) \
> +		__cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock))
> +
> +extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
> +					unsigned long *flags);
> +#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \
> +		__cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags)))
> +
>  int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
>  			     size_t max_size, unsigned int cpu_mult,
>  			     gfp_t gfp, const char *name,
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -4901,11 +4901,8 @@ static void put_pmu_ctx(struct perf_even
>  	 * can't because of the call-site in _free_event()/put_event()
>  	 * which isn't always called under ctx->mutex.
>  	 */
> -	raw_spin_lock_irqsave(&ctx->lock, flags);
> -	if (!atomic_dec_and_test(&epc->refcount)) {
> -		raw_spin_unlock_irqrestore(&ctx->lock, flags);
> +	if (!atomic_dec_and_raw_lock_irqsave(&epc->refcount, &ctx->lock, flags))
>  		return;
> -	}
>  
>  	WARN_ON_ONCE(list_empty(&epc->pmu_ctx_entry));
>  
> --- a/lib/dec_and_lock.c
> +++ b/lib/dec_and_lock.c
> @@ -49,3 +49,34 @@ int _atomic_dec_and_lock_irqsave(atomic_
>  	return 0;
>  }
>  EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
> +
> +int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
> +{
> +	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
> +	if (atomic_add_unless(atomic, -1, 1))
> +		return 0;
> +
> +	/* Otherwise do it the slow way */
> +	raw_spin_lock(lock);
> +	if (atomic_dec_and_test(atomic))
> +		return 1;
> +	raw_spin_unlock(lock);
> +	return 0;
> +}
> +EXPORT_SYMBOL(_atomic_dec_and_raw_lock);
> +
> +int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
> +				     unsigned long *flags)
> +{
> +	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
> +	if (atomic_add_unless(atomic, -1, 1))
> +		return 0;
> +
> +	/* Otherwise do it the slow way */
> +	raw_spin_lock_irqsave(lock, *flags);
> +	if (atomic_dec_and_test(atomic))
> +		return 1;
> +	raw_spin_unlock_irqrestore(lock, *flags);
> +	return 0;
> +}
> +EXPORT_SYMBOL(_atomic_dec_and_raw_lock_irqsave);
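
For reference, with Peter's fold applied the top of put_pmu_ctx() from the
original patch ends up roughly as below (just the two hunks above merged;
everything after the WARN_ON_ONCE() is unchanged and omitted here):

static void put_pmu_ctx(struct perf_event_pmu_context *epc)
{
	unsigned long flags;
	struct perf_event_context *ctx = epc->ctx;

	/*
	 * XXX
	 *
	 * lockdep_assert_held(&ctx->mutex);
	 *
	 * can't because of the call-site in _free_event()/put_event()
	 * which isn't always called under ctx->mutex.
	 */
	if (!atomic_dec_and_raw_lock_irqsave(&epc->refcount, &ctx->lock, flags))
		return;

	WARN_ON_ONCE(list_empty(&epc->pmu_ctx_entry));

	/* ... rest of the function unchanged ... */
}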
