Date:   Wed, 23 Aug 2023 03:14:35 +0200
From:   Mateusz Guzik <mjguzik@...il.com>
To:     linux-kernel@...r.kernel.org
Cc:     dennis@...nel.org, tj@...nel.org, cl@...ux.com,
        akpm@...ux-foundation.org, shakeelb@...gle.com, linux-mm@...ck.org,
        Mateusz Guzik <mjguzik@...il.com>
Subject: Re: [PATCH v2 1/2] pcpcntr: add group allocation/free

On 8/22/23, Mateusz Guzik <mjguzik@...il.com> wrote:
> Allocations and frees are globally serialized on the pcpu lock (and the
> CPU hotplug lock if enabled, which is the case on Debian).
>
> At least one frequent consumer allocates 4 back-to-back counters (and
> frees them in the same manner), exacerbating the problem.
>
> While this does not fully remedy scalability issues, it is a step
> towards that goal and provides immediate relief.
>
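
To make the win concrete: with the batched API such a consumer goes
from four serialized per-CPU allocations (and four lock round-trips)
to a single one. A minimal sketch of the intended usage, with made-up
names:

	struct percpu_counter stats[4];

	/* one percpu allocation backs all four counters */
	if (percpu_counter_init_many(stats, 0, GFP_KERNEL, 4))
		return -ENOMEM;

	percpu_counter_inc(&stats[0]);

	/* one lock round-trip and one free_percpu() for the group */
	percpu_counter_destroy_many(stats, 4);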

I just found I'm going to have to send a v3 to handle !CONFIG_SMP.
At the moment I can't even compile-test that configuration, so for now
I'm just asking whether this v2 looks fine (modulo the !SMP problem).
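
There is no per-CPU allocation to batch in the UP case, so I would
expect the !CONFIG_SMP stubs to reduce to plain loops over the array,
along these lines (untested sketch, mirroring the existing UP wrappers
in percpu_counter.h):

static inline int percpu_counter_init_many(struct percpu_counter *fbc,
					   s64 amount, gfp_t gfp,
					   u32 nr_counters)
{
	u32 i;

	/* UP percpu_counter is a plain s64 count, nothing to allocate */
	for (i = 0; i < nr_counters; i++)
		fbc[i].count = amount;

	return 0;
}

static inline void percpu_counter_destroy_many(struct percpu_counter *fbc,
					       u32 nr_counters)
{
	/* nothing was allocated, so nothing to free */
}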

Sorry for the spam.

> Signed-off-by: Mateusz Guzik <mjguzik@...il.com>
> ---
>  include/linux/percpu_counter.h | 20 ++++++++---
>  lib/percpu_counter.c           | 61 +++++++++++++++++++++++-----------
>  2 files changed, 57 insertions(+), 24 deletions(-)
>
> diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
> index 75b73c83bc9d..518a4088b964 100644
> --- a/include/linux/percpu_counter.h
> +++ b/include/linux/percpu_counter.h
> @@ -30,17 +30,27 @@ struct percpu_counter {
>
>  extern int percpu_counter_batch;
>
> -int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
> -			  struct lock_class_key *key);
> +int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
> +			  u32 nr_counters, struct lock_class_key *key);
>
> -#define percpu_counter_init(fbc, value, gfp)				\
> +#define percpu_counter_init_many(fbc, value, gfp, nr_counters)		\
>  	({								\
>  		static struct lock_class_key __key;			\
>  									\
> -		__percpu_counter_init(fbc, value, gfp, &__key);		\
> +		__percpu_counter_init_many(fbc, value, gfp, nr_counters,\
> +					   &__key);			\
>  	})
>
> -void percpu_counter_destroy(struct percpu_counter *fbc);
> +
> +#define percpu_counter_init(fbc, value, gfp)				\
> +	percpu_counter_init_many(fbc, value, gfp, 1)
> +
> +void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters);
> +static inline void percpu_counter_destroy(struct percpu_counter *fbc)
> +{
> +	percpu_counter_destroy_many(fbc, 1);
> +}
> +
>  void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
>  void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
>  			      s32 batch);
> diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
> index 5004463c4f9f..9338b27f1cdd 100644
> --- a/lib/percpu_counter.c
> +++ b/lib/percpu_counter.c
> @@ -151,48 +151,71 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
>  }
>  EXPORT_SYMBOL(__percpu_counter_sum);
>
> -int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
> -			  struct lock_class_key *key)
> +int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
> +			  u32 nr_counters, struct lock_class_key *key)
>  {
>  	unsigned long flags __maybe_unused;
> -
> -	raw_spin_lock_init(&fbc->lock);
> -	lockdep_set_class(&fbc->lock, key);
> -	fbc->count = amount;
> -	fbc->counters = alloc_percpu_gfp(s32, gfp);
> -	if (!fbc->counters)
> +	size_t counter_size;
> +	s32 __percpu *counters;
> +	u32 i;
> +
> +	counter_size = ALIGN(sizeof(*counters), __alignof__(*counters));
> +	counters = __alloc_percpu_gfp(nr_counters * counter_size,
> +				      __alignof__(*counters), gfp);
> +	if (!counters) {
> +		fbc[0].counters = NULL;
>  		return -ENOMEM;
> +	}
>
> -	debug_percpu_counter_activate(fbc);
> +	for (i = 0; i < nr_counters; i++) {
> +		raw_spin_lock_init(&fbc[i].lock);
> +		lockdep_set_class(&fbc[i].lock, key);
> +#ifdef CONFIG_HOTPLUG_CPU
> +		INIT_LIST_HEAD(&fbc[i].list);
> +#endif
> +		fbc[i].count = amount;
> +		fbc[i].counters = (void *)counters + (i * counter_size);
> +
> +		debug_percpu_counter_activate(&fbc[i]);
> +	}
>
>  #ifdef CONFIG_HOTPLUG_CPU
> -	INIT_LIST_HEAD(&fbc->list);
>  	spin_lock_irqsave(&percpu_counters_lock, flags);
> -	list_add(&fbc->list, &percpu_counters);
> +	for (i = 0; i < nr_counters; i++)
> +		list_add(&fbc[i].list, &percpu_counters);
>  	spin_unlock_irqrestore(&percpu_counters_lock, flags);
>  #endif
>  	return 0;
>  }
> -EXPORT_SYMBOL(__percpu_counter_init);
> +EXPORT_SYMBOL(__percpu_counter_init_many);
>
> -void percpu_counter_destroy(struct percpu_counter *fbc)
> +void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters)
>  {
>  	unsigned long flags __maybe_unused;
> +	u32 i;
> +
> +	if (WARN_ON_ONCE(!fbc))
> +		return;
>
> -	if (!fbc->counters)
> +	if (!fbc[0].counters)
>  		return;
>
> -	debug_percpu_counter_deactivate(fbc);
> +	for (i = 0; i < nr_counters; i++)
> +		debug_percpu_counter_deactivate(&fbc[i]);
>
>  #ifdef CONFIG_HOTPLUG_CPU
>  	spin_lock_irqsave(&percpu_counters_lock, flags);
> -	list_del(&fbc->list);
> +	for (i = 0; i < nr_counters; i++)
> +		list_del(&fbc[i].list);
>  	spin_unlock_irqrestore(&percpu_counters_lock, flags);
>  #endif
> -	free_percpu(fbc->counters);
> -	fbc->counters = NULL;
> +
> +	free_percpu(fbc[0].counters);
> +
> +	for (i = 0; i < nr_counters; i++)
> +		fbc[i].counters = NULL;
>  }
> -EXPORT_SYMBOL(percpu_counter_destroy);
> +EXPORT_SYMBOL(percpu_counter_destroy_many);
>
>  int percpu_counter_batch __read_mostly = 32;
>  EXPORT_SYMBOL(percpu_counter_batch);
> --
> 2.39.2
>
>


-- 
Mateusz Guzik <mjguzik gmail.com>
