Message-ID: <aPpXDLxcRq-TZMxL@yury>
Date: Thu, 23 Oct 2025 12:25:48 -0400
From: Yury Norov <yury.norov@...il.com>
To: Thomas Gleixner <tglx@...utronix.de>
Cc: LKML <linux-kernel@...r.kernel.org>,
Peter Zijlstra <peterz@...radead.org>,
Gabriele Monaco <gmonaco@...hat.com>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Michael Jeanson <mjeanson@...icios.com>,
Jens Axboe <axboe@...nel.dk>,
"Paul E. McKenney" <paulmck@...nel.org>,
"Gautham R. Shenoy" <gautham.shenoy@....com>,
Florian Weimer <fweimer@...hat.com>,
Tim Chen <tim.c.chen@...el.com>
Subject: Re: [patch V2 09/20] cpumask: Cache num_possible_cpus()
On Wed, Oct 22, 2025 at 02:55:30PM +0200, Thomas Gleixner wrote:
> Reevaluating num_possible_cpus() over and over does not make sense. That
> becomes a constant after init as cpu_possible_mask is marked ro_after_init.
>
> Cache the value during initialization and provide that for consumption.
>
> Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
> Cc: Yury Norov <yury.norov@...il.com>
> ---
> V2: New patch
> ---
> include/linux/cpumask.h | 10 ++++++++--
> kernel/cpu.c | 15 +++++++++++++++
> 2 files changed, 23 insertions(+), 2 deletions(-)
>
> --- a/include/linux/cpumask.h
> +++ b/include/linux/cpumask.h
> @@ -126,6 +126,7 @@ extern struct cpumask __cpu_dying_mask;
> #define cpu_dying_mask ((const struct cpumask *)&__cpu_dying_mask)
>
> extern atomic_t __num_online_cpus;
> +extern unsigned int __num_possible_cpus;
>
> extern cpumask_t cpus_booted_once_mask;
>
> @@ -1152,13 +1153,13 @@ void init_cpu_possible(const struct cpum
> #define __assign_cpu(cpu, mask, val) \
> __assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
>
> -#define set_cpu_possible(cpu, possible) assign_cpu((cpu), &__cpu_possible_mask, (possible))
> #define set_cpu_enabled(cpu, enabled) assign_cpu((cpu), &__cpu_enabled_mask, (enabled))
> #define set_cpu_present(cpu, present) assign_cpu((cpu), &__cpu_present_mask, (present))
> #define set_cpu_active(cpu, active) assign_cpu((cpu), &__cpu_active_mask, (active))
> #define set_cpu_dying(cpu, dying) assign_cpu((cpu), &__cpu_dying_mask, (dying))
>
> void set_cpu_online(unsigned int cpu, bool online);
> +void set_cpu_possible(unsigned int cpu, bool possible);
>
> /**
> * to_cpumask - convert a NR_CPUS bitmap to a struct cpumask *
> @@ -1211,7 +1212,12 @@ static __always_inline unsigned int num_
> {
> return raw_atomic_read(&__num_online_cpus);
> }
> -#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
> +
> +static __always_inline unsigned int num_possible_cpus(void)
> +{
> + return __num_possible_cpus;
> +}
> +
> #define num_enabled_cpus() cpumask_weight(cpu_enabled_mask)
> #define num_present_cpus() cpumask_weight(cpu_present_mask)
> #define num_active_cpus() cpumask_weight(cpu_active_mask)
> --- a/kernel/cpu.c
> +++ b/kernel/cpu.c
> @@ -3108,6 +3108,9 @@ EXPORT_SYMBOL(__cpu_dying_mask);
> atomic_t __num_online_cpus __read_mostly;
> EXPORT_SYMBOL(__num_online_cpus);
>
> +unsigned int __num_possible_cpus __ro_after_init = NR_CPUS;
> +EXPORT_SYMBOL(__num_possible_cpus);
> +
> void init_cpu_present(const struct cpumask *src)
> {
> cpumask_copy(&__cpu_present_mask, src);
> @@ -3116,6 +3119,7 @@ void init_cpu_present(const struct cpuma
> void init_cpu_possible(const struct cpumask *src)
> {
> cpumask_copy(&__cpu_possible_mask, src);
> + __num_possible_cpus = cpumask_weight(&__cpu_possible_mask);
> }
>
> void set_cpu_online(unsigned int cpu, bool online)
> @@ -3139,6 +3143,17 @@ void set_cpu_online(unsigned int cpu, bo
> }
> }
>
> +void set_cpu_possible(unsigned int cpu, bool possible)
> +{
> + if (possible) {
> + if (!cpumask_test_and_set_cpu(cpu, &__cpu_possible_mask))
> + __num_possible_cpus++;
> + } else {
> + if (cpumask_test_and_clear_cpu(cpu, &__cpu_possible_mask))
> + __num_possible_cpus--;
> + }
> +}
You can save a couple of conditionals: cpumask_test_and_set_cpu() and
cpumask_test_and_clear_cpu() return the old bit value, so the counter
only changes when that value flips, and the adjustment can be taken
straight from the return value:

	if (possible)
		__num_possible_cpus += !cpumask_test_and_set_cpu(cpu, &__cpu_possible_mask);
	else
		__num_possible_cpus -= cpumask_test_and_clear_cpu(cpu, &__cpu_possible_mask);
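In case it helps, here is a quick standalone userspace model of the
equivalence (plain C; test_and_set_bit()/test_and_clear_bit() here are
local single-threaded stand-ins for the kernel bitops, not the real
API): the weight only moves when the old bit value differs from the
new one.

	#include <stdbool.h>
	#include <stdio.h>

	static unsigned long mask;
	static unsigned int weight;

	/* Return the old bit value, then set the bit. */
	static bool test_and_set_bit(unsigned int n)
	{
		bool old = mask & (1UL << n);

		mask |= 1UL << n;
		return old;
	}

	/* Return the old bit value, then clear the bit. */
	static bool test_and_clear_bit(unsigned int n)
	{
		bool old = mask & (1UL << n);

		mask &= ~(1UL << n);
		return old;
	}

	static void set_possible(unsigned int cpu, bool possible)
	{
		if (possible)
			weight += !test_and_set_bit(cpu);	/* +1 only if the bit was clear */
		else
			weight -= test_and_clear_bit(cpu);	/* -1 only if the bit was set */
	}

	int main(void)
	{
		set_possible(0, true);
		set_possible(0, true);	/* no-op: bit already set, weight stays 1 */
		set_possible(1, true);
		set_possible(0, false);
		printf("weight = %u\n", weight);	/* prints 1 */
		return 0;
	}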
Otherwise,
Reviewed-by: Yury Norov (NVIDIA) <yury.norov@...il.com>