Message-ID: <20250826041319.1284-3-kprateek.nayak@amd.com>
Date: Tue, 26 Aug 2025 04:13:13 +0000
From: K Prateek Nayak <kprateek.nayak@....com>
To: Madhavan Srinivasan <maddy@...ux.ibm.com>, Michael Ellerman
<mpe@...erman.id.au>, Nicholas Piggin <npiggin@...il.com>, Christophe Leroy
<christophe.leroy@...roup.eu>, Heiko Carstens <hca@...ux.ibm.com>, "Vasily
Gorbik" <gor@...ux.ibm.com>, Alexander Gordeev <agordeev@...ux.ibm.com>,
Christian Borntraeger <borntraeger@...ux.ibm.com>, Sven Schnelle
<svens@...ux.ibm.com>, Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar
<mingo@...hat.com>, Borislav Petkov <bp@...en8.de>, Dave Hansen
<dave.hansen@...ux.intel.com>, <x86@...nel.org>, "H. Peter Anvin"
<hpa@...or.com>, Peter Zijlstra <peterz@...radead.org>, Juri Lelli
<juri.lelli@...hat.com>, Vincent Guittot <vincent.guittot@...aro.org>,
<linuxppc-dev@...ts.ozlabs.org>, <linux-kernel@...r.kernel.org>,
<linux-s390@...r.kernel.org>
CC: Dietmar Eggemann <dietmar.eggemann@....com>, Steven Rostedt
<rostedt@...dmis.org>, Ben Segall <bsegall@...gle.com>, Mel Gorman
<mgorman@...e.de>, Valentin Schneider <vschneid@...hat.com>, K Prateek Nayak
<kprateek.nayak@....com>, <thomas.weissschuh@...utronix.de>, Li Chen
<chenl311@...natelecom.cn>, Bibo Mao <maobibo@...ngson.cn>, Mete Durlu
<meted@...ux.ibm.com>, Tobias Huschle <huschle@...ux.ibm.com>, "Easwar
Hariharan" <easwar.hariharan@...ux.microsoft.com>, Guo Weikang
<guoweikang.kernel@...il.com>, "Rafael J. Wysocki"
<rafael.j.wysocki@...el.com>, Brian Gerst <brgerst@...il.com>, Patryk Wlazlyn
<patryk.wlazlyn@...ux.intel.com>, Swapnil Sapkal <swapnil.sapkal@....com>,
"Yury Norov [NVIDIA]" <yury.norov@...il.com>, Sudeep Holla
<sudeep.holla@....com>, Jonathan Cameron <Jonathan.Cameron@...wei.com>,
Andrea Righi <arighi@...dia.com>, Yicong Yang <yangyicong@...ilicon.com>,
Ricardo Neri <ricardo.neri-calderon@...ux.intel.com>, Tim Chen
<tim.c.chen@...ux.intel.com>, Vinicius Costa Gomes <vinicius.gomes@...el.com>
Subject: [PATCH v7 2/8] powerpc/smp: Rename cpu_coregroup_* to cpu_corgrp_*

Rename cpu_coregroup_{map,mask} to cpu_corgrp_{map,mask} to free up the
cpu_coregroup_* namespace. cpu_coregroup_mask() will be added back in the
subsequent commit for CONFIG_SCHED_MC enablement.

No functional changes intended.

Signed-off-by: K Prateek Nayak <kprateek.nayak@....com>
---
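Note: for context, with CONFIG_SCHED_MC the generic scheduler topology
code expects the architecture to provide cpu_coregroup_mask() for the MC
level. A rough sketch of the long-standing default table in
kernel/sched/topology.c (simplified here; the mask-function signatures
are being reworked earlier in this series):

	static struct sched_domain_topology_level default_topology[] = {
	#ifdef CONFIG_SCHED_SMT
		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	#endif
	#ifdef CONFIG_SCHED_MC
		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	#endif
		{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
		{ NULL, },
	};

Once the file-local helper is renamed to cpu_corgrp_mask(), one plausible
shape for the hook the subsequent commit can add back (illustrative only,
not part of this patch):

	/* Hypothetical sketch: arch hook consumed by the generic MC level */
	const struct cpumask *cpu_coregroup_mask(int cpu)
	{
		return cpu_corgrp_mask(cpu);
	}
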
arch/powerpc/kernel/smp.c | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 862f50c09539..4f48262658cc 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -87,7 +87,7 @@ DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
-static DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);
+static DEFINE_PER_CPU(cpumask_var_t, cpu_corgrp_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
@@ -1045,9 +1045,9 @@ static const struct cpumask *tl_smt_mask(struct sched_domain_topology_level *tl,
}
#endif

-static struct cpumask *cpu_coregroup_mask(int cpu)
+static struct cpumask *cpu_corgrp_mask(int cpu)
{
- return per_cpu(cpu_coregroup_map, cpu);
+ return per_cpu(cpu_corgrp_map, cpu);
}

static bool has_coregroup_support(void)
@@ -1061,7 +1061,7 @@ static bool has_coregroup_support(void)

static const struct cpumask *cpu_mc_mask(struct sched_domain_topology_level *tl, int cpu)
{
- return cpu_coregroup_mask(cpu);
+ return cpu_corgrp_mask(cpu);
}

static const struct cpumask *cpu_pkg_mask(struct sched_domain_topology_level *tl, int cpu)
@@ -1124,7 +1124,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
if (has_coregroup_support())
- zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
+ zalloc_cpumask_var_node(&per_cpu(cpu_corgrp_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));

#ifdef CONFIG_NUMA
@@ -1145,7 +1145,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

if (has_coregroup_support())
- cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
+ cpumask_set_cpu(boot_cpuid, cpu_corgrp_mask(boot_cpuid));

init_big_cores();
if (has_big_cores) {
@@ -1510,8 +1510,8 @@ static void remove_cpu_from_masks(int cpu)
set_cpus_unrelated(cpu, i, cpu_core_mask);

if (has_coregroup_support()) {
- for_each_cpu(i, cpu_coregroup_mask(cpu))
- set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
+ for_each_cpu(i, cpu_corgrp_mask(cpu))
+ set_cpus_unrelated(cpu, i, cpu_corgrp_mask);
}
}
#endif
@@ -1543,7 +1543,7 @@ static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
if (!*mask) {
/* Assume only siblings are part of this CPU's coregroup */
for_each_cpu(i, submask_fn(cpu))
- set_cpus_related(cpu, i, cpu_coregroup_mask);
+ set_cpus_related(cpu, i, cpu_corgrp_mask);

return;
}
@@ -1551,18 +1551,18 @@ static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
cpumask_and(*mask, cpu_online_mask, cpu_node_mask(cpu));

/* Update coregroup mask with all the CPUs that are part of submask */
- or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);
+ or_cpumasks_related(cpu, cpu, submask_fn, cpu_corgrp_mask);

/* Skip all CPUs already part of coregroup mask */
- cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));
+ cpumask_andnot(*mask, *mask, cpu_corgrp_mask(cpu));

for_each_cpu(i, *mask) {
/* Skip all CPUs not part of this coregroup */
if (coregroup_id == cpu_to_coregroup_id(i)) {
- or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
+ or_cpumasks_related(cpu, i, submask_fn, cpu_corgrp_mask);
cpumask_andnot(*mask, *mask, submask_fn(i));
} else {
- cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
+ cpumask_andnot(*mask, *mask, cpu_corgrp_mask(i));
}
}
}
--
2.34.1