Message-Id: <1607533700-5546-3-git-send-email-ego@linux.vnet.ibm.com>
Date: Wed, 9 Dec 2020 22:38:17 +0530
From: "Gautham R. Shenoy" <ego@...ux.vnet.ibm.com>
To: Srikar Dronamraju <srikar@...ux.vnet.ibm.com>,
Anton Blanchard <anton@...abs.org>,
Vaidyanathan Srinivasan <svaidy@...ux.vnet.ibm.com>,
Michael Ellerman <mpe@...erman.id.au>,
Michael Neuling <mikey@...ling.org>,
Nicholas Piggin <npiggin@...il.com>,
Nathan Lynch <nathanl@...ux.ibm.com>,
Peter Zijlstra <peterz@...radead.org>,
Valentin Schneider <valentin.schneider@....com>
Cc: linuxppc-dev@...ts.ozlabs.org, linux-kernel@...r.kernel.org,
"Gautham R. Shenoy" <ego@...ux.vnet.ibm.com>
Subject: [PATCH v2 2/5] powerpc/smp: Rename cpu_l1_cache_map as thread_group_l1_cache_map
From: "Gautham R. Shenoy" <ego@...ux.vnet.ibm.com>
On platforms which have the "ibm,thread-groups" property, the per-cpu
variable cpu_l1_cache_map keeps track of which group of threads
within the same core share the L1 cache as well as the instruction
and data flow.
This patch renames the variable to "thread_group_l1_cache_map" to make
it consistent with a subsequent patch which will introduce
thread_group_l2_cache_map.
This patch introduces no functional change.
Signed-off-by: Gautham R. Shenoy <ego@...ux.vnet.ibm.com>
---
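[Note, not part of the patch: a minimal illustrative sketch of the
per-cpu cpumask pattern that thread_group_l1_cache_map follows. The
names demo_thread_group_mask, demo_init_thread_group_mask and
demo_walk_thread_group_mask are made up for illustration only; the
allocation, population and iteration steps mirror the code touched by
this diff.]

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/topology.h>
#include <asm/cputhreads.h>	/* cpu_first_thread_sibling(), threads_per_core */

/* One dynamically allocated cpumask per CPU. */
static DEFINE_PER_CPU(cpumask_var_t, demo_thread_group_mask);

static int __init demo_init_thread_group_mask(int cpu)
{
	int first = cpu_first_thread_sibling(cpu);
	int i;

	/* Allocate a zeroed mask on @cpu's home NUMA node. */
	if (!zalloc_cpumask_var_node(&per_cpu(demo_thread_group_mask, cpu),
				     GFP_KERNEL, cpu_to_node(cpu)))
		return -ENOMEM;

	/* Record the sibling threads that belong to @cpu's group. */
	for (i = first; i < first + threads_per_core; i++)
		cpumask_set_cpu(i, per_cpu(demo_thread_group_mask, cpu));

	return 0;
}

static void demo_walk_thread_group_mask(int cpu)
{
	int i;

	/* Consumers iterate the mask, as add_cpu_to_smallcore_masks() does. */
	for_each_cpu(i, per_cpu(demo_thread_group_mask, cpu))
		pr_info("CPU %d shares a thread group with CPU %d\n", cpu, i);
}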
arch/powerpc/kernel/smp.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 88d88ad..f3290d5 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -116,10 +116,10 @@ struct thread_groups_list {
static struct thread_groups_list tgl[NR_CPUS] __initdata;
/*
- * On big-cores system, cpu_l1_cache_map for each CPU corresponds to
+ * On big-cores system, thread_group_l1_cache_map for each CPU corresponds to
* the set its siblings that share the L1-cache.
*/
-DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map);
+DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
@@ -866,7 +866,7 @@ static struct thread_groups *__init get_thread_groups(int cpu,
return tg;
}
-static int init_cpu_l1_cache_map(int cpu)
+static int init_thread_group_l1_cache_map(int cpu)
{
int first_thread = cpu_first_thread_sibling(cpu);
@@ -885,7 +885,7 @@ static int init_cpu_l1_cache_map(int cpu)
return -ENODATA;
}
- zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
+ zalloc_cpumask_var_node(&per_cpu(thread_group_l1_cache_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
for (i = first_thread; i < first_thread + threads_per_core; i++) {
@@ -897,7 +897,7 @@ static int init_cpu_l1_cache_map(int cpu)
}
if (i_group_start == cpu_group_start)
- cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
+ cpumask_set_cpu(i, per_cpu(thread_group_l1_cache_map, cpu));
}
return 0;
@@ -976,7 +976,7 @@ static int init_big_cores(void)
int cpu;
for_each_possible_cpu(cpu) {
- int err = init_cpu_l1_cache_map(cpu);
+ int err = init_thread_group_l1_cache_map(cpu);
if (err)
return err;
@@ -1372,7 +1372,7 @@ static inline void add_cpu_to_smallcore_masks(int cpu)
cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
- for_each_cpu(i, per_cpu(cpu_l1_cache_map, cpu)) {
+ for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) {
if (cpu_online(i))
set_cpus_related(i, cpu, cpu_smallcore_mask);
}
--
1.9.4