Message-ID: <20240806085320.63514-4-yangyicong@huawei.com>
Date: Tue, 6 Aug 2024 16:53:19 +0800
From: Yicong Yang <yangyicong@...wei.com>
To: <catalin.marinas@....com>, <will@...nel.org>, <sudeep.holla@....com>,
<tglx@...utronix.de>, <peterz@...radead.org>, <mpe@...erman.id.au>,
<linux-arm-kernel@...ts.infradead.org>, <mingo@...hat.com>, <bp@...en8.de>,
<dave.hansen@...ux.intel.com>
CC: <linuxppc-dev@...ts.ozlabs.org>, <x86@...nel.org>,
<linux-kernel@...r.kernel.org>, <dietmar.eggemann@....com>,
<gregkh@...uxfoundation.org>, <rafael@...nel.org>,
<jonathan.cameron@...wei.com>, <prime.zeng@...ilicon.com>,
<linuxarm@...wei.com>, <yangyicong@...ilicon.com>, <xuwei5@...wei.com>,
<guohanjun@...wei.com>
Subject: [PATCH v5 3/4] arm64: topology: Support SMT control on ACPI based system
From: Yicong Yang <yangyicong@...ilicon.com>

For ACPI we build the topology from the PPTT and cannot directly get the
SMT thread number of each core. Instead, use a temporary xarray to record
the thread number of each core while building the topology, so that the
largest SMT thread number in the system is known once parsing finishes.
Then enable support for SMT control.

Signed-off-by: Yicong Yang <yangyicong@...ilicon.com>
---
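Not part of the patch, just an illustration of the counting described in
the commit message: a minimal user-space sketch, with hypothetical
topology data and a plain array standing in for the xarray keyed by the
PPTT core id.

/*
 * Walk a (hypothetical) cpu -> core_id mapping, count the threads seen
 * for each core and track the largest count. The patch does the same
 * with an xarray while parsing the PPTT.
 */
#include <stdio.h>

#define NR_CPUS		8
#define NR_CORES	8

int main(void)
{
	/* Hypothetical example: 4 cores, each with 2 SMT threads. */
	int core_id_of_cpu[NR_CPUS] = { 0, 0, 1, 1, 2, 2, 3, 3 };
	int threads_per_core[NR_CORES] = { 0 };
	int max_smt_thread_num = 1;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		int core = core_id_of_cpu[cpu];

		threads_per_core[core]++;
		if (threads_per_core[core] > max_smt_thread_num)
			max_smt_thread_num = threads_per_core[core];
	}

	/* Prints 2 for the example data above. */
	printf("max SMT threads per core: %d\n", max_smt_thread_num);
	return 0;
}

In the patch the maximum found this way is handed to
cpu_smt_set_num_threads(), which is what lets the generic SMT control
interface under /sys/devices/system/cpu/smt work on such systems.
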
arch/arm64/kernel/topology.c | 24 ++++++++++++++++++++++++
1 file changed, 24 insertions(+)

diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 1a2c72f3e7f8..f72e1e55b05e 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -15,8 +15,10 @@
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
#include <linux/cpufreq.h>
+#include <linux/cpu_smt.h>
#include <linux/init.h>
#include <linux/percpu.h>
+#include <linux/xarray.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
@@ -43,11 +45,16 @@ static bool __init acpi_cpu_is_threaded(int cpu)
*/
int __init parse_acpi_topology(void)
{
+ int thread_num, max_smt_thread_num = 1;
+ struct xarray core_threads;
int cpu, topology_id;
+ void *entry;

if (acpi_disabled)
return 0;

+ xa_init(&core_threads);
+
for_each_possible_cpu(cpu) {
topology_id = find_acpi_cpu_topology(cpu, 0);
if (topology_id < 0)
@@ -57,6 +64,20 @@ int __init parse_acpi_topology(void)
cpu_topology[cpu].thread_id = topology_id;
topology_id = find_acpi_cpu_topology(cpu, 1);
cpu_topology[cpu].core_id = topology_id;
+
+ entry = xa_load(&core_threads, topology_id);
+ if (!entry) {
+ xa_store(&core_threads, topology_id,
+ xa_mk_value(1), GFP_KERNEL);
+ } else {
+ thread_num = xa_to_value(entry);
+ thread_num++;
+ xa_store(&core_threads, topology_id,
+ xa_mk_value(thread_num), GFP_KERNEL);
+
+ if (thread_num > max_smt_thread_num)
+ max_smt_thread_num = thread_num;
+ }
} else {
cpu_topology[cpu].thread_id = -1;
cpu_topology[cpu].core_id = topology_id;
@@ -67,6 +88,9 @@ int __init parse_acpi_topology(void)
cpu_topology[cpu].package_id = topology_id;
}

+ cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
+
+ xa_destroy(&core_threads);
return 0;
}
#endif
--
2.24.0