[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20230807135028.538458200@linutronix.de>
Date: Mon, 7 Aug 2023 15:53:36 +0200 (CEST)
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: x86@...nel.org, Tom Lendacky <thomas.lendacky@....com>,
Andrew Cooper <andrew.cooper3@...rix.com>,
Arjan van de Ven <arjan@...ux.intel.com>,
Huang Rui <ray.huang@....com>, Juergen Gross <jgross@...e.com>,
Dimitri Sivanich <dimitri.sivanich@....com>,
Michael Kelley <mikelley@...rosoft.com>,
Sohil Mehta <sohil.mehta@...el.com>,
K Prateek Nayak <kprateek.nayak@....com>,
Kan Liang <kan.liang@...ux.intel.com>,
Zhang Rui <rui.zhang@...el.com>,
"Paul E. McKenney" <paulmck@...nel.org>,
Feng Tang <feng.tang@...el.com>,
Andy Shevchenko <andy@...radead.org>
Subject: [patch 40/53] x86/cpu/topology: Assign hotpluggable CPUIDs during
init
There is no point in assigning the CPU numbers during ACPI physical
hotplug. The number of possible hotplug CPUs is known when the possible map
is initialized, so the CPU numbers can be associated with the registered
non-present APIC IDs right there.
This allows more code to be put into the __init section and makes the
related data __ro_after_init.
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
arch/x86/kernel/cpu/topology.c | 28 +++++++++++++++++-----------
1 file changed, 17 insertions(+), 11 deletions(-)
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -45,7 +45,7 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_a
DECLARE_BITMAP(phys_cpu_present_map, MAX_LOCAL_APIC) __read_mostly;
/* Used for CPU number allocation and parallel CPU bringup */
-u32 cpuid_to_apicid[] __read_mostly = { [0 ... NR_CPUS - 1] = BAD_APICID, };
+u32 cpuid_to_apicid[] __ro_after_init = { [0 ... NR_CPUS - 1] = BAD_APICID, };
/* Bitmaps to mark registered APICs at each topology domain */
static struct { DECLARE_BITMAP(map, MAX_LOCAL_APIC); } apic_maps[TOPO_MAX_DOMAIN] __ro_after_init;
@@ -60,7 +60,7 @@ struct {
unsigned int nr_rejected_cpus;
u32 boot_cpu_apic_id;
u32 real_bsp_apic_id;
-} topo_info __read_mostly = {
+} topo_info __ro_after_init = {
.nr_assigned_cpus = 1,
.boot_cpu_apic_id = BAD_APICID,
.real_bsp_apic_id = BAD_APICID,
@@ -143,7 +143,7 @@ static int topo_lookup_cpuid(u32 apic_id
return -ENODEV;
}
-static int topo_assign_cpunr(u32 apic_id)
+static __init int topo_assign_cpunr(u32 apic_id)
{
int cpu = topo_lookup_cpuid(apic_id);
@@ -159,8 +159,6 @@ static void topo_set_cpuids(unsigned int
early_per_cpu(x86_cpu_to_apicid, cpu) = apic_id;
early_per_cpu(x86_cpu_to_acpiid, cpu) = acpi_id;
#endif
- cpuid_to_apicid[cpu] = apic_id;
-
set_cpu_possible(cpu, true);
set_cpu_present(cpu, true);
@@ -205,6 +203,8 @@ void __init topology_register_apic(u32 a
cpu = 0;
else
cpu = topo_assign_cpunr(apic_id);
+
+ cpuid_to_apicid[cpu] = apic_id;
topo_set_cpuids(cpu, apic_id, acpi_id);
} else {
topo_info.nr_disabled_cpus++;
@@ -247,12 +247,9 @@ int topology_hotplug_apic(u32 apic_id, u
return -ENODEV;
cpu = topo_lookup_cpuid(apic_id);
- if (cpu < 0) {
- if (topo_info.nr_assigned_cpus >= nr_cpu_ids)
- return -ENOSPC;
+ if (cpu < 0)
+ return -ENOSPC;
- cpu = topo_assign_cpunr(apic_id);
- }
set_bit(apic_id, phys_cpu_present_map);
topo_set_cpuids(cpu, apic_id, acpi_id);
return cpu;
@@ -352,6 +349,7 @@ void __init topology_init_possible_cpus(
unsigned int disabled = topo_info.nr_disabled_cpus;
unsigned int total = assigned + disabled;
unsigned int cpu, dom, allowed = 1;
+ u32 apicid;
if (!restrict_to_up()) {
if (total > 1)
@@ -386,8 +384,16 @@ void __init topology_init_possible_cpus(
init_cpu_present(cpumask_of(0));
init_cpu_possible(cpumask_of(0));
+ for (apicid = 0; disabled; disabled--, apicid++) {
+ apicid = find_next_andnot_bit(apic_maps[TOPO_SMT_DOMAIN].map, phys_cpu_present_map,
+ MAX_LOCAL_APIC, apicid);
+ if (apicid >= MAX_LOCAL_APIC)
+ break;
+ cpuid_to_apicid[topo_info.nr_assigned_cpus++] = apicid;
+ }
+
for (cpu = 0; cpu < allowed; cpu++) {
- u32 apicid = cpuid_to_apicid[cpu];
+ apicid = cpuid_to_apicid[cpu];
/*
* In case of a kdump() kernel, don't mark the real BSP in
Powered by blists - more mailing lists