Message-ID: <20230812195729.332293834@linutronix.de>
Date: Sat, 12 Aug 2023 21:59:24 +0200 (CEST)
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: x86@...nel.org, Borislav Petkov <bp@...en8.de>,
Ashok Raj <ashok.raj@...el.com>,
Arjan van de Ven <arjan@...ux.intel.com>,
Nikolay Borisov <nik.borisov@...e.com>
Subject: [patch V2 31/37] x86/microcode: Replace the all-in-one rendezvous
handler
From: Thomas Gleixner <tglx@...utronix.de>
with a new handler which just separates the control flow of primary and
secondary CPUs.
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
arch/x86/kernel/cpu/microcode/core.c | 51 ++++++-----------------------------
1 file changed, 9 insertions(+), 42 deletions(-)
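
[Editorial note, not part of the patch: the hunks below show only the new
dispatch in ucode_load_cpus_stopped(), so here is a minimal userspace sketch
of the split control flow for readers, compiled as plain C11 with pthreads.
Threads stand in for stop_machine(), printf() stands in for
microcode_ops->apply_microcode(), and the late_cpus_in rendezvous plus all
timeout/result handling are omitted. SCTRL_DONE and ucode_ctrl.ctrl_cpu mirror
the patch; SCTRL_WAIT/SCTRL_APPLY, the handoff ordering and the sibling layout
are illustrative assumptions, not taken from this series.]

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	enum sibling_ctrl { SCTRL_WAIT, SCTRL_APPLY, SCTRL_DONE };

	#define NR_CPUS	4

	static struct {
		_Atomic enum sibling_ctrl ctrl;
		unsigned int ctrl_cpu;		/* Primary sibling of this CPU */
	} ucode_ctrl[NR_CPUS];

	static void load_primary(unsigned int cpu)
	{
		unsigned int s;

		/* Stand-in for the actual microcode update on the primary */
		printf("cpu%u: primary applies the update\n", cpu);

		/* Release the secondaries which designated this CPU as control CPU */
		for (s = 0; s < NR_CPUS; s++) {
			if (s != cpu && ucode_ctrl[s].ctrl_cpu == cpu)
				atomic_store(&ucode_ctrl[s].ctrl, SCTRL_APPLY);
		}
	}

	static void load_secondary(unsigned int cpu)
	{
		/* Spin until the control CPU hands over */
		while (atomic_load(&ucode_ctrl[cpu].ctrl) == SCTRL_WAIT)
			;
		printf("cpu%u: secondary refreshes its revision info\n", cpu);
		atomic_store(&ucode_ctrl[cpu].ctrl, SCTRL_DONE);
	}

	/* Models the per-CPU callback invoked while all CPUs are stopped */
	static void *cpus_stopped(void *arg)
	{
		unsigned int cpu = (unsigned int)(unsigned long)arg;

		/* The split dispatch which this patch introduces */
		if (ucode_ctrl[cpu].ctrl_cpu == cpu)
			load_primary(cpu);
		else
			load_secondary(cpu);
		return NULL;
	}

	int main(void)
	{
		pthread_t t[NR_CPUS];
		unsigned int cpu;

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			/* Assume CPUs {0,1} and {2,3} are SMT siblings */
			ucode_ctrl[cpu].ctrl_cpu = cpu & ~1U;
			atomic_store(&ucode_ctrl[cpu].ctrl, SCTRL_WAIT);
		}
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			pthread_create(&t[cpu], NULL, cpus_stopped,
				       (void *)(unsigned long)cpu);
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			pthread_join(&t[cpu], NULL);
		return 0;
	}
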
---
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -337,7 +337,7 @@ struct ucode_ctrl {
};
static DEFINE_PER_CPU(struct ucode_ctrl, ucode_ctrl);
-static atomic_t late_cpus_in, late_cpus_out;
+static atomic_t late_cpus_in;
static bool wait_for_cpus(atomic_t *cnt)
{
@@ -371,7 +371,7 @@ static bool wait_for_ctrl(void)
return false;
}
-static __maybe_unused void ucode_load_secondary(unsigned int cpu)
+static void ucode_load_secondary(unsigned int cpu)
{
unsigned int ctrl_cpu = this_cpu_read(ucode_ctrl.ctrl_cpu);
enum ucode_state ret;
@@ -407,7 +407,7 @@ static __maybe_unused void ucode_load_se
this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);
}
-static __maybe_unused void ucode_load_primary(unsigned int cpu)
+static void ucode_load_primary(unsigned int cpu)
{
struct cpumask *secondaries = topology_sibling_cpumask(cpu);
enum sibling_ctrl ctrl;
@@ -445,46 +445,14 @@ static __maybe_unused void ucode_load_pr
static int ucode_load_cpus_stopped(void *unused)
{
- int cpu = smp_processor_id();
- enum ucode_state ret;
-
- /*
- * Wait for all CPUs to arrive. A load will not be attempted unless all
- * CPUs show up.
- * */
- if (!wait_for_cpus(&late_cpus_in)) {
- this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
- return 0;
- }
-
- /*
- * On an SMT system, it suffices to load the microcode on one sibling of
- * the core because the microcode engine is shared between the threads.
- * Synchronization still needs to take place so that no concurrent
- * loading attempts happen on multiple threads of an SMT core. See
- * below.
- */
- if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu)
- goto wait_for_siblings;
+ unsigned int cpu = smp_processor_id();
- ret = microcode_ops->apply_microcode(cpu);
- this_cpu_write(ucode_ctrl.result, ret);
-
-wait_for_siblings:
- if (!wait_for_cpus(&late_cpus_out))
- panic("Timeout during microcode update!\n");
-
- /*
- * At least one thread has completed update on each core.
- * For others, simply call the update to make sure the
- * per-cpu cpuinfo can be updated with right microcode
- * revision.
- */
- if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu)
- return 0;
+ if (this_cpu_read(ucode_ctrl.ctrl_cpu) == cpu)
+ ucode_load_primary(cpu);
+ else
+ ucode_load_secondary(cpu);
- ret = microcode_ops->apply_microcode(cpu);
- this_cpu_write(ucode_ctrl.result, ret);
+ /* No point to wait here. The CPUs will all wait in stop_machine(). */
return 0;
}
@@ -498,7 +466,6 @@ static int ucode_load_late_stop_cpus(voi
pr_err("You should switch to early loading, if possible.\n");
atomic_set(&late_cpus_in, num_online_cpus());
- atomic_set(&late_cpus_out, num_online_cpus());
/*
* Take a snapshot before the microcode update in order to compare and