[<prev] [next>] [day] [month] [year] [list]
Message-Id: <20240813094831.678729-1-maobibo@loongson.cn>
Date: Tue, 13 Aug 2024 17:48:31 +0800
From: Bibo Mao <maobibo@...ngson.cn>
To: Huacai Chen <chenhuacai@...nel.org>
Cc: WANG Xuerui <kernel@...0n.name>,
loongarch@...ts.linux.dev,
linux-kernel@...r.kernel.org,
virtualization@...ts.linux.dev
Subject: [PATCH] LoongArch: Fix AP booting issue in VM mode
If paravirt IPI is used, the native IPI is still necessary for AP booting,
since it is the boot interface between the OS and the BIOS firmware; the
paravirt IPI is only used inside the OS.
During the AP boot stage, the AP executes the idle instruction and waits for
an interrupt or SW events. When the AP is woken up, it clears the IPI
interrupt and jumps to the kernel entry read from the HW mailbox. The BP
writes the kernel entry address into the AP's HW mailbox and then sends an
IPI interrupt to notify the AP.
Between the time the BP writes the HW mailbox and the time it sends the IPI,
the AP may already be woken up by SW events and jump to the kernel entry, so
the ACTION_BOOT_CPU IPI interrupt sent from the BP will stay pending during
AP booting. The native IPI interrupt handler therefore needs to be registered
so that it can clear the pending native IPI; otherwise there will be endless
IRQ handling during the AP boot stage.
Hence the native IPI interrupt is initialized here even if paravirt IPI is used.
Fixes: 74c16b2e2b0c ("LoongArch: KVM: Add PV IPI support on guest side")
Signed-off-by: Bibo Mao <maobibo@...ngson.cn>
---
arch/loongarch/kernel/paravirt.c | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
index 9c9b75b76f62..348920b25460 100644
--- a/arch/loongarch/kernel/paravirt.c
+++ b/arch/loongarch/kernel/paravirt.c
@@ -13,6 +13,9 @@ static int has_steal_clock;
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
+#ifdef CONFIG_SMP
+static struct smp_ops old_ops;
+#endif
static u64 native_steal_clock(int cpu)
{
@@ -55,6 +58,11 @@ static void pv_send_ipi_single(int cpu, unsigned int action)
int min, old;
irq_cpustat_t *info = &per_cpu(irq_stat, cpu);
+ if (unlikely(action == ACTION_BOOT_CPU)) {
+ old_ops.send_ipi_single(cpu, action);
+ return;
+ }
+
old = atomic_fetch_or(BIT(action), &info->message);
if (old)
return;
@@ -71,6 +79,12 @@ static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
__uint128_t bitmap = 0;
irq_cpustat_t *info;
+ if (unlikely(action == ACTION_BOOT_CPU)) {
+ /* Use native IPI to boot AP */
+ old_ops.send_ipi_mask(mask, action);
+ return;
+ }
+
if (cpumask_empty(mask))
return;
@@ -141,6 +155,8 @@ static void pv_init_ipi(void)
{
int r, swi;
+ /* Init native ipi irq since AP booting uses it */
+ old_ops.init_ipi();
swi = get_percpu_irq(INT_SWI0);
if (swi < 0)
panic("SWI0 IRQ mapping failed\n");
@@ -179,6 +195,9 @@ int __init pv_ipi_init(void)
return 0;
#ifdef CONFIG_SMP
+ old_ops.init_ipi = mp_ops.init_ipi;
+ old_ops.send_ipi_single = mp_ops.send_ipi_single;
+ old_ops.send_ipi_mask = mp_ops.send_ipi_mask;
mp_ops.init_ipi = pv_init_ipi;
mp_ops.send_ipi_single = pv_send_ipi_single;
mp_ops.send_ipi_mask = pv_send_ipi_mask;
base-commit: 7c626ce4bae1ac14f60076d00eafe71af30450ba
--
2.39.3
Powered by blists - more mailing lists