Message-Id: <20251118080656.2012805-3-maobibo@loongson.cn>
Date: Tue, 18 Nov 2025 16:06:55 +0800
From: Bibo Mao <maobibo@...ngson.cn>
To: Paolo Bonzini <pbonzini@...hat.com>,
Huacai Chen <chenhuacai@...nel.org>,
WANG Xuerui <kernel@...0n.name>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Will Deacon <will@...nel.org>,
Boqun Feng <boqun.feng@...il.com>,
Waiman Long <longman@...hat.com>,
Juergen Gross <jgross@...e.com>,
Ajay Kaher <ajay.kaher@...adcom.com>,
Alexey Makhalov <alexey.makhalov@...adcom.com>,
Broadcom internal kernel review list <bcm-kernel-feedback-list@...adcom.com>
Cc: kvm@...r.kernel.org,
loongarch@...ts.linux.dev,
linux-kernel@...r.kernel.org,
virtualization@...ts.linux.dev,
x86@...nel.org
Subject: [PATCH 2/3] LoongArch: Add paravirt support with vcpu_is_preempted()
Function vcpu_is_preempted() is used to check whether a vCPU is preempted
or not. Add an implementation of vcpu_is_preempted() when CONFIG_PARAVIRT
is enabled: a KVM guest reads the preempted flag from its shared steal-time
area, while on bare metal the hook always reports false.
Signed-off-by: Bibo Mao <maobibo@...ngson.cn>
---
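Not part of this patch, for reviewers only: a minimal sketch of how a lock
waiter might consult vcpu_is_preempted() once this hook is wired up. The
helper keep_spinning() and its owner_cpu parameter are made-up names for
illustration; only vcpu_is_preempted() and need_resched() are existing
kernel interfaces.

/*
 * Illustrative only: stop optimistic spinning when the lock owner's vCPU
 * has been preempted by the host, since spinning on a descheduled owner
 * just burns guest cycles.
 */
static bool keep_spinning(int owner_cpu)
{
	if (vcpu_is_preempted(owner_cpu))
		return false;

	return !need_resched();
}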
arch/loongarch/include/asm/smp.h | 1 +
arch/loongarch/include/asm/spinlock.h | 5 +++++
arch/loongarch/kernel/paravirt.c | 16 ++++++++++++++++
arch/loongarch/kernel/smp.c | 6 ++++++
4 files changed, 28 insertions(+)
diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
index 3a47f52959a8..5b37f7bf2060 100644
--- a/arch/loongarch/include/asm/smp.h
+++ b/arch/loongarch/include/asm/smp.h
@@ -18,6 +18,7 @@ struct smp_ops {
void (*init_ipi)(void);
void (*send_ipi_single)(int cpu, unsigned int action);
void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
+ bool (*vcpu_is_preempted)(int cpu);
};
extern struct smp_ops mp_ops;
diff --git a/arch/loongarch/include/asm/spinlock.h b/arch/loongarch/include/asm/spinlock.h
index 7cb3476999be..c001cef893aa 100644
--- a/arch/loongarch/include/asm/spinlock.h
+++ b/arch/loongarch/include/asm/spinlock.h
@@ -5,6 +5,11 @@
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H
+#ifdef CONFIG_PARAVIRT
+#define vcpu_is_preempted vcpu_is_preempted
+bool vcpu_is_preempted(int cpu);
+#endif
+
#include <asm/processor.h>
#include <asm/qspinlock.h>
#include <asm/qrwlock.h>
diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
index b1b51f920b23..b99404b6b13f 100644
--- a/arch/loongarch/kernel/paravirt.c
+++ b/arch/loongarch/kernel/paravirt.c
@@ -52,6 +52,13 @@ static u64 paravt_steal_clock(int cpu)
#ifdef CONFIG_SMP
static struct smp_ops native_ops;
+static bool pv_vcpu_is_preempted(int cpu)
+{
+ struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
+
+ return !!(src->preempted & KVM_VCPU_PREEMPTED);
+}
+
static void pv_send_ipi_single(int cpu, unsigned int action)
{
int min, old;
@@ -308,6 +315,9 @@ int __init pv_time_init(void)
pr_err("Failed to install cpu hotplug callbacks\n");
return r;
}
+
+ if (kvm_para_has_feature(KVM_FEATURE_PREEMPT_HINT))
+ mp_ops.vcpu_is_preempted = pv_vcpu_is_preempted;
#endif
static_call_update(pv_steal_clock, paravt_steal_clock);
@@ -332,3 +342,9 @@ int __init pv_spinlock_init(void)
return 0;
}
+
+bool notrace vcpu_is_preempted(int cpu)
+{
+ return mp_ops.vcpu_is_preempted(cpu);
+}
+EXPORT_SYMBOL(vcpu_is_preempted);
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 46036d98da75..f04192fedf8d 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -307,10 +307,16 @@ static void loongson_init_ipi(void)
panic("IPI IRQ request failed\n");
}
+static bool loongson_vcpu_is_preempted(int cpu)
+{
+ return false;
+}
+
struct smp_ops mp_ops = {
.init_ipi = loongson_init_ipi,
.send_ipi_single = loongson_send_ipi_single,
.send_ipi_mask = loongson_send_ipi_mask,
+ .vcpu_is_preempted = loongson_vcpu_is_preempted,
};
static void __init fdt_smp_setup(void)
--
2.39.3