Message-ID: <CAAhV-H4m0SqQ06eqnJi_LwFxuYSFY4Xi1FSD1dd1w3258NEHew@mail.gmail.com>
Date: Wed, 19 Nov 2025 15:36:39 +0800
From: Huacai Chen <chenhuacai@...nel.org>
To: Bibo Mao <maobibo@...ngson.cn>
Cc: Paolo Bonzini <pbonzini@...hat.com>, WANG Xuerui <kernel@...0n.name>,
Peter Zijlstra <peterz@...radead.org>, Ingo Molnar <mingo@...hat.com>, Will Deacon <will@...nel.org>,
Boqun Feng <boqun.feng@...il.com>, Waiman Long <longman@...hat.com>,
Juergen Gross <jgross@...e.com>, Ajay Kaher <ajay.kaher@...adcom.com>,
Alexey Makhalov <alexey.makhalov@...adcom.com>,
Broadcom internal kernel review list <bcm-kernel-feedback-list@...adcom.com>, kvm@...r.kernel.org,
loongarch@...ts.linux.dev, linux-kernel@...r.kernel.org,
virtualization@...ts.linux.dev, x86@...nel.org
Subject: Re: [PATCH 2/3] LoongArch: Add paravirt support with vcpu_is_preempted()
On Wed, Nov 19, 2025 at 10:53 AM Bibo Mao <maobibo@...ngson.cn> wrote:
>
>
>
> On 2025/11/18 8:48 PM, Huacai Chen wrote:
> > Hi, Bibo,
> >
> > On Tue, Nov 18, 2025 at 4:07 PM Bibo Mao <maobibo@...ngson.cn> wrote:
> >>
> >> Function vcpu_is_preempted() is used to check whether a vCPU is preempted
> >> or not. Add an implementation of vcpu_is_preempted() when option
> >> CONFIG_PARAVIRT is enabled.
> >>
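(For context, generic code consumes this hint in places such as
available_idle_cpu() in kernel/sched/core.c and the osq spin loop,
roughly like the sketch below; paraphrased from memory, not the exact
upstream source:

int available_idle_cpu(int cpu)
{
        if (!idle_cpu(cpu))
                return 0;

        /* A vCPU currently preempted by the host is not really available. */
        if (vcpu_is_preempted(cpu))
                return 0;

        return 1;
}

So the hint only needs to be best-effort; callers treat it as an
optimization.)
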
> >> Signed-off-by: Bibo Mao <maobibo@...ngson.cn>
> >> ---
> >> arch/loongarch/include/asm/smp.h | 1 +
> >> arch/loongarch/include/asm/spinlock.h | 5 +++++
> >> arch/loongarch/kernel/paravirt.c | 16 ++++++++++++++++
> >> arch/loongarch/kernel/smp.c | 6 ++++++
> >> 4 files changed, 28 insertions(+)
> >>
> >> diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
> >> index 3a47f52959a8..5b37f7bf2060 100644
> >> --- a/arch/loongarch/include/asm/smp.h
> >> +++ b/arch/loongarch/include/asm/smp.h
> >> @@ -18,6 +18,7 @@ struct smp_ops {
> >>         void (*init_ipi)(void);
> >>         void (*send_ipi_single)(int cpu, unsigned int action);
> >>         void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
> >> +       bool (*vcpu_is_preempted)(int cpu);
> >> };
> >> extern struct smp_ops mp_ops;
> >>
> >> diff --git a/arch/loongarch/include/asm/spinlock.h b/arch/loongarch/include/asm/spinlock.h
> >> index 7cb3476999be..c001cef893aa 100644
> >> --- a/arch/loongarch/include/asm/spinlock.h
> >> +++ b/arch/loongarch/include/asm/spinlock.h
> >> @@ -5,6 +5,11 @@
> >> #ifndef _ASM_SPINLOCK_H
> >> #define _ASM_SPINLOCK_H
> >>
> >> +#ifdef CONFIG_PARAVIRT
> >> +#define vcpu_is_preempted vcpu_is_preempted
> >> +bool vcpu_is_preempted(int cpu);
> >> +#endif
> > Maybe paravirt.h is a better place?
> How about putting it in asm/qspinlock.h, since it is already included by
> the header file asm/spinlock.h?
qspinlock.h is better than spinlock.h.
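Something along these lines should be enough there (just a sketch,
untested, and the surrounding context of that header may differ):

/* arch/loongarch/include/asm/qspinlock.h */
#ifdef CONFIG_PARAVIRT
#define vcpu_is_preempted       vcpu_is_preempted
bool vcpu_is_preempted(int cpu);
#endif
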
Huacai
>
> >
> >> +
> >> #include <asm/processor.h>
> >> #include <asm/qspinlock.h>
> >> #include <asm/qrwlock.h>
> >> diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
> >> index b1b51f920b23..b99404b6b13f 100644
> >> --- a/arch/loongarch/kernel/paravirt.c
> >> +++ b/arch/loongarch/kernel/paravirt.c
> >> @@ -52,6 +52,13 @@ static u64 paravt_steal_clock(int cpu)
> >> #ifdef CONFIG_SMP
> >> static struct smp_ops native_ops;
> >>
> >> +static bool pv_vcpu_is_preempted(int cpu)
> >> +{
> >> +       struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
> >> +
> >> +       return !!(src->preempted & KVM_VCPU_PREEMPTED);
> >> +}
> >> +
> >> static void pv_send_ipi_single(int cpu, unsigned int action)
> >> {
> >>         int min, old;
> >> @@ -308,6 +315,9 @@ int __init pv_time_init(void)
> >>                 pr_err("Failed to install cpu hotplug callbacks\n");
> >>                 return r;
> >>         }
> >> +
> >> +       if (kvm_para_has_feature(KVM_FEATURE_PREEMPT_HINT))
> >> +               mp_ops.vcpu_is_preempted = pv_vcpu_is_preempted;
> >> #endif
> >>
> >>         static_call_update(pv_steal_clock, paravt_steal_clock);
> >> @@ -332,3 +342,9 @@ int __init pv_spinlock_init(void)
> >>
> >>         return 0;
> >> }
> >> +
> >> +bool notrace vcpu_is_preempted(int cpu)
> >> +{
> >> +       return mp_ops.vcpu_is_preempted(cpu);
> >> +}
> >
> > We can simplify the whole patch like this; then we don't need to touch
> > smp.c, and we can merge Patch-2 and Patch-3.
> >
> > +bool notrace vcpu_is_preempted(int cpu)
> > +{
> > +       struct kvm_steal_time *src;
> > +
> > +       if (!kvm_para_has_feature(KVM_FEATURE_PREEMPT_HINT))
> > +               return false;
> > +
> > +       src = &per_cpu(steal_time, cpu);
> > +       return !!(src->preempted & KVM_VCPU_PREEMPTED);
> > +}
> > Huacai
> >
> >> +EXPORT_SYMBOL(vcpu_is_preempted);
> >> diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
> >> index 46036d98da75..f04192fedf8d 100644
> >> --- a/arch/loongarch/kernel/smp.c
> >> +++ b/arch/loongarch/kernel/smp.c
> >> @@ -307,10 +307,16 @@ static void loongson_init_ipi(void)
> >>                 panic("IPI IRQ request failed\n");
> >> }
> >>
> >> +static bool loongson_vcpu_is_preempted(int cpu)
> >> +{
> >> +       return false;
> >> +}
> >> +
> >> struct smp_ops mp_ops = {
> >>         .init_ipi = loongson_init_ipi,
> >>         .send_ipi_single = loongson_send_ipi_single,
> >>         .send_ipi_mask = loongson_send_ipi_mask,
> >> +       .vcpu_is_preempted = loongson_vcpu_is_preempted,
> >> };
> >>
> >> static void __init fdt_smp_setup(void)
> >> --
> >> 2.39.3
> >>
> >>
>
>