Message-ID: <CAAhV-H6D_XxGTgWjzO26JtBcNeaouqrzH1wTaCn7xK3HGtZ55w@mail.gmail.com>
Date: Sat, 6 Dec 2025 21:04:28 +0800
From: Huacai Chen <chenhuacai@...nel.org>
To: Bibo Mao <maobibo@...ngson.cn>
Cc: Paolo Bonzini <pbonzini@...hat.com>, WANG Xuerui <kernel@...0n.name>,
Juergen Gross <jgross@...e.com>, Ajay Kaher <ajay.kaher@...adcom.com>,
Alexey Makhalov <alexey.makhalov@...adcom.com>,
Broadcom internal kernel review list <bcm-kernel-feedback-list@...adcom.com>, kvm@...r.kernel.org,
loongarch@...ts.linux.dev, linux-kernel@...r.kernel.org,
virtualization@...ts.linux.dev, x86@...nel.org
Subject: Re: [PATCH v3 2/2] LoongArch: Add paravirt support with
vcpu_is_preempted() in guest side

Hi, Bibo,

On Tue, Dec 2, 2025 at 10:48 AM Bibo Mao <maobibo@...ngson.cn> wrote:
>
> Function vcpu_is_preempted() is used to check whether a vCPU is preempted
> or not. Add an implementation of vcpu_is_preempted() for the guest side
> when CONFIG_PARAVIRT is enabled.
>
> Signed-off-by: Bibo Mao <maobibo@...ngson.cn>
> ---
> arch/loongarch/include/asm/qspinlock.h | 3 +++
> arch/loongarch/kernel/paravirt.c | 23 ++++++++++++++++++++++-
> 2 files changed, 25 insertions(+), 1 deletion(-)
>
> diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h
> index e76d3aa1e1eb..fa3eaf7e48f2 100644
> --- a/arch/loongarch/include/asm/qspinlock.h
> +++ b/arch/loongarch/include/asm/qspinlock.h
> @@ -34,6 +34,9 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
> return true;
> }
>
> +#define vcpu_is_preempted vcpu_is_preempted
> +bool vcpu_is_preempted(int cpu);
> +
> #endif /* CONFIG_PARAVIRT */
>
> #include <asm-generic/qspinlock.h>
> diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c
> index b1b51f920b23..b61a93c6aec8 100644
> --- a/arch/loongarch/kernel/paravirt.c
> +++ b/arch/loongarch/kernel/paravirt.c
> @@ -246,6 +246,7 @@ static void pv_disable_steal_time(void)
> }
>
> #ifdef CONFIG_SMP
> +static DEFINE_STATIC_KEY_FALSE(virt_preempt_key);
> static int pv_time_cpu_online(unsigned int cpu)
> {
> unsigned long flags;
> @@ -267,6 +268,18 @@ static int pv_time_cpu_down_prepare(unsigned int cpu)
>
> return 0;
> }
> +
> +bool notrace vcpu_is_preempted(int cpu)
> +{
> + struct kvm_steal_time *src;
> +
> + if (!static_branch_unlikely(&virt_preempt_key))
> + return false;
> +
> + src = &per_cpu(steal_time, cpu);
> + return !!(src->preempted & KVM_VCPU_PREEMPTED);
> +}
> +EXPORT_SYMBOL(vcpu_is_preempted);
> #endif
>
> static void pv_cpu_reboot(void *unused)
> @@ -308,6 +321,9 @@ int __init pv_time_init(void)
> pr_err("Failed to install cpu hotplug callbacks\n");
> return r;
> }
> +
> + if (kvm_para_has_feature(KVM_FEATURE_PREEMPT))
> + static_branch_enable(&virt_preempt_key);
> #endif
>
> static_call_update(pv_steal_clock, paravt_steal_clock);
> @@ -318,7 +334,12 @@ int __init pv_time_init(void)
> static_key_slow_inc(&paravirt_steal_rq_enabled);
> #endif
>
> - pr_info("Using paravirt steal-time\n");
> +#ifdef CONFIG_SMP
The Linux kernel is removing non-SMP support step by step [1]:

[1] https://kernelnewbies.org/Linux_6.17#Unconditionally_compile_task_scheduler_with_SMP_support

Though we cannot remove all "#ifdef CONFIG_SMP" blocks at present, we
can at least stop adding new ones. So I prefer to take this whole patch
out of CONFIG_SMP. But if you don't like that, you can at least move
the virt_preempt_key declaration out of "#ifdef CONFIG_SMP"; then the
#ifdefs here can be removed.
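
Something like this rough sketch (untested, only to show what I mean
by moving the declaration out):

/* declared unconditionally, not under #ifdef CONFIG_SMP */
static DEFINE_STATIC_KEY_FALSE(virt_preempt_key);

...

int __init pv_time_init(void)
{
	...
	/* no #ifdef CONFIG_SMP needed around the message selection */
	if (static_key_enabled(&virt_preempt_key))
		pr_info("Using paravirt steal-time with preempt enabled\n");
	else
		pr_info("Using paravirt steal-time with preempt disabled\n");

	return 0;
}
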
Huacai

> + if (static_key_enabled(&virt_preempt_key))
> + pr_info("Using paravirt steal-time with preempt enabled\n");
> + else
> +#endif
> + pr_info("Using paravirt steal-time with preempt disabled\n");
>
> return 0;
> }
> --
> 2.39.3
>
>