Message-ID: <20250911063433.13783-15-jgross@suse.com>
Date: Thu, 11 Sep 2025 08:34:33 +0200
From: Juergen Gross <jgross@...e.com>
To: linux-kernel@...r.kernel.org,
x86@...nel.org,
virtualization@...ts.linux.dev
Cc: Juergen Gross <jgross@...e.com>,
Ajay Kaher <ajay.kaher@...adcom.com>,
Alexey Makhalov <alexey.makhalov@...adcom.com>,
Broadcom internal kernel review list <bcm-kernel-feedback-list@...adcom.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
"H. Peter Anvin" <hpa@...or.com>
Subject: [PATCH 14/14] x86/pvlocks: move paravirt spinlock functions into qspinlock.h

Instead of having the pv spinlock function definitions in paravirt.h,
move them into qspinlock.h, where the arch-specific functions for
qspinlocks are already defined.

Signed-off-by: Juergen Gross <jgross@...e.com>
---
arch/x86/include/asm/paravirt.h | 58 --------------------------------
arch/x86/include/asm/qspinlock.h | 49 +++++++++++++++++++++++++--
2 files changed, 47 insertions(+), 60 deletions(-)
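As a quick map of where everything lands, the resulting
arch/x86/include/asm/qspinlock.h nests roughly as in the sketch below
(declarations only, the PVOP_* wrapper bodies are elided; the hunks
that follow carry the real code):

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init paravirt_set_cap(void);
/* pv_queued_spin_lock_slowpath(), pv_queued_spin_unlock() and
 * pv_vcpu_is_preempted() PVOP wrappers live here now. */
#else /* CONFIG_PARAVIRT_SPINLOCKS */
static inline void paravirt_set_cap(void) { }
#endif /* !CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_PARAVIRT
void __init native_pv_lock_init(void);
/* pv_wait()/pv_kick() PVOP wrappers and the native helper
 * declarations moved from paravirt.h. */
#else /* CONFIG_PARAVIRT */
static inline void native_pv_lock_init(void) { }
#endif /* !CONFIG_PARAVIRT */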
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 13c0e4e65467..78097a7e4214 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -20,15 +20,6 @@ struct mm_struct;
#include <linux/cpumask.h>
#include <asm/frame.h>

-__visible void __native_queued_spin_unlock(struct qspinlock *lock);
-bool pv_is_native_spin_unlock(void);
-__visible bool __native_vcpu_is_preempted(long cpu);
-bool pv_is_native_vcpu_is_preempted(void);
-
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-void __init paravirt_set_cap(void);
-#endif
-
/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
@@ -525,43 +516,6 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
}
#endif

-#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-
-static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
- u32 val)
-{
- PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
-}
-
-static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
-{
- PVOP_ALT_VCALLEE1(lock.queued_spin_unlock, lock,
- "movb $0, (%%" _ASM_ARG1 ");",
- ALT_NOT(X86_FEATURE_PVUNLOCK));
-}
-
-static __always_inline void pv_wait(u8 *ptr, u8 val)
-{
- PVOP_VCALL2(lock.wait, ptr, val);
-}
-
-static __always_inline void pv_kick(int cpu)
-{
- PVOP_VCALL1(lock.kick, cpu);
-}
-
-static __always_inline bool pv_vcpu_is_preempted(long cpu)
-{
- return PVOP_ALT_CALLEE1(bool, lock.vcpu_is_preempted, cpu,
- "xor %%" _ASM_AX ", %%" _ASM_AX ";",
- ALT_NOT(X86_FEATURE_VCPUPREEMPT));
-}
-
-void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
-bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
-
-#endif /* SMP && PARAVIRT_SPINLOCKS */
-
#ifdef CONFIG_PARAVIRT_XXL
static __always_inline unsigned long arch_local_save_flags(void)
{
@@ -590,7 +544,6 @@ static __always_inline unsigned long arch_local_irq_save(void)
#endif

extern void default_banner(void);
-void native_pv_lock_init(void) __init;

#else /* __ASSEMBLER__ */

@@ -615,12 +568,6 @@ void native_pv_lock_init(void) __init;
#endif /* __ASSEMBLER__ */
#else /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
-
-#ifndef __ASSEMBLER__
-static inline void native_pv_lock_init(void)
-{
-}
-#endif
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLER__
@@ -636,10 +583,5 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
}
#endif

-#ifndef CONFIG_PARAVIRT_SPINLOCKS
-static inline void paravirt_set_cap(void)
-{
-}
-#endif
#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_PARAVIRT_H */
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 68da67df304d..376f89aa1d4c 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -28,12 +28,33 @@ static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lo
}

#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void __init paravirt_set_cap(void);
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
extern bool nopvspin;

+static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
+ u32 val)
+{
+ PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
+}
+
+static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+ PVOP_ALT_VCALLEE1(lock.queued_spin_unlock, lock,
+ "movb $0, (%%" _ASM_ARG1 ");",
+ ALT_NOT(X86_FEATURE_PVUNLOCK));
+}
+
+static __always_inline bool pv_vcpu_is_preempted(long cpu)
+{
+ return PVOP_ALT_CALLEE1(bool, lock.vcpu_is_preempted, cpu,
+ "xor %%" _ASM_AX ", %%" _ASM_AX ";",
+ ALT_NOT(X86_FEATURE_VCPUPREEMPT));
+}
+
#define queued_spin_unlock queued_spin_unlock
/**
* queued_spin_unlock - release a queued spinlock
@@ -62,9 +83,31 @@ static inline bool vcpu_is_preempted(long cpu)
{
return pv_vcpu_is_preempted(cpu);
}
-#endif
+
+#else /* CONFIG_PARAVIRT_SPINLOCKS */
+static inline void paravirt_set_cap(void) { }
+#endif /* !CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_PARAVIRT
+void __init native_pv_lock_init(void);
+__visible void __native_queued_spin_unlock(struct qspinlock *lock);
+bool pv_is_native_spin_unlock(void);
+__visible bool __native_vcpu_is_preempted(long cpu);
+bool pv_is_native_vcpu_is_preempted(void);
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+ PVOP_VCALL2(lock.wait, ptr, val);
+}
+
+static __always_inline void pv_kick(int cpu)
+{
+ PVOP_VCALL1(lock.kick, cpu);
+}
+
+void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
+bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
+
/*
* virt_spin_lock_key - disables by default the virt_spin_lock() hijack.
*
@@ -109,7 +152,9 @@ static inline bool virt_spin_lock(struct qspinlock *lock)

return true;
}
-#endif /* CONFIG_PARAVIRT */
+#else /* CONFIG_PARAVIRT */
+static inline void native_pv_lock_init(void) { }
+#endif /* !CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>

--
2.51.0