Message-Id: <20220704143820.3071004-10-npiggin@gmail.com>
Date:   Tue,  5 Jul 2022 00:38:16 +1000
From:   Nicholas Piggin <npiggin@...il.com>
To:     Peter Zijlstra <peterz@...radead.org>
Cc:     Nicholas Piggin <npiggin@...il.com>,
        Ingo Molnar <mingo@...hat.com>, Will Deacon <will@...nel.org>,
        Waiman Long <longman@...hat.com>,
        Boqun Feng <boqun.feng@...il.com>, linux-kernel@...r.kernel.org
Subject: [PATCH 09/13] locking/qspinlock: rename __pv_init_lock_hash to pv_spinlocks_init

The caller should not have to be aware of what the implementation's
initialisation does.
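
As a rough illustration of the pattern the rename enforces, here is a
minimal userspace C sketch (not kernel code): callers invoke one generic
pv_spinlocks_init() and the implementation detail (sizing a lock hash)
stays hidden behind it. The PARAVIRT_SPINLOCKS macro and the hash sizing
below are illustrative stand-ins, not the kernel's real config symbols.

	#include <stdio.h>

	#define PARAVIRT_SPINLOCKS 1

	#if PARAVIRT_SPINLOCKS
	/* Implementation-side init: callers never see the hash detail. */
	static void pv_spinlocks_init(void)
	{
		int hash_size = 4 * 8;	/* e.g. 4 entries per possible CPU */

		printf("pv lock hash sized to %d entries\n", hash_size);
	}
	#else
	/* Without paravirt spinlocks the generic init is a no-op. */
	static inline void pv_spinlocks_init(void) { }
	#endif

	int main(void)
	{
		/* Hypervisor setup code only needs the generic entry point. */
		pv_spinlocks_init();
		return 0;
	}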

Signed-off-by: Nicholas Piggin <npiggin@...il.com>
---
 arch/powerpc/include/asm/qspinlock.h | 7 -------
 arch/powerpc/include/asm/spinlock.h  | 2 +-
 arch/x86/hyperv/hv_spinlock.c        | 2 +-
 arch/x86/include/asm/qspinlock.h     | 1 -
 arch/x86/kernel/kvm.c                | 2 +-
 arch/x86/xen/spinlock.c              | 2 +-
 include/asm-generic/qspinlock.h      | 6 ++++++
 kernel/locking/qspinlock.c           | 2 +-
 8 files changed, 11 insertions(+), 13 deletions(-)

diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index dd231c756233..39c1c7f80579 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -53,13 +53,6 @@ static __always_inline void pv_kick(int cpu)
 	prod_cpu(cpu);
 }
 
-extern void __pv_init_lock_hash(void);
-
-static inline void pv_spinlocks_init(void)
-{
-	__pv_init_lock_hash();
-}
-
 #endif
 
 /*
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index bd75872a6334..7dafca8e3f02 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -13,7 +13,7 @@
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()	smp_mb()
 
-#ifndef CONFIG_PARAVIRT_SPINLOCKS
+#ifndef CONFIG_PPC_QUEUED_SPINLOCKS
 static inline void pv_spinlocks_init(void) { }
 #endif
 
diff --git a/arch/x86/hyperv/hv_spinlock.c b/arch/x86/hyperv/hv_spinlock.c
index 91cfe698bde0..c7b5c3211c79 100644
--- a/arch/x86/hyperv/hv_spinlock.c
+++ b/arch/x86/hyperv/hv_spinlock.c
@@ -76,7 +76,7 @@ void __init hv_init_spinlocks(void)
 	}
 	pr_info("PV spinlocks enabled\n");
 
-	__pv_init_lock_hash();
+	pv_spinlocks_init();
 	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 	pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
 	pv_ops.lock.wait = hv_qlock_wait;
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 603ad61e9dfe..9a03fcc1b2b7 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -29,7 +29,6 @@ static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lo
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-extern void __pv_init_lock_hash(void);
 extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
 extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
 extern bool nopvspin;
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 1a3658f7e6d9..98a2c4d3e91d 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -1106,7 +1106,7 @@ void __init kvm_spinlock_init(void)
 
 	pr_info("PV spinlocks enabled\n");
 
-	__pv_init_lock_hash();
+	pv_spinlocks_init();
 	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 	pv_ops.lock.queued_spin_unlock =
 		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 043c73dfd2c9..5145c4aec4ea 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -135,7 +135,7 @@ void __init xen_init_spinlocks(void)
 	}
 	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
 
-	__pv_init_lock_hash();
+	pv_spinlocks_init();
 	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 	pv_ops.lock.queued_spin_unlock =
 		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 995513fa2690..e0fb29ee1adc 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -147,4 +147,10 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 #define arch_spin_trylock(l)		queued_spin_trylock(l)
 #define arch_spin_unlock(l)		queued_spin_unlock(l)
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void pv_spinlocks_init(void);
+#else
+static inline void pv_spinlocks_init(void) { }
+#endif
+
 #endif /* __ASM_GENERIC_QSPINLOCK_H */
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 412b83040bac..1d5b3443772c 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -403,7 +403,7 @@ static unsigned int pv_lock_hash_bits __read_mostly;
  * This function should be called from the paravirt spinlock initialization
  * routine.
  */
-void __init __pv_init_lock_hash(void)
+void __init pv_spinlocks_init(void)
 {
 	int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE);
 
-- 
2.35.1
