Message-ID: <CAJF2gTQ0cmGJHPR=TzeDJDigiEgBL5-KabbR2WkS=dGJV7jSJA@mail.gmail.com>
Date: Mon, 4 Jul 2022 21:13:40 +0800
From: Guo Ren <guoren@...nel.org>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Palmer Dabbelt <palmer@...osinc.com>,
Arnd Bergmann <arnd@...db.de>, Ingo Molnar <mingo@...hat.com>,
Will Deacon <will@...nel.org>,
Waiman Long <longman@...hat.com>,
Boqun Feng <boqun.feng@...il.com>,
linux-riscv <linux-riscv@...ts.infradead.org>,
linux-arch <linux-arch@...r.kernel.org>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
Guo Ren <guoren@...ux.alibaba.com>
Subject: Re: [PATCH V7 4/5] asm-generic: spinlock: Add combo spinlock (ticket
& queued)
On Mon, Jul 4, 2022 at 5:58 PM Peter Zijlstra <peterz@...radead.org> wrote:
>
> On Tue, Jun 28, 2022 at 04:17:06AM -0400, guoren@...nel.org wrote:
> > From: Guo Ren <guoren@...ux.alibaba.com>
> >
> > Some architectures have flexible requirements on the type of spinlock.
> > Some LL/SC ISAs do not force the micro-architecture to provide a strong
> > forward-progress guarantee, so micro-architectures with different memory
> > models can implement the same ISA. The ticket lock suits LL/SC
> > micro-architectures built around an exclusive monitor, with a limited
> > number of cores and no NUMA. The queued spinlock handles NUMA and
> > large-scale scenarios on LL/SC micro-architectures that do provide a
> > strong forward-progress guarantee.
> >
> > So, make the spinlock a combo of the two, selected by a feature.
> >
> > Signed-off-by: Guo Ren <guoren@...ux.alibaba.com>
> > Signed-off-by: Guo Ren <guoren@...nel.org>
> > Cc: Peter Zijlstra (Intel) <peterz@...radead.org>
> > Cc: Arnd Bergmann <arnd@...db.de>
> > Cc: Palmer Dabbelt <palmer@...osinc.com>
> > ---
> > include/asm-generic/spinlock.h | 43 ++++++++++++++++++++++++++++++++--
> > kernel/locking/qspinlock.c | 2 ++
> > 2 files changed, 43 insertions(+), 2 deletions(-)
> >
> > diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h
> > index f41dc7c2b900..a9b43089bf99 100644
> > --- a/include/asm-generic/spinlock.h
> > +++ b/include/asm-generic/spinlock.h
> > @@ -28,34 +28,73 @@
> > #define __ASM_GENERIC_SPINLOCK_H
> >
> > #include <asm-generic/ticket_spinlock.h>
> > +#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
> > +#include <linux/jump_label.h>
> > +#include <asm-generic/qspinlock.h>
> > +
> > +DECLARE_STATIC_KEY_TRUE(use_qspinlock_key);
> > +#endif
> > +
> > +#undef arch_spin_is_locked
> > +#undef arch_spin_is_contended
> > +#undef arch_spin_value_unlocked
> > +#undef arch_spin_lock
> > +#undef arch_spin_trylock
> > +#undef arch_spin_unlock
> >
> > static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
> > {
> > - ticket_spin_lock(lock);
> > +#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
> > + if (static_branch_likely(&use_qspinlock_key))
> > + queued_spin_lock(lock);
> > + else
> > +#endif
> > + ticket_spin_lock(lock);
> > }
> >
> > static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock)
> > {
> > +#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
> > + if (static_branch_likely(&use_qspinlock_key))
> > + return queued_spin_trylock(lock);
> > +#endif
> > return ticket_spin_trylock(lock);
> > }
> >
> > static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
> > {
> > - ticket_spin_unlock(lock);
> > +#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
> > + if (static_branch_likely(&use_qspinlock_key))
> > + queued_spin_unlock(lock);
> > + else
> > +#endif
> > + ticket_spin_unlock(lock);
> > }
> >
> > static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock)
> > {
> > +#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
> > + if (static_branch_likely(&use_qspinlock_key))
> > + return queued_spin_is_locked(lock);
> > +#endif
> > return ticket_spin_is_locked(lock);
> > }
> >
> > static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
> > {
> > +#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
> > + if (static_branch_likely(&use_qspinlock_key))
> > + return queued_spin_is_contended(lock);
> > +#endif
> > return ticket_spin_is_contended(lock);
> > }
> >
> > static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
> > {
> > +#ifdef CONFIG_ARCH_USE_QUEUED_SPINLOCKS
> > + if (static_branch_likely(&use_qspinlock_key))
> > + return queued_spin_value_unlocked(lock);
> > +#endif
> > return ticket_spin_value_unlocked(lock);
> > }
>
> Urggghhhh....
>
> I really don't think you want this in generic code.
Your advice is the same as Arnd's; I will move the static_branch out of
the generic code.

> Also, I'm thinking any arch that does this wants to make sure it
> doesn't inline any of this stuff. That is, said arch must not have
> ARCH_INLINE_SPIN_*
What do you mean? I've tested with ARCH_INLINE_SPIN_* enabled and it
works, provided the key is exported with
EXPORT_SYMBOL(use_qspinlock_key).
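
For context, the arch side would define and export the key roughly like
this. This is only a sketch: the cpu_has_qspinlock() detection helper is
hypothetical, not part of the posted patch.

#include <linux/init.h>
#include <linux/export.h>
#include <linux/jump_label.h>

DEFINE_STATIC_KEY_TRUE(use_qspinlock_key);
EXPORT_SYMBOL(use_qspinlock_key);

static int __init setup_spinlock_type(void)
{
	/*
	 * Fall back to ticket locks when the micro-arch gives no strong
	 * forward-progress guarantee for LL/SC. The detection helper is
	 * hypothetical; a real arch would probe this from firmware or
	 * an ISA extension.
	 */
	if (!cpu_has_qspinlock())
		static_branch_disable(&use_qspinlock_key);
	return 0;
}
early_initcall(setup_spinlock_type);

The Kconfig change I tested with is: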
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 47e12ab9c822..4587fb544326 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -32,6 +32,32 @@ config RISCV
select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_INLINE_READ_LOCK if !PREEMPTION
+ select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
+ select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
+ select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
+ select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
+ select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
+ select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
+ select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
+ select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION
+ select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION
+ select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION
+ select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION
+ select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION
+ select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION
+ select ARCH_INLINE_SPIN_LOCK if !PREEMPTION
+ select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION
+ select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION
+ select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION
+ select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
+ select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
+ select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
select ARCH_STACKWALK
Shall I add the above diff to the next version of the patch series?
>
> And if you're going to force things out of line, then I think you can
> get better code using static_call().
Good point, thx.
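For anyone following along, here is a minimal sketch of what a
static_call()-based dispatch might look like, assuming out-of-line
ticket_spin_lock()/queued_spin_lock() implementations. The pv_spin_*
names and switch_to_queued_spinlocks() are hypothetical, not from the
posted patch.

#include <linux/init.h>
#include <linux/static_call.h>

/* Default to ticket locks; targets can be repointed at boot. */
DEFINE_STATIC_CALL(pv_spin_lock, ticket_spin_lock);
DEFINE_STATIC_CALL(pv_spin_unlock, ticket_spin_unlock);

void arch_spin_lock(arch_spinlock_t *lock)
{
	static_call(pv_spin_lock)(lock);
}

void arch_spin_unlock(arch_spinlock_t *lock)
{
	static_call(pv_spin_unlock)(lock);
}

/*
 * Switch the call targets once at boot, before SMP comes up, when the
 * micro-arch is known to provide strong LL/SC forward progress.
 */
void __init switch_to_queued_spinlocks(void)
{
	static_call_update(pv_spin_lock, queued_spin_lock);
	static_call_update(pv_spin_unlock, queued_spin_unlock);
}

On arches with HAVE_STATIC_CALL this patches the call site into a
direct call, so after the update there is no branch at all, whereas the
static_branch version keeps both lock bodies behind a (patched)
conditional jump.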
>
> *shudder*...
--
Best Regards
Guo Ren
ML: https://lore.kernel.org/linux-csky/