Message-ID: <CAJF2gTSWCE-MCpwbiax79njqUUSQgLo_w=7SF7ejWavQQKCKZQ@mail.gmail.com>
Date:   Fri, 7 Aug 2020 08:23:53 +0800
From:   Guo Ren <guoren@...nel.org>
To:     Peter Zijlstra <peterz@...radead.org>
Cc:     Ren Guo <ren_guo@...ky.com>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
        linux-csky@...r.kernel.org, mathieu.desnoyers@...icios.com,
        Will Deacon <will@...nel.org>
Subject: Re: csky: smp_mb__after_spinlock

Acked-by: Guo Ren <guoren@...nel.org>
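
For context, a minimal sketch of the generic fallback this override shadows:
in include/linux/spinlock.h (v5.8-era), smp_mb__after_spinlock() defaults to
a no-op, so an architecture only needs to override it when its spin_lock()
is weaker than a full barrier:

	/* include/linux/spinlock.h, generic fallback (v5.8-era) */
	#ifndef smp_mb__after_spinlock
	#define smp_mb__after_spinlock()	do { } while (0)
	#endif

Since csky's arch_spin_lock() already issues an explicit smp_mb(), as noted
below, the default no-op gives full-barrier semantics anyway and the csky
override is redundant.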

On Thu, Aug 6, 2020 at 3:55 AM <peterz@...radead.org> wrote:
>
> On Wed, Aug 05, 2020 at 12:41:46PM +0200, peterz@...radead.org wrote:
> > Hi,
> >
> > While doing an audit of smp_mb__after_spinlock, I found that csky
> > defines it. Why?
> >
> > CSKY only has smp_mb(); it doesn't override __atomic_acquire_fence or
> > otherwise special-case its atomic*_acquire() primitives. It has an
> > explicit smp_mb() in its arch_spin_lock().
>
> Also, why have two implementations of all the locking?
>
> ---
> diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
> index bd31ab12f77d..332738e93e57 100644
> --- a/arch/csky/Kconfig
> +++ b/arch/csky/Kconfig
> @@ -7,7 +7,7 @@ config CSKY
>         select ARCH_HAS_SYNC_DMA_FOR_CPU
>         select ARCH_HAS_SYNC_DMA_FOR_DEVICE
>         select ARCH_USE_BUILTIN_BSWAP
> -       select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2
> +       select ARCH_USE_QUEUED_RWLOCKS
>         select ARCH_WANT_FRAME_POINTERS if !CPU_CK610
>         select COMMON_CLK
>         select CLKSRC_MMIO
> diff --git a/arch/csky/include/asm/spinlock.h b/arch/csky/include/asm/spinlock.h
> index 7cf3f2b34cea..69f5aa249c5f 100644
> --- a/arch/csky/include/asm/spinlock.h
> +++ b/arch/csky/include/asm/spinlock.h
> @@ -6,8 +6,6 @@
>  #include <linux/spinlock_types.h>
>  #include <asm/barrier.h>
>
> -#ifdef CONFIG_QUEUED_RWLOCKS
> -
>  /*
>   * Ticket-based spin-locking.
>   */
> @@ -88,169 +86,4 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
>
>  #include <asm/qrwlock.h>
>
> -/* See include/linux/spinlock.h */
> -#define smp_mb__after_spinlock()       smp_mb()
> -
> -#else /* CONFIG_QUEUED_RWLOCKS */
> -
> -/*
> - * Test-and-set spin-locking.
> - */
> -static inline void arch_spin_lock(arch_spinlock_t *lock)
> -{
> -       u32 *p = &lock->lock;
> -       u32 tmp;
> -
> -       asm volatile (
> -               "1:     ldex.w          %0, (%1) \n"
> -               "       bnez            %0, 1b   \n"
> -               "       movi            %0, 1    \n"
> -               "       stex.w          %0, (%1) \n"
> -               "       bez             %0, 1b   \n"
> -               : "=&r" (tmp)
> -               : "r"(p)
> -               : "cc");
> -       smp_mb();
> -}
> -
> -static inline void arch_spin_unlock(arch_spinlock_t *lock)
> -{
> -       smp_mb();
> -       WRITE_ONCE(lock->lock, 0);
> -}
> -
> -static inline int arch_spin_trylock(arch_spinlock_t *lock)
> -{
> -       u32 *p = &lock->lock;
> -       u32 tmp;
> -
> -       asm volatile (
> -               "1:     ldex.w          %0, (%1) \n"
> -               "       bnez            %0, 2f   \n"
> -               "       movi            %0, 1    \n"
> -               "       stex.w          %0, (%1) \n"
> -               "       bez             %0, 1b   \n"
> -               "       movi            %0, 0    \n"
> -               "2:                              \n"
> -               : "=&r" (tmp)
> -               : "r"(p)
> -               : "cc");
> -
> -       if (!tmp)
> -               smp_mb();
> -
> -       return !tmp;
> -}
> -
> -#define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0)
> -
> -/*
> - * read lock/unlock/trylock
> - */
> -static inline void arch_read_lock(arch_rwlock_t *lock)
> -{
> -       u32 *p = &lock->lock;
> -       u32 tmp;
> -
> -       asm volatile (
> -               "1:     ldex.w          %0, (%1) \n"
> -               "       blz             %0, 1b   \n"
> -               "       addi            %0, 1    \n"
> -               "       stex.w          %0, (%1) \n"
> -               "       bez             %0, 1b   \n"
> -               : "=&r" (tmp)
> -               : "r"(p)
> -               : "cc");
> -       smp_mb();
> -}
> -
> -static inline void arch_read_unlock(arch_rwlock_t *lock)
> -{
> -       u32 *p = &lock->lock;
> -       u32 tmp;
> -
> -       smp_mb();
> -       asm volatile (
> -               "1:     ldex.w          %0, (%1) \n"
> -               "       subi            %0, 1    \n"
> -               "       stex.w          %0, (%1) \n"
> -               "       bez             %0, 1b   \n"
> -               : "=&r" (tmp)
> -               : "r"(p)
> -               : "cc");
> -}
> -
> -static inline int arch_read_trylock(arch_rwlock_t *lock)
> -{
> -       u32 *p = &lock->lock;
> -       u32 tmp;
> -
> -       asm volatile (
> -               "1:     ldex.w          %0, (%1) \n"
> -               "       blz             %0, 2f   \n"
> -               "       addi            %0, 1    \n"
> -               "       stex.w          %0, (%1) \n"
> -               "       bez             %0, 1b   \n"
> -               "       movi            %0, 0    \n"
> -               "2:                              \n"
> -               : "=&r" (tmp)
> -               : "r"(p)
> -               : "cc");
> -
> -       if (!tmp)
> -               smp_mb();
> -
> -       return !tmp;
> -}
> -
> -/*
> - * write lock/unlock/trylock
> - */
> -static inline void arch_write_lock(arch_rwlock_t *lock)
> -{
> -       u32 *p = &lock->lock;
> -       u32 tmp;
> -
> -       asm volatile (
> -               "1:     ldex.w          %0, (%1) \n"
> -               "       bnez            %0, 1b   \n"
> -               "       subi            %0, 1    \n"
> -               "       stex.w          %0, (%1) \n"
> -               "       bez             %0, 1b   \n"
> -               : "=&r" (tmp)
> -               : "r"(p)
> -               : "cc");
> -       smp_mb();
> -}
> -
> -static inline void arch_write_unlock(arch_rwlock_t *lock)
> -{
> -       smp_mb();
> -       WRITE_ONCE(lock->lock, 0);
> -}
> -
> -static inline int arch_write_trylock(arch_rwlock_t *lock)
> -{
> -       u32 *p = &lock->lock;
> -       u32 tmp;
> -
> -       asm volatile (
> -               "1:     ldex.w          %0, (%1) \n"
> -               "       bnez            %0, 2f   \n"
> -               "       subi            %0, 1    \n"
> -               "       stex.w          %0, (%1) \n"
> -               "       bez             %0, 1b   \n"
> -               "       movi            %0, 0    \n"
> -               "2:                              \n"
> -               : "=&r" (tmp)
> -               : "r"(p)
> -               : "cc");
> -
> -       if (!tmp)
> -               smp_mb();
> -
> -       return !tmp;
> -}
> -
> -#endif /* CONFIG_QUEUED_RWLOCKS */
>  #endif /* __ASM_CSKY_SPINLOCK_H */
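
For readers skimming the removal above: the deleted fallback is a plain
test-and-set lock built on csky's ldex.w/stex.w (load-exclusive /
store-exclusive) pair. A rough portable sketch of the same idea, using C11
atomics rather than the real LL/SC asm (acquire/release orderings stand in
for the kernel's explicit smp_mb() calls):

	#include <stdatomic.h>

	typedef struct { atomic_uint lock; } tas_lock;

	static inline void tas_lock_acquire(tas_lock *l)
	{
		/* spin until the exchange observes 0 (unlocked) */
		while (atomic_exchange_explicit(&l->lock, 1,
						memory_order_acquire))
			;
	}

	static inline void tas_lock_release(tas_lock *l)
	{
		atomic_store_explicit(&l->lock, 0, memory_order_release);
	}

Unlike the ticket lock that is kept, this grants the lock to whichever CPU's
store-exclusive happens to succeed, so it has no FIFO fairness; one more
reason not to carry both implementations.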
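And a sketch of the ticket lock that remains as the sole spinlock
implementation, again in illustrative C11 rather than csky's actual encoding
(which, if I read spinlock_types.h right, packs both counters into a single
32-bit word):

	#include <stdatomic.h>

	typedef struct {
		atomic_uint next;	/* next ticket to hand out */
		atomic_uint owner;	/* ticket now being served */
	} ticket_lock;

	static inline void ticket_lock_acquire(ticket_lock *l)
	{
		/* take a ticket, then wait for our number to come up */
		unsigned int me = atomic_fetch_add_explicit(&l->next, 1,
						memory_order_relaxed);
		while (atomic_load_explicit(&l->owner,
					    memory_order_acquire) != me)
			;
	}

	static inline void ticket_lock_release(ticket_lock *l)
	{
		unsigned int cur = atomic_load_explicit(&l->owner,
						memory_order_relaxed);
		atomic_store_explicit(&l->owner, cur + 1,
				      memory_order_release);
	}

Waiters are served strictly in arrival order, which is the main argument for
keeping this variant.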



-- 
Best Regards
 Guo Ren

ML: https://lore.kernel.org/linux-csky/
