Date:   Sun, 16 May 2021 06:47:55 +0900
From:   Stafford Horne <shorne@...il.com>
To:     Mark Rutland <mark.rutland@....com>
Cc:     linux-kernel@...r.kernel.org, will@...nel.org,
        boqun.feng@...il.com, peterz@...radead.org, aou@...s.berkeley.edu,
        arnd@...db.de, bcain@...eaurora.org, benh@...nel.crashing.org,
        chris@...kel.net, dalias@...c.org, davem@...emloft.net,
        deanbo422@...il.com, deller@....de, geert@...ux-m68k.org,
        green.hu@...il.com, guoren@...nel.org, ink@...assic.park.msu.ru,
        James.Bottomley@...senpartnership.com, jcmvbkbc@...il.com,
        jonas@...thpole.se, ley.foon.tan@...el.com, linux@...linux.org.uk,
        mattst88@...il.com, monstr@...str.eu, mpe@...erman.id.au,
        nickhu@...estech.com, palmer@...belt.com, paulus@...ba.org,
        paul.walmsley@...ive.com, rth@...ddle.net,
        stefan.kristiansson@...nalahti.fi, tsbogend@...ha.franken.de,
        vgupta@...opsys.com, ysato@...rs.sourceforge.jp
Subject: Re: [PATCH 25/33] locking/atomic: openrisc: move to ARCH_ATOMIC

On Mon, May 10, 2021 at 10:37:45AM +0100, Mark Rutland wrote:
> We'd like all architectures to convert to ARCH_ATOMIC, as once all
> architectures are converted it will be possible to make significant
> cleanups to the atomics headers, and this will make it much easier to
> generically enable atomic functionality (e.g. debug logic in the
> instrumented wrappers).
> 
> As a step towards that, this patch migrates openrisc to ARCH_ATOMIC. The
> arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
> code wraps these with optional instrumentation to provide the regular
> functions.
> 
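If I understand the shape of the series correctly, this is where the payoff
is: once the arch only provides arch_atomic_*(), the common code can layer
the KASAN/KCSAN instrumentation on top generically.  A sketch of what the
generated wrapper in include/asm-generic/atomic-instrumented.h looks like
(simplified from what scripts/atomic/gen-atomic-instrumented.sh emits, not
the verbatim generated code):

	static __always_inline void
	atomic_add(int i, atomic_t *v)
	{
		/* common code: tell KASAN/KCSAN about the RMW access */
		instrument_atomic_read_write(v, sizeof(*v));
		/* then defer to the arch-provided implementation */
		arch_atomic_add(i, v);
	}
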
> Signed-off-by: Mark Rutland <mark.rutland@....com>
> Cc: Boqun Feng <boqun.feng@...il.com>
> Cc: Jonas Bonn <jonas@...thpole.se>
> Cc: Peter Zijlstra <peterz@...radead.org>
> Cc: Stafford Horne <shorne@...il.com>
> Cc: Stefan Kristiansson <stefan.kristiansson@...nalahti.fi>
> Cc: Will Deacon <will@...nel.org>
> ---
>  arch/openrisc/Kconfig               |  1 +
>  arch/openrisc/include/asm/atomic.h  | 42 ++++++++++++++++++++-----------------
>  arch/openrisc/include/asm/cmpxchg.h |  4 ++--
>  3 files changed, 26 insertions(+), 21 deletions(-)
> 
> diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
> index 591acc5990dc..8c50bc9674f5 100644
> --- a/arch/openrisc/Kconfig
> +++ b/arch/openrisc/Kconfig
> @@ -7,6 +7,7 @@
>  config OPENRISC
>  	def_bool y
>  	select ARCH_32BIT_OFF_T
> +	select ARCH_ATOMIC
>  	select ARCH_HAS_DMA_SET_UNCACHED
>  	select ARCH_HAS_DMA_CLEAR_UNCACHED
>  	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
> diff --git a/arch/openrisc/include/asm/atomic.h b/arch/openrisc/include/asm/atomic.h
> index cb86970d3859..326167e4783a 100644
> --- a/arch/openrisc/include/asm/atomic.h
> +++ b/arch/openrisc/include/asm/atomic.h
> @@ -13,7 +13,7 @@
>  
>  /* Atomically perform op with v->counter and i */
>  #define ATOMIC_OP(op)							\
> -static inline void atomic_##op(int i, atomic_t *v)			\
> +static inline void arch_atomic_##op(int i, atomic_t *v)			\
>  {									\
>  	int tmp;							\
>  									\
> @@ -30,7 +30,7 @@ static inline void atomic_##op(int i, atomic_t *v)			\
>  
>  /* Atomically perform op with v->counter and i, return the result */
>  #define ATOMIC_OP_RETURN(op)						\
> -static inline int atomic_##op##_return(int i, atomic_t *v)		\
> +static inline int arch_atomic_##op##_return(int i, atomic_t *v)		\
>  {									\
>  	int tmp;							\
>  									\
> @@ -49,7 +49,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v)		\
>  
>  /* Atomically perform op with v->counter and i, return orig v->counter */
>  #define ATOMIC_FETCH_OP(op)						\
> -static inline int atomic_fetch_##op(int i, atomic_t *v)			\
> +static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
>  {									\
>  	int tmp, old;							\
>  									\
> @@ -75,6 +75,8 @@ ATOMIC_FETCH_OP(and)
>  ATOMIC_FETCH_OP(or)
>  ATOMIC_FETCH_OP(xor)
>  
> +ATOMIC_OP(add)
> +ATOMIC_OP(sub)
>  ATOMIC_OP(and)
>  ATOMIC_OP(or)
>  ATOMIC_OP(xor)
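
FWIW, ATOMIC_OP() here is the usual OpenRISC l.lwa/l.swa retry loop, so the
new ATOMIC_OP(add)/ATOMIC_OP(sub) instantiations provide arch_atomic_add()
and arch_atomic_sub() directly.  From memory (the macro body is outside the
hunk context, so treat this as a sketch rather than the exact file
contents), ATOMIC_OP(add) expands to roughly:

	static inline void arch_atomic_add(int i, atomic_t *v)
	{
		int tmp;

		__asm__ __volatile__(
			"1:	l.lwa	%0,0(%1)	\n"	/* load word atomic (takes reservation) */
			"	l.add	%0,%0,%2	\n"	/* tmp += i */
			"	l.swa	0(%1),%0	\n"	/* store word atomic (sets flag on success) */
			"	l.bnf	1b		\n"	/* retry if the store failed */
			"	 l.nop			\n"
			: "=&r"(tmp)
			: "r"(&v->counter), "r"(i)
			: "cc", "memory");
	}
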
> @@ -83,16 +85,18 @@ ATOMIC_OP(xor)
>  #undef ATOMIC_OP_RETURN
>  #undef ATOMIC_OP
>  
> -#define atomic_add_return	atomic_add_return
> -#define atomic_sub_return	atomic_sub_return
> -#define atomic_fetch_add	atomic_fetch_add
> -#define atomic_fetch_sub	atomic_fetch_sub
> -#define atomic_fetch_and	atomic_fetch_and
> -#define atomic_fetch_or		atomic_fetch_or
> -#define atomic_fetch_xor	atomic_fetch_xor
> -#define atomic_and	atomic_and
> -#define atomic_or	atomic_or
> -#define atomic_xor	atomic_xor
> +#define arch_atomic_add_return	arch_atomic_add_return
> +#define arch_atomic_sub_return	arch_atomic_sub_return
> +#define arch_atomic_fetch_add	arch_atomic_fetch_add
> +#define arch_atomic_fetch_sub	arch_atomic_fetch_sub
> +#define arch_atomic_fetch_and	arch_atomic_fetch_and
> +#define arch_atomic_fetch_or	arch_atomic_fetch_or
> +#define arch_atomic_fetch_xor	arch_atomic_fetch_xor
> +#define arch_atomic_add		arch_atomic_add
> +#define arch_atomic_sub		arch_atomic_sub
> +#define arch_atomic_and		arch_atomic_and
> +#define arch_atomic_or		arch_atomic_or
> +#define arch_atomic_xor		arch_atomic_xor
>  
>  /*
>   * Atomically add a to v->counter as long as v is not already u.
> @@ -100,7 +104,7 @@ ATOMIC_OP(xor)
>   *
>   * This is often used through atomic_inc_not_zero()
>   */
> -static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
> +static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
>  {
>  	int old, tmp;
>  
> @@ -119,14 +123,14 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
>  
>  	return old;
>  }
> -#define atomic_fetch_add_unless	atomic_fetch_add_unless
> +#define arch_atomic_fetch_add_unless	arch_atomic_fetch_add_unless
>  
> -#define atomic_read(v)			READ_ONCE((v)->counter)
> -#define atomic_set(v,i)			WRITE_ONCE((v)->counter, (i))
> +#define arch_atomic_read(v)		READ_ONCE((v)->counter)
> +#define arch_atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
>  
>  #include <asm/cmpxchg.h>
>  
> -#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
> -#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
> +#define arch_atomic_xchg(ptr, v)		(arch_xchg(&(ptr)->counter, (v)))
> +#define arch_atomic_cmpxchg(v, old, new)	(arch_cmpxchg(&((v)->counter), (old), (new)))
>  
>  #endif /* __ASM_OPENRISC_ATOMIC_H */
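
As the comment above says, arch_atomic_fetch_add_unless() is mostly consumed
via atomic_inc_not_zero(), which the generic fallbacks derive from it.  The
chain looks roughly like this (a sketch along the lines of
scripts/atomic/fallbacks/, not the verbatim generated code):

	static __always_inline bool
	arch_atomic_add_unless(atomic_t *v, int a, int u)
	{
		/* true if we added, i.e. the old value was not u */
		return arch_atomic_fetch_add_unless(v, a, u) != u;
	}

	static __always_inline bool
	arch_atomic_inc_not_zero(atomic_t *v)
	{
		return arch_atomic_add_unless(v, 1, 0);
	}
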
> diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
> index f9cd43a39d72..79fd16162ccb 100644
> --- a/arch/openrisc/include/asm/cmpxchg.h
> +++ b/arch/openrisc/include/asm/cmpxchg.h
> @@ -132,7 +132,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
>  	}
>  }
>  
> -#define cmpxchg(ptr, o, n)						\
> +#define arch_cmpxchg(ptr, o, n)						\
>  	({								\
>  		(__typeof__(*(ptr))) __cmpxchg((ptr),			\
>  					       (unsigned long)(o),	\
> @@ -161,7 +161,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long with,
>  	}
>  }
>  
> -#define xchg(ptr, with) 						\
> +#define arch_xchg(ptr, with) 						\
>  	({								\
>  		(__typeof__(*(ptr))) __xchg((ptr),			\
>  					    (unsigned long)(with),	\
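
Same story for the plain cmpxchg()/xchg(): with the arch_ prefix in place,
the instrumented layer can wrap these too.  Roughly (again a simplified
sketch from memory, not the verbatim output of gen-atomic-instrumented.sh):

	#define cmpxchg(ptr, ...)						\
	({									\
		typeof(ptr) __ai_ptr = (ptr);					\
		/* record the access for KASAN/KCSAN, then defer */		\
		instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));		\
		arch_cmpxchg(__ai_ptr, __VA_ARGS__);				\
	})
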

I was questioning some of this, so I took your branch for a spin.  It builds
and boots, and all looks good to me on OpenRISC.

The changes all look good; I was just wondering whether all of the
refactoring had been done correctly for OpenRISC.

Acked-by: Stafford Horne <shorne@...il.com>
