Date:   Wed, 22 May 2019 12:06:31 -0700 (PDT)
From:   Palmer Dabbelt <palmer@...ive.com>
To:     mark.rutland@....com
CC:     linux-kernel@...r.kernel.org, peterz@...radead.org,
        Will Deacon <will.deacon@....com>, aou@...s.berkeley.edu,
        Arnd Bergmann <arnd@...db.de>, bp@...en8.de,
        catalin.marinas@....com, davem@...emloft.net, fenghua.yu@...el.com,
        heiko.carstens@...ibm.com, herbert@...dor.apana.org.au,
        ink@...assic.park.msu.ru, jhogan@...nel.org, linux@...linux.org.uk,
        mark.rutland@....com, mattst88@...il.com, mingo@...nel.org,
        mpe@...erman.id.au, paul.burton@...s.com, paulus@...ba.org,
        ralf@...ux-mips.org, rth@...ddle.net, stable@...r.kernel.org,
        tglx@...utronix.de, tony.luck@...el.com, vgupta@...opsys.com
Subject:     Re: [PATCH 12/18] locking/atomic: riscv: use s64 for atomic64

On Wed, 22 May 2019 06:22:44 PDT (-0700), mark.rutland@....com wrote:
> As a step towards making the atomic64 API use consistent types treewide,
> let's have the s390 atomic64 implementation use s64 as the underlying

and apparently the RISC-V one as well? :)

> type for atomic64_t, rather than long, matching the generated headers.
>
> As atomic64_read() depends on the generic definition of atomic64_t, this
> still returns long on 64-bit. This will be converted in a subsequent
> patch.
>
> Otherwise, there should be no functional change as a result of this patch.
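
(For anyone skimming: the whole point of s64 over long is that long is
only 64 bits on LP64 targets, while s64 is 64 bits everywhere. A
minimal standalone sketch, not from the patch, with int64_t standing in
for the kernel's s64 typedef:

    #include <stdint.h>

    typedef int64_t s64;  /* the kernel's signed 64-bit type */

    /* Holds on rv32 and rv64 alike. */
    _Static_assert(sizeof(s64) == 8, "s64 is 64 bits on every target");

    /* The analogous assertion on long would fail on ILP32 targets such
     * as rv32, where sizeof(long) == 4, which is why an atomic64
     * implementation written in terms of long can't be shared
     * treewide. */

so once every arch uses s64 internally, the API can expose one type
consistently.)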
>
> Signed-off-by: Mark Rutland <mark.rutland@....com>
> Cc: Albert Ou <aou@...s.berkeley.edu>
> Cc: Palmer Dabbelt <palmer@...ive.com>
> Cc: Peter Zijlstra <peterz@...radead.org>
> Cc: Will Deacon <will.deacon@....com>
> ---
>  arch/riscv/include/asm/atomic.h | 44 +++++++++++++++++++++--------------------
>  1 file changed, 23 insertions(+), 21 deletions(-)
>
> diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
> index c9e18289d65c..bffebc57357d 100644
> --- a/arch/riscv/include/asm/atomic.h
> +++ b/arch/riscv/include/asm/atomic.h
> @@ -42,11 +42,11 @@ static __always_inline void atomic_set(atomic_t *v, int i)
>
>  #ifndef CONFIG_GENERIC_ATOMIC64
>  #define ATOMIC64_INIT(i) { (i) }
> -static __always_inline long atomic64_read(const atomic64_t *v)
> +static __always_inline s64 atomic64_read(const atomic64_t *v)
>  {
>  	return READ_ONCE(v->counter);
>  }
> -static __always_inline void atomic64_set(atomic64_t *v, long i)
> +static __always_inline void atomic64_set(atomic64_t *v, s64 i)
>  {
>  	WRITE_ONCE(v->counter, i);
>  }
> @@ -70,11 +70,11 @@ void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)		\
>
>  #ifdef CONFIG_GENERIC_ATOMIC64
>  #define ATOMIC_OPS(op, asm_op, I)					\
> -        ATOMIC_OP (op, asm_op, I, w,  int,   )
> +        ATOMIC_OP (op, asm_op, I, w, int,   )
>  #else
>  #define ATOMIC_OPS(op, asm_op, I)					\
> -        ATOMIC_OP (op, asm_op, I, w,  int,   )				\
> -        ATOMIC_OP (op, asm_op, I, d, long, 64)
> +        ATOMIC_OP (op, asm_op, I, w, int,   )				\
> +        ATOMIC_OP (op, asm_op, I, d, s64, 64)
>  #endif
>
>  ATOMIC_OPS(add, add,  i)
> @@ -131,14 +131,14 @@ c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)	\
>
>  #ifdef CONFIG_GENERIC_ATOMIC64
>  #define ATOMIC_OPS(op, asm_op, c_op, I)					\
> -        ATOMIC_FETCH_OP( op, asm_op,       I, w,  int,   )		\
> -        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w,  int,   )
> +        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )		\
> +        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )
>  #else
>  #define ATOMIC_OPS(op, asm_op, c_op, I)					\
> -        ATOMIC_FETCH_OP( op, asm_op,       I, w,  int,   )		\
> -        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w,  int,   )		\
> -        ATOMIC_FETCH_OP( op, asm_op,       I, d, long, 64)		\
> -        ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, long, 64)
> +        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )		\
> +        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )		\
> +        ATOMIC_FETCH_OP( op, asm_op,       I, d, s64, 64)		\
> +        ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
>  #endif
>
>  ATOMIC_OPS(add, add, +,  i)
> @@ -170,11 +170,11 @@ ATOMIC_OPS(sub, add, +, -i)
>
>  #ifdef CONFIG_GENERIC_ATOMIC64
>  #define ATOMIC_OPS(op, asm_op, I)					\
> -        ATOMIC_FETCH_OP(op, asm_op, I, w,  int,   )
> +        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )
>  #else
>  #define ATOMIC_OPS(op, asm_op, I)					\
> -        ATOMIC_FETCH_OP(op, asm_op, I, w,  int,   )			\
> -        ATOMIC_FETCH_OP(op, asm_op, I, d, long, 64)
> +        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )			\
> +        ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
>  #endif
>
>  ATOMIC_OPS(and, and, i)
> @@ -223,9 +223,10 @@ static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
>  #define atomic_fetch_add_unless atomic_fetch_add_unless
>
>  #ifndef CONFIG_GENERIC_ATOMIC64
> -static __always_inline long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
> +static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
>  {
> -       long prev, rc;
> +       s64 prev;
> +       long rc;
>
>  	__asm__ __volatile__ (
>  		"0:	lr.d     %[p],  %[c]\n"
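
(Makes sense that prev becomes s64 while rc stays long here: prev is
the value the function returns, but rc is just LR/SC scratch plus the
sc.d status flag, and this block is under #ifndef
CONFIG_GENERIC_ATOMIC64, so it is only built for RV64, where long is
64 bits anyway.)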
> @@ -294,11 +295,11 @@ c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n)	\
>
>  #ifdef CONFIG_GENERIC_ATOMIC64
>  #define ATOMIC_OPS()							\
> -	ATOMIC_OP( int,   , 4)
> +	ATOMIC_OP(int,   , 4)
>  #else
>  #define ATOMIC_OPS()							\
> -	ATOMIC_OP( int,   , 4)						\
> -	ATOMIC_OP(long, 64, 8)
> +	ATOMIC_OP(int,   , 4)						\
> +	ATOMIC_OP(s64, 64, 8)
>  #endif
>
>  ATOMIC_OPS()
> @@ -336,9 +337,10 @@ static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
>  #define atomic_dec_if_positive(v)	atomic_sub_if_positive(v, 1)
>
>  #ifndef CONFIG_GENERIC_ATOMIC64
> -static __always_inline long atomic64_sub_if_positive(atomic64_t *v, long offset)
> +static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
>  {
> -       long prev, rc;
> +       s64 prev;
> +       long rc;
>
>  	__asm__ __volatile__ (
>  		"0:	lr.d     %[p],  %[c]\n"
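
(FWIW, for anyone not fluent in the macro soup: after this change,
ATOMIC_OP(add, add, i, d, s64, 64) expands to roughly the following,
paraphrased rather than copied from the tree:

    static __always_inline void atomic64_add(s64 i, atomic64_t *v)
    {
            __asm__ __volatile__ (
                    "       amoadd.d zero, %1, %0"
                    : "+A" (v->counter)
                    : "r" (i)
                    : "memory");
    }

so the generated amoadd.d is unchanged; only the C-level type of i
moves from long to s64.)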

Reviewed-by: Palmer Dabbelt <palmer@...ive.com>

Thanks!
