Message-ID: <20160422164157.GB3369@insomnia>
Date:	Sat, 23 Apr 2016 00:41:57 +0800
From:	Boqun Feng <boqun.feng@...il.com>
To:	Peter Zijlstra <peterz@...radead.org>
Cc:	torvalds@...ux-foundation.org, mingo@...nel.org,
	tglx@...utronix.de, will.deacon@....com,
	paulmck@...ux.vnet.ibm.com, waiman.long@....com,
	fweisbec@...il.com, linux-kernel@...r.kernel.org,
	linux-arch@...r.kernel.org, rth@...ddle.net, vgupta@...opsys.com,
	linux@....linux.org.uk, egtvedt@...fundet.no, realmz6@...il.com,
	ysato@...rs.sourceforge.jp, rkuo@...eaurora.org,
	tony.luck@...el.com, geert@...ux-m68k.org, james.hogan@...tec.com,
	ralf@...ux-mips.org, dhowells@...hat.com, jejb@...isc-linux.org,
	mpe@...erman.id.au, schwidefsky@...ibm.com, dalias@...c.org,
	davem@...emloft.net, cmetcalf@...lanox.com, jcmvbkbc@...il.com,
	arnd@...db.de, dbueso@...e.de, fengguang.wu@...el.com
Subject: Re: [RFC][PATCH 18/31] locking,powerpc: Implement
 atomic{,64}_fetch_{add,sub,and,or,xor}{,_relaxed,_acquire,_release}()

On Fri, Apr 22, 2016 at 11:04:31AM +0200, Peter Zijlstra wrote:
> Implement the FETCH-OP atomic primitives. These are very similar to
> the existing OP-RETURN primitives, except they return the value of
> the atomic variable _before_ modification.
> 
> This is especially useful for irreversible operations -- such as
> bitops (because it becomes impossible to reconstruct the state prior
> to modification).
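
(To make the difference concrete, a minimal usage sketch, assuming the
API this series adds -- both calls atomically do v += a and differ only
in which value they hand back:)

	atomic_t v = ATOMIC_INIT(1);
	int old, new;

	old = atomic_fetch_add_relaxed(2, &v);	/* old == 1, v is now 3 */
	new = atomic_add_return_relaxed(2, &v);	/* new == 5, v is now 5 */
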
> 
> 
> Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
> ---
>  arch/powerpc/include/asm/atomic.h |   83 +++++++++++++++++++++++++++++++++-----
>  1 file changed, 74 insertions(+), 9 deletions(-)
> 
> --- a/arch/powerpc/include/asm/atomic.h
> +++ b/arch/powerpc/include/asm/atomic.h
> @@ -78,21 +78,53 @@ static inline int atomic_##op##_return_r
>  	return t;							\
>  }
>  
> +#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
> +static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
> +{									\
> +	int res, t;							\
> +									\
> +	__asm__ __volatile__(						\
> +"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
> +	#asm_op " %1,%2,%0\n"						\

Should be

	#asm_op " %1,%3,%0\n"

right? The operands are numbered outputs first, then inputs: %0 is
res, %1 is t, %2 is the "+m" (v->counter) memory operand, and only
then come the inputs %3 (@a) and %4 (&v->counter). (The atomic64_
variant of this macro below already uses %3.)
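
To spell the numbering out, here is a sketch of the corrected loop
(names are mine, not the patch's; assumes a 32-bit powerpc toolchain
and omits the PPC405_ERR77 workaround for brevity):

	static inline int fetch_add_relaxed_sketch(int a, int *counter)
	{
		int res, t;

		__asm__ __volatile__(
	"1:	lwarx	%0,0,%4\n"	/* %0 (res) = *counter, reserved     */
	"	add	%1,%3,%0\n"	/* %1 (t)   = %3 (@a) + %0 (res)     */
	"	stwcx.	%1,0,%4\n"	/* store t iff reservation still set */
	"	bne-	1b\n"		/* reservation lost -> retry         */
		: "=&r" (res),		/* %0: output, fetched (old) value   */
		  "=&r" (t),		/* %1: output, new value             */
		  "+m" (*counter)	/* %2: the memory being updated      */
		: "r" (a),		/* %3: input, the addend             */
		  "r" (counter)		/* %4: input, address of the counter */
		: "cc");

		return res;
	}

(With %2, the instruction would name the "+m" memory operand rather
than a register.)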

Regards,
Boqun

> +	PPC405_ERR77(0, %4)						\
> +"	stwcx.	%1,0,%4\n"						\
> +"	bne-	1b\n"							\
> +	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
> +	: "r" (a), "r" (&v->counter)					\
> +	: "cc");							\
> +									\
> +	return res;							\
> +}
> +
>  #define ATOMIC_OPS(op, asm_op)						\
>  	ATOMIC_OP(op, asm_op)						\
> -	ATOMIC_OP_RETURN_RELAXED(op, asm_op)
> +	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
> +	ATOMIC_FETCH_OP_RELAXED(op, asm_op)
>  
>  ATOMIC_OPS(add, add)
>  ATOMIC_OPS(sub, subf)
>  
> -ATOMIC_OP(and, and)
> -ATOMIC_OP(or, or)
> -ATOMIC_OP(xor, xor)
> -
>  #define atomic_add_return_relaxed atomic_add_return_relaxed
>  #define atomic_sub_return_relaxed atomic_sub_return_relaxed
>  
> +#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
> +#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
> +
> +#undef ATOMIC_OPS
> +#define ATOMIC_OPS(op, asm_op)						\
> +	ATOMIC_OP(op, asm_op)						\
> +	ATOMIC_FETCH_OP_RELAXED(op, asm_op)
> +
> +ATOMIC_OPS(and, and)
> +ATOMIC_OPS(or, or)
> +ATOMIC_OPS(xor, xor)
> +
> +#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
> +#define atomic_fetch_or_relaxed  atomic_fetch_or_relaxed
> +#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
> +
>  #undef ATOMIC_OPS
> +#undef ATOMIC_FETCH_OP_RELAXED
>  #undef ATOMIC_OP_RETURN_RELAXED
>  #undef ATOMIC_OP
>  
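
(As an aside on the #define atomic_fetch_add_relaxed self-aliases
above: they are how the architecture tells the generic layer that it
provides the relaxed variant natively; the generic header then builds
the _acquire/_release/full-barrier versions around it. Roughly like
so -- a simplified sketch of the mechanism, not the literal text of
include/linux/atomic.h:)

	#define __atomic_op_acquire(op, args...)			\
	({								\
		typeof(op##_relaxed(args)) __ret = op##_relaxed(args);	\
		smp_mb__after_atomic();					\
		__ret;							\
	})

	#ifndef atomic_fetch_add_acquire
	#define atomic_fetch_add_acquire(...)				\
		__atomic_op_acquire(atomic_fetch_add, __VA_ARGS__)
	#endif
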
> @@ -329,20 +361,53 @@ atomic64_##op##_return_relaxed(long a, a
>  	return t;							\
>  }
>  
> +#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
> +static inline long							\
> +atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
> +{									\
> +	long res, t;							\
> +									\
> +	__asm__ __volatile__(						\
> +"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
> +	#asm_op " %1,%3,%0\n"						\
> +"	stdcx.	%1,0,%4\n"						\
> +"	bne-	1b\n"							\
> +	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
> +	: "r" (a), "r" (&v->counter)					\
> +	: "cc");							\
> +									\
> +	return t;							\
> +}
> +
>  #define ATOMIC64_OPS(op, asm_op)					\
>  	ATOMIC64_OP(op, asm_op)						\
> -	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)
> +	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
> +	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
>  
>  ATOMIC64_OPS(add, add)
>  ATOMIC64_OPS(sub, subf)
> -ATOMIC64_OP(and, and)
> -ATOMIC64_OP(or, or)
> -ATOMIC64_OP(xor, xor)
>  
>  #define atomic64_add_return_relaxed atomic64_add_return_relaxed
>  #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
>  
> +#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
> +#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
> +
> +#undef ATOMIC64_OPS
> +#define ATOMIC64_OPS(op, asm_op)					\
> +	ATOMIC64_OP(op, asm_op)						\
> +	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
> +
> +ATOMIC64_OPS(and, and)
> +ATOMIC64_OPS(or, or)
> +ATOMIC64_OPS(xor, xor)
> +
> +#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
> +#define atomic64_fetch_or_relaxed  atomic64_fetch_or_relaxed
> +#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
> +
>  #undef ATOPIC64_OPS
> +#undef ATOMIC64_FETCH_OP_RELAXED
>  #undef ATOMIC64_OP_RETURN_RELAXED
>  #undef ATOMIC64_OP
>  
> 
> 
