Message-ID: <tip-b93c7b8c5b281bf3646d6c5b6e05249b98cc5ab7@git.kernel.org>
Date: Thu, 14 Aug 2014 10:18:48 -0700
From: tip-bot for Peter Zijlstra <tipbot@...or.com>
To: linux-tip-commits@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, hpa@...or.com, mingo@...nel.org,
	torvalds@...ux-foundation.org, peterz@...radead.org,
	paulmck@...ux.vnet.ibm.com, mattst88@...il.com,
	ink@...assic.park.msu.ru, tglx@...utronix.de, rth@...ddle.net
Subject: [tip:locking/arch] locking,arch,alpha: Fold atomic_ops

Commit-ID:  b93c7b8c5b281bf3646d6c5b6e05249b98cc5ab7
Gitweb:     http://git.kernel.org/tip/b93c7b8c5b281bf3646d6c5b6e05249b98cc5ab7
Author:     Peter Zijlstra <peterz@...radead.org>
AuthorDate: Sun, 23 Mar 2014 16:25:53 +0100
Committer:  Ingo Molnar <mingo@...nel.org>
CommitDate: Thu, 14 Aug 2014 12:48:03 +0200

locking,arch,alpha: Fold atomic_ops

Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.

Cc: Ivan Kokshaysky <ink@...assic.park.msu.ru>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Matt Turner <mattst88@...il.com>
Cc: Paul E. McKenney <paulmck@...ux.vnet.ibm.com>
Cc: Richard Henderson <rth@...ddle.net>
Cc: linux-alpha@...r.kernel.org
Signed-off-by: Peter Zijlstra <peterz@...radead.org>
Link: http://lkml.kernel.org/r/20140508135851.832107183@infradead.org
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
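A note on the mechanics before the diff, since the whole fold rests on two
standard CPP features: token pasting (##op builds the function names, e.g.
atomic_add) and stringizing (#op, whose result concatenates with the
neighbouring string literals into the "addl"/"subl" mnemonic inside the asm
template). A stand-alone toy sketch of the same pattern; the toy_* names
are illustrative only, not kernel code:

	#include <stdio.h>

	/* TOY_OP(op) pastes the function name with ##op and builds the
	 * mnemonic string with #op "l": the same two tricks that
	 * ATOMIC_OP() below uses to generate the asm bodies. */
	#define TOY_OP(op)						\
	static void toy_##op(int i)					\
	{								\
		printf("would emit: %s $0,%d,$0\n", #op "l", i);	\
	}

	TOY_OP(add)	/* defines toy_add(), printing "addl ..." */
	TOY_OP(sub)	/* defines toy_sub(), printing "subl ..." */

	#undef TOY_OP

	int main(void)
	{
		toy_add(1);	/* would emit: addl $0,1,$0 */
		toy_sub(2);	/* would emit: subl $0,2,$0 */
		return 0;
	}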
 arch/alpha/include/asm/atomic.h | 213 +++++++++++++++------------------------
 1 file changed, 80 insertions(+), 133 deletions(-)

diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index ed60a1e..6fbb53a 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -29,145 +29,92 @@
  * branch back to restart the operation.
  */
 
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
-	unsigned long temp;
-	__asm__ __volatile__(
-	"1:	ldl_l %0,%1\n"
-	"	addl %0,%2,%0\n"
-	"	stl_c %0,%1\n"
-	"	beq %0,2f\n"
-	".subsection 2\n"
-	"2:	br 1b\n"
-	".previous"
-	:"=&r" (temp), "=m" (v->counter)
-	:"Ir" (i), "m" (v->counter));
-}
-
-static __inline__ void atomic64_add(long i, atomic64_t * v)
-{
-	unsigned long temp;
-	__asm__ __volatile__(
-	"1:	ldq_l %0,%1\n"
-	"	addq %0,%2,%0\n"
-	"	stq_c %0,%1\n"
-	"	beq %0,2f\n"
-	".subsection 2\n"
-	"2:	br 1b\n"
-	".previous"
-	:"=&r" (temp), "=m" (v->counter)
-	:"Ir" (i), "m" (v->counter));
-}
-
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
-	unsigned long temp;
-	__asm__ __volatile__(
-	"1:	ldl_l %0,%1\n"
-	"	subl %0,%2,%0\n"
-	"	stl_c %0,%1\n"
-	"	beq %0,2f\n"
-	".subsection 2\n"
-	"2:	br 1b\n"
-	".previous"
-	:"=&r" (temp), "=m" (v->counter)
-	:"Ir" (i), "m" (v->counter));
+#define ATOMIC_OP(op)							\
+static __inline__ void atomic_##op(int i, atomic_t * v)		\
+{									\
+	unsigned long temp;						\
+	__asm__ __volatile__(						\
+	"1:	ldl_l %0,%1\n"						\
+	"	" #op "l %0,%2,%0\n"					\
+	"	stl_c %0,%1\n"						\
+	"	beq %0,2f\n"						\
+	".subsection 2\n"						\
+	"2:	br 1b\n"						\
+	".previous"							\
+	:"=&r" (temp), "=m" (v->counter)				\
+	:"Ir" (i), "m" (v->counter));					\
+}									\
+
+#define ATOMIC_OP_RETURN(op)						\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	long temp, result;						\
+	smp_mb();							\
+	__asm__ __volatile__(						\
+	"1:	ldl_l %0,%1\n"						\
+	"	" #op "l %0,%3,%2\n"					\
+	"	" #op "l %0,%3,%0\n"					\
+	"	stl_c %0,%1\n"						\
+	"	beq %0,2f\n"						\
+	".subsection 2\n"						\
+	"2:	br 1b\n"						\
+	".previous"							\
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
+	:"Ir" (i), "m" (v->counter) : "memory");			\
+	smp_mb();							\
+	return result;							\
 }
 
-static __inline__ void atomic64_sub(long i, atomic64_t * v)
-{
-	unsigned long temp;
-	__asm__ __volatile__(
-	"1:	ldq_l %0,%1\n"
-	"	subq %0,%2,%0\n"
-	"	stq_c %0,%1\n"
-	"	beq %0,2f\n"
-	".subsection 2\n"
-	"2:	br 1b\n"
-	".previous"
-	:"=&r" (temp), "=m" (v->counter)
-	:"Ir" (i), "m" (v->counter));
-}
-
-
-/*
- * Same as above, but return the result value
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	long temp, result;
-	smp_mb();
-	__asm__ __volatile__(
-	"1:	ldl_l %0,%1\n"
-	"	addl %0,%3,%2\n"
-	"	addl %0,%3,%0\n"
-	"	stl_c %0,%1\n"
-	"	beq %0,2f\n"
-	".subsection 2\n"
-	"2:	br 1b\n"
-	".previous"
-	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
-	:"Ir" (i), "m" (v->counter) : "memory");
-	smp_mb();
-	return result;
+#define ATOMIC64_OP(op)							\
+static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
+{									\
+	unsigned long temp;						\
+	__asm__ __volatile__(						\
+	"1:	ldq_l %0,%1\n"						\
+	"	" #op "q %0,%2,%0\n"					\
+	"	stq_c %0,%1\n"						\
+	"	beq %0,2f\n"						\
+	".subsection 2\n"						\
+	"2:	br 1b\n"						\
+	".previous"							\
+	:"=&r" (temp), "=m" (v->counter)				\
+	:"Ir" (i), "m" (v->counter));					\
+}									\
+
+#define ATOMIC64_OP_RETURN(op)						\
+static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)	\
+{									\
+	long temp, result;						\
+	smp_mb();							\
+	__asm__ __volatile__(						\
+	"1:	ldq_l %0,%1\n"						\
+	"	" #op "q %0,%3,%2\n"					\
+	"	" #op "q %0,%3,%0\n"					\
+	"	stq_c %0,%1\n"						\
+	"	beq %0,2f\n"						\
+	".subsection 2\n"						\
+	"2:	br 1b\n"						\
+	".previous"							\
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
+	:"Ir" (i), "m" (v->counter) : "memory");			\
+	smp_mb();							\
+	return result;							\
 }
 
-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
-{
-	long temp, result;
-	smp_mb();
-	__asm__ __volatile__(
-	"1:	ldq_l %0,%1\n"
-	"	addq %0,%3,%2\n"
-	"	addq %0,%3,%0\n"
-	"	stq_c %0,%1\n"
-	"	beq %0,2f\n"
-	".subsection 2\n"
-	"2:	br 1b\n"
-	".previous"
-	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
-	:"Ir" (i), "m" (v->counter) : "memory");
-	smp_mb();
-	return result;
-}
+#define ATOMIC_OPS(opg)							\
+	ATOMIC_OP(opg)							\
+	ATOMIC_OP_RETURN(opg)						\
+	ATOMIC64_OP(opg)						\
+	ATOMIC64_OP_RETURN(opg)
 
-static __inline__ long atomic_sub_return(int i, atomic_t * v)
-{
-	long temp, result;
-	smp_mb();
-	__asm__ __volatile__(
-	"1:	ldl_l %0,%1\n"
-	"	subl %0,%3,%2\n"
-	"	subl %0,%3,%0\n"
-	"	stl_c %0,%1\n"
-	"	beq %0,2f\n"
-	".subsection 2\n"
-	"2:	br 1b\n"
-	".previous"
-	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
-	:"Ir" (i), "m" (v->counter) : "memory");
-	smp_mb();
-	return result;
-}
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
-{
-	long temp, result;
-	smp_mb();
-	__asm__ __volatile__(
-	"1:	ldq_l %0,%1\n"
-	"	subq %0,%3,%2\n"
-	"	subq %0,%3,%0\n"
-	"	stq_c %0,%1\n"
-	"	beq %0,2f\n"
-	".subsection 2\n"
-	"2:	br 1b\n"
-	".previous"
-	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
-	:"Ir" (i), "m" (v->counter) : "memory");
-	smp_mb();
-	return result;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
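As a sanity check on the conversion: the new macros expand back, token for
token, into the code removed above. ATOMIC_OPS(add), for instance,
preprocesses the 32-bit non-returning variant into:

	static __inline__ void atomic_add(int i, atomic_t * v)
	{
		unsigned long temp;
		__asm__ __volatile__(
		"1:	ldl_l %0,%1\n"
		"	addl %0,%2,%0\n"
		"	stl_c %0,%1\n"
		"	beq %0,2f\n"
		".subsection 2\n"
		"2:	br 1b\n"
		".previous"
		:"=&r" (temp), "=m" (v->counter)
		:"Ir" (i), "m" (v->counter));
	}

because the adjacent string literals "	" "add" "l %0,%2,%0\n" concatenate
after stringizing. That is the old open-coded atomic_add() unchanged, which
is why the diffstat drops a net 53 lines with no functional change. One
caveat on "easy addition of new ops": the generated mnemonics are #op "l"
and #op "q", so the scheme as-is fits ops whose 32/64-bit Alpha instructions
follow that naming (addl/addq, subl/subq); an op with a differently shaped
mnemonic would need the asm templates generalized.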