[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <C2D7FE5348E1B147BCA15975FBA230751A6AB58E@IN01WEMBXA.internal.synopsys.com>
Date: Fri, 9 May 2014 09:34:51 +0000
From: Vineet Gupta <Vineet.Gupta1@...opsys.com>
To: Peter Zijlstra <peterz@...radead.org>,
"linux-arch@...r.kernel.org" <linux-arch@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
CC: "torvalds@...ux-foundation.org" <torvalds@...ux-foundation.org>,
"akpm@...ux-foundation.org" <akpm@...ux-foundation.org>,
"mingo@...nel.org" <mingo@...nel.org>,
"will.deacon@....com" <will.deacon@....com>,
"paulmck@...ux.vnet.ibm.com" <paulmck@...ux.vnet.ibm.com>
Subject: Re: [PATCH 03/20] arch,arc: Fold atomic_ops
Hi Peter,
Minor point below, otherwise looks good.
On Thursday 08 May 2014 07:30 PM, Peter Zijlstra wrote:
> Many of the atomic op implementations are the same except for one
> instruction; fold the lot into a few CPP macros and reduce LoC.
>
> This also prepares for easy addition of new ops.
>
> Cc: Linus Torvalds <torvalds@...ux-foundation.org>
> Cc: Vineet Gupta <vgupta@...opsys.com>
> Signed-off-by: Peter Zijlstra <peterz@...radead.org>
> ---
> arch/arc/include/asm/atomic.h | 194 ++++++++++++++----------------------------
> 1 file changed, 68 insertions(+), 126 deletions(-)
>
> --- a/arch/arc/include/asm/atomic.h
> +++ b/arch/arc/include/asm/atomic.h
> @@ -25,79 +25,36 @@
>
> #define atomic_set(v, i) (((v)->counter) = (i))
>
> -static inline void atomic_add(int i, atomic_t *v)
> -{
> - unsigned int temp;
> -
> - __asm__ __volatile__(
> - "1: llock %0, [%1] \n"
> - " add %0, %0, %2 \n"
> - " scond %0, [%1] \n"
> - " bnz 1b \n"
> - : "=&r"(temp) /* Early clobber, to prevent reg reuse */
> - : "r"(&v->counter), "ir"(i)
> - : "cc");
> -}
> -
> -static inline void atomic_sub(int i, atomic_t *v)
> -{
> - unsigned int temp;
> -
> - __asm__ __volatile__(
> - "1: llock %0, [%1] \n"
> - " sub %0, %0, %2 \n"
> - " scond %0, [%1] \n"
> - " bnz 1b \n"
> - : "=&r"(temp)
> - : "r"(&v->counter), "ir"(i)
> - : "cc");
> -}
> -
> -/* add and also return the new value */
> -static inline int atomic_add_return(int i, atomic_t *v)
> -{
> - unsigned int temp;
> -
> - __asm__ __volatile__(
> - "1: llock %0, [%1] \n"
> - " add %0, %0, %2 \n"
> - " scond %0, [%1] \n"
> - " bnz 1b \n"
> - : "=&r"(temp)
> - : "r"(&v->counter), "ir"(i)
> - : "cc");
> -
> - return temp;
> -}
> -
> -static inline int atomic_sub_return(int i, atomic_t *v)
> -{
> - unsigned int temp;
> -
> - __asm__ __volatile__(
> - "1: llock %0, [%1] \n"
> - " sub %0, %0, %2 \n"
> - " scond %0, [%1] \n"
> - " bnz 1b \n"
> - : "=&r"(temp)
> - : "r"(&v->counter), "ir"(i)
> - : "cc");
> -
> - return temp;
> -}
> -
> -static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
> -{
> - unsigned int temp;
> -
> - __asm__ __volatile__(
> - "1: llock %0, [%1] \n"
> - " bic %0, %0, %2 \n"
> - " scond %0, [%1] \n"
> - " bnz 1b \n"
> - : "=&r"(temp)
> - : "r"(addr), "ir"(mask)
> - : "cc");
> +#define ATOMIC_OP(op, c_op, asm_op) \
> +static inline void atomic_##op(int i, atomic_t *v) \
> +{ \
> + unsigned int temp; \
> + \
> + __asm__ __volatile__( \
> + "1: llock %0, [%1] \n" \
> + " " #asm_op " %0, %0, %2 \n" \
> + " scond %0, [%1] \n" \
> + " bnz 1b \n" \
> + : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \
> + : "r"(&v->counter), "ir"(i) \
> + : "cc"); \
> +} \
> +
> +#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
> +static inline int atomic_##op##_return(int i, atomic_t *v) \
> +{ \
> + unsigned int temp; \
> + \
> + __asm__ __volatile__( \
> + "1: llock %0, [%1] \n" \
> + " " #asm_op " %0, %0, %2 \n" \
> + " scond %0, [%1] \n" \
> + " bnz 1b \n" \
> + : "=&r"(temp) \
> + : "r"(&v->counter), "ir"(i) \
> + : "cc"); \
> + \
> + return temp; \
> }
>
> #else /* !CONFIG_ARC_HAS_LLSC */
> @@ -126,6 +83,7 @@ static inline void atomic_set(atomic_t *
> v->counter = i;
> atomic_ops_unlock(flags);
> }
> +
> #endif
>
> /*
> @@ -133,63 +91,47 @@ static inline void atomic_set(atomic_t *
> * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
> */
>
> -static inline void atomic_add(int i, atomic_t *v)
> -{
> - unsigned long flags;
> -
> - atomic_ops_lock(flags);
> - v->counter += i;
> - atomic_ops_unlock(flags);
> -}
> -
> -static inline void atomic_sub(int i, atomic_t *v)
> -{
> - unsigned long flags;
> -
> - atomic_ops_lock(flags);
> - v->counter -= i;
> - atomic_ops_unlock(flags);
> -}
> -
> -static inline int atomic_add_return(int i, atomic_t *v)
> -{
> - unsigned long flags;
> - unsigned long temp;
> -
> - atomic_ops_lock(flags);
> - temp = v->counter;
> - temp += i;
> - v->counter = temp;
> - atomic_ops_unlock(flags);
> -
> - return temp;
> -}
> -
> -static inline int atomic_sub_return(int i, atomic_t *v)
> -{
> - unsigned long flags;
> - unsigned long temp;
> -
> - atomic_ops_lock(flags);
> - temp = v->counter;
> - temp -= i;
> - v->counter = temp;
> - atomic_ops_unlock(flags);
> -
> - return temp;
> -}
> -
> -static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
> -{
> - unsigned long flags;
> -
> - atomic_ops_lock(flags);
> - *addr &= ~mask;
> - atomic_ops_unlock(flags);
> +#define ATOMIC_OP(op, c_op, asm_op) \
> +static inline void atomic_##op(int i, atomic_t *v) \
> +{ \
> + unsigned long flags; \
> + \
> + atomic_ops_lock(flags); \
> + v->counter c_op i; \
> + atomic_ops_unlock(flags); \
> +}
> +
> +#define ATOMIC_OP_RETURN(op, c_op) \
> +static inline int atomic_##op##_return(int i, atomic_t *v) \
> +{ \
> + unsigned long flags; \
> + unsigned long temp; \
> + \
> + atomic_ops_lock(flags); \
> + temp = v->counter; \
> + temp c_op i; \
> + v->counter = temp; \
> + atomic_ops_unlock(flags); \
> + \
> + return temp; \
> }
>
> #endif /* !CONFIG_ARC_HAS_LLSC */
>
> +#define ATOMIC_OPS(op, c_op, asm_op) \
> + ATOMIC_OP(op, c_op, asm_op) \
> + ATOMIC_OP_RETURN(op, c_op, asm_op)
> +
> +ATOMIC_OPS(add, +=, add)
> +ATOMIC_OPS(sub, -=, sub)
> +ATOMIC_OP(and, &=, and)
> +
> +#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))
Given that ARC has instruction to do just that, can we keep below instead.
ATOMIC_OP(clear_mask, ~=, bic)
(see asm version of atomic_clear_mask)
-Vineet
> +
> +#undef ATOMIC_OPS
> +#undef ATOMIC_OP_RETURN
> +#undef ATOMIC_OP
> +
> /**
> * __atomic_add_unless - add unless the number is a given value
> * @v: pointer of type atomic_t
>
>
>
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists