Message-Id: <20180222214625.GM2855@linux.vnet.ibm.com>
Date: Thu, 22 Feb 2018 13:46:25 -0800
From: "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
To: Andrea Parri <parri.andrea@...il.com>
Cc: Ingo Molnar <mingo@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Alan Stern <stern@...land.harvard.edu>,
Ivan Kokshaysky <ink@...assic.park.msu.ru>,
Matt Turner <mattst88@...il.com>,
Richard Henderson <rth@...ddle.net>,
linux-alpha@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/2] locking/xchg/alpha: Use smp_mb() in place of __ASM__MB
On Thu, Feb 22, 2018 at 10:24:29AM +0100, Andrea Parri wrote:
> Replace each occurrence of __ASM__MB with a (trailing) smp_mb() in
> xchg(), cmpxchg(), and remove the now unused __ASM__MB definitions;
> this improves readability, with no additional synchronization cost.
>
> Suggested-by: Will Deacon <will.deacon@....com>
> Signed-off-by: Andrea Parri <parri.andrea@...il.com>
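On the "no additional synchronization cost" point: if I am reading
arch/alpha/include/asm/barrier.h and asm-generic/barrier.h correctly,
smp_mb() boils down to the same "mb" instruction that __ASM__MB emitted
under CONFIG_SMP, and to a plain compiler barrier otherwise;
paraphrasing the definitions:

	#define mb()	__asm__ __volatile__("mb": : :"memory")

	#ifdef CONFIG_SMP
	#define smp_mb()	mb()
	#else
	#define smp_mb()	barrier()
	#endif

And since the xchg/cmpxchg asm blocks already carry a "memory" clobber,
the UP case generates the same code as the old empty __ASM__MB did.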
I am a bit confused by the use of out-of-line branches to do a backwards
branch, but those were in place to start with. Maybe the point is to
defeat backwards-branch prediction or some such.
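For reference, the two possible shapes would be roughly as follows
(operands elided; the in-line variant is hypothetical, the out-of-line
one is what the code actually does):

	/* In-line retry: the hot path ends with a backwards branch,
	 * which static predictors typically guess taken. */
	1:	ldl_l	%0,%4
		...
		stl_c	%1,%2
		beq	%1,1b

	/* Out-of-line retry: the hot path sees only a rarely-taken
	 * forward branch; the retry lives in a cold subsection. */
	1:	ldl_l	%0,%4
		...
		stl_c	%1,%2
		beq	%1,2f
	.subsection 2
	2:	br	1b
	.previous

If that is the rationale, the out-of-line form keeps the common success
path straight-line, and the forward beq gets statically predicted
not-taken, which matches the common case of the store-conditional
succeeding.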
Regardless...
Acked-by: Paul E. McKenney <paulmck@...ux.vnet.ibm.com>
> Cc: Peter Zijlstra <peterz@...radead.org>
> Cc: Paul E. McKenney <paulmck@...ux.vnet.ibm.com>
> Cc: Alan Stern <stern@...land.harvard.edu>
> Cc: Ivan Kokshaysky <ink@...assic.park.msu.ru>
> Cc: Matt Turner <mattst88@...il.com>
> Cc: Richard Henderson <rth@...ddle.net>
> Cc: linux-alpha@...r.kernel.org
> Cc: linux-kernel@...r.kernel.org
> ---
> arch/alpha/include/asm/cmpxchg.h | 6 ------
> arch/alpha/include/asm/xchg.h | 16 ++++++++--------
> 2 files changed, 8 insertions(+), 14 deletions(-)
>
> diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
> index 46ebf14aed4e5..8a2b331e43feb 100644
> --- a/arch/alpha/include/asm/cmpxchg.h
> +++ b/arch/alpha/include/asm/cmpxchg.h
> @@ -6,7 +6,6 @@
> * Atomic exchange routines.
> */
>
> -#define __ASM__MB
> #define ____xchg(type, args...) __xchg ## type ## _local(args)
> #define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
> #include <asm/xchg.h>
> @@ -33,10 +32,6 @@
> cmpxchg_local((ptr), (o), (n)); \
> })
>
> -#ifdef CONFIG_SMP
> -#undef __ASM__MB
> -#define __ASM__MB "\tmb\n"
> -#endif
> #undef ____xchg
> #undef ____cmpxchg
> #define ____xchg(type, args...) __xchg ##type(args)
> @@ -64,7 +59,6 @@
> cmpxchg((ptr), (o), (n)); \
> })
>
> -#undef __ASM__MB
> #undef ____cmpxchg
>
> #endif /* _ALPHA_CMPXCHG_H */
> diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
> index e2660866ce972..e1facf6fc2446 100644
> --- a/arch/alpha/include/asm/xchg.h
> +++ b/arch/alpha/include/asm/xchg.h
> @@ -28,12 +28,12 @@ ____xchg(_u8, volatile char *m, unsigned long val)
> " or %1,%2,%2\n"
> " stq_c %2,0(%3)\n"
> " beq %2,2f\n"
> - __ASM__MB
> ".subsection 2\n"
> "2: br 1b\n"
> ".previous"
> : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
> : "r" ((long)m), "1" (val) : "memory");
> + smp_mb();
>
> return ret;
> }
> @@ -52,12 +52,12 @@ ____xchg(_u16, volatile short *m, unsigned long val)
> " or %1,%2,%2\n"
> " stq_c %2,0(%3)\n"
> " beq %2,2f\n"
> - __ASM__MB
> ".subsection 2\n"
> "2: br 1b\n"
> ".previous"
> : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
> : "r" ((long)m), "1" (val) : "memory");
> + smp_mb();
>
> return ret;
> }
> @@ -72,12 +72,12 @@ ____xchg(_u32, volatile int *m, unsigned long val)
> " bis $31,%3,%1\n"
> " stl_c %1,%2\n"
> " beq %1,2f\n"
> - __ASM__MB
> ".subsection 2\n"
> "2: br 1b\n"
> ".previous"
> : "=&r" (val), "=&r" (dummy), "=m" (*m)
> : "rI" (val), "m" (*m) : "memory");
> + smp_mb();
>
> return val;
> }
> @@ -92,12 +92,12 @@ ____xchg(_u64, volatile long *m, unsigned long val)
> " bis $31,%3,%1\n"
> " stq_c %1,%2\n"
> " beq %1,2f\n"
> - __ASM__MB
> ".subsection 2\n"
> "2: br 1b\n"
> ".previous"
> : "=&r" (val), "=&r" (dummy), "=m" (*m)
> : "rI" (val), "m" (*m) : "memory");
> + smp_mb();
>
> return val;
> }
> @@ -150,12 +150,12 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
> " stq_c %2,0(%4)\n"
> " beq %2,3f\n"
> "2:\n"
> - __ASM__MB
> ".subsection 2\n"
> "3: br 1b\n"
> ".previous"
> : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
> : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
> + smp_mb();
>
> return prev;
> }
> @@ -177,12 +177,12 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
> " stq_c %2,0(%4)\n"
> " beq %2,3f\n"
> "2:\n"
> - __ASM__MB
> ".subsection 2\n"
> "3: br 1b\n"
> ".previous"
> : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
> : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
> + smp_mb();
>
> return prev;
> }
> @@ -200,12 +200,12 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
> " stl_c %1,%2\n"
> " beq %1,3f\n"
> "2:\n"
> - __ASM__MB
> ".subsection 2\n"
> "3: br 1b\n"
> ".previous"
> : "=&r"(prev), "=&r"(cmp), "=m"(*m)
> : "r"((long) old), "r"(new), "m"(*m) : "memory");
> + smp_mb();
>
> return prev;
> }
> @@ -223,12 +223,12 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
> " stq_c %1,%2\n"
> " beq %1,3f\n"
> "2:\n"
> - __ASM__MB
> ".subsection 2\n"
> "3: br 1b\n"
> ".previous"
> : "=&r"(prev), "=&r"(cmp), "=m"(*m)
> : "r"((long) old), "r"(new), "m"(*m) : "memory");
> + smp_mb();
>
> return prev;
> }
> --
> 2.7.4
>
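To make the end result concrete, here is the _u32 xchg routine with the
patch applied (reconstructed from the hunk above, comments mine):

	static inline unsigned long
	____xchg(_u32, volatile int *m, unsigned long val)
	{
		unsigned long dummy;

		__asm__ __volatile__(
		"1:	ldl_l %0,%4\n"		/* load-locked the old value */
		"	bis $31,%3,%1\n"	/* move new value into scratch reg */
		"	stl_c %1,%2\n"		/* store-conditional; %1 = 0 on failure */
		"	beq %1,2f\n"		/* failed: branch to out-of-line retry */
		".subsection 2\n"
		"2:	br 1b\n"		/* retry the ll/sc sequence */
		".previous"
		: "=&r" (val), "=&r" (dummy), "=m" (*m)
		: "rI" (val), "m" (*m) : "memory");
		smp_mb();	/* trailing full barrier, formerly __ASM__MB in the asm */

		return val;
	}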