Date:   Mon, 10 May 2021 10:37:29 +0100
From:   Mark Rutland <mark.rutland@....com>
To:     linux-kernel@...r.kernel.org, will@...nel.org,
        boqun.feng@...il.com, peterz@...radead.org
Cc:     aou@...s.berkeley.edu, arnd@...db.de, bcain@...eaurora.org,
        benh@...nel.crashing.org, chris@...kel.net, dalias@...c.org,
        davem@...emloft.net, deanbo422@...il.com, deller@....de,
        geert@...ux-m68k.org, green.hu@...il.com, guoren@...nel.org,
        ink@...assic.park.msu.ru, James.Bottomley@...senPartnership.com,
        jcmvbkbc@...il.com, jonas@...thpole.se, ley.foon.tan@...el.com,
        linux@...linux.org.uk, mark.rutland@....com, mattst88@...il.com,
        monstr@...str.eu, mpe@...erman.id.au, nickhu@...estech.com,
        palmer@...belt.com, paulus@...ba.org, paul.walmsley@...ive.com,
        rth@...ddle.net, shorne@...il.com,
        stefan.kristiansson@...nalahti.fi, tsbogend@...ha.franken.de,
        vgupta@...opsys.com, ysato@...rs.sourceforge.jp
Subject: [PATCH 09/33] locking/atomic: atomic: support ARCH_ATOMIC

We'd like all architectures to convert to ARCH_ATOMIC, as this will enable
generic functionality (e.g. instrumentation of the atomics), and once all
architectures are converted it will be possible to make significant cleanups
to the atomic headers.

A number of architectures use asm-generic/atomic.h, and it's impractical
to convert the header and all these architectures in one go. To make it
possible to convert them one-by-one, let's make the asm-generic
implementation function as either atomic_*() or arch_atomic_*()
depending on whether ARCH_ATOMIC is selected. To do this, the C
implementations are prefixed as generic_atomic_*(), and preprocessor
definitions map atomic_*()/arch_atomic_*() onto these as
appropriate.
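
For a single op, the mapping then has roughly the following shape (an
illustrative sketch only, not part of the diff below; atomic_t and the
cmpxchg() family come from the usual kernel headers):

  /* Sketch: one C implementation, exposed under either namespace. */
  static inline void generic_atomic_add(int i, atomic_t *v)
  {
  	/* built from cmpxchg()/arch_cmpxchg() or an irq-disabled RMW */
  }

  #ifdef CONFIG_ARCH_ATOMIC
  /* exposed as arch_atomic_add(); the instrumented wrappers then provide atomic_add() */
  #define arch_atomic_add		generic_atomic_add
  #else
  /* legacy behaviour: the generic implementation is used as atomic_add() directly */
  #define atomic_add		generic_atomic_add
  #endif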

Once all users are moved over to ARCH_ATOMIC, the ifdeffery in the header
can be simplified and/or removed entirely.

For existing users (none of which select ARCH_ATOMIC), there should be
no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@....com>
Cc: Arnd Bergmann <arnd@...db.de>
Cc: Boqun Feng <boqun.feng@...il.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Will Deacon <will@...nel.org>
---
 include/asm-generic/atomic.h | 71 ++++++++++++++++++++++++++++++++++++++------
 1 file changed, 62 insertions(+), 9 deletions(-)

diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 316c82a27b0a..649060fa0fe8 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -12,39 +12,47 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
+#ifdef CONFIG_ARCH_ATOMIC
+#define __ga_cmpxchg	arch_cmpxchg
+#define __ga_xchg	arch_xchg
+#else
+#define __ga_cmpxchg	cmpxchg
+#define __ga_xchg	xchg
+#endif
+
 #ifdef CONFIG_SMP
 
 /* we can build all atomic primitives from cmpxchg */
 
 #define ATOMIC_OP(op, c_op)						\
-static inline void atomic_##op(int i, atomic_t *v)			\
+static inline void generic_atomic_##op(int i, atomic_t *v)		\
 {									\
 	int c, old;							\
 									\
 	c = v->counter;							\
-	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
+	while ((old = __ga_cmpxchg(&v->counter, c, c c_op i)) != c)	\
 		c = old;						\
 }
 
 #define ATOMIC_OP_RETURN(op, c_op)					\
-static inline int atomic_##op##_return(int i, atomic_t *v)		\
+static inline int generic_atomic_##op##_return(int i, atomic_t *v)	\
 {									\
 	int c, old;							\
 									\
 	c = v->counter;							\
-	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
+	while ((old = __ga_cmpxchg(&v->counter, c, c c_op i)) != c)	\
 		c = old;						\
 									\
 	return c c_op i;						\
 }
 
 #define ATOMIC_FETCH_OP(op, c_op)					\
-static inline int atomic_fetch_##op(int i, atomic_t *v)			\
+static inline int generic_atomic_fetch_##op(int i, atomic_t *v)		\
 {									\
 	int c, old;							\
 									\
 	c = v->counter;							\
-	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
+	while ((old = __ga_cmpxchg(&v->counter, c, c c_op i)) != c)	\
 		c = old;						\
 									\
 	return c;							\
@@ -55,7 +63,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)			\
 #include <linux/irqflags.h>
 
 #define ATOMIC_OP(op, c_op)						\
-static inline void atomic_##op(int i, atomic_t *v)			\
+static inline void generic_atomic_##op(int i, atomic_t *v)		\
 {									\
 	unsigned long flags;						\
 									\
@@ -65,7 +73,7 @@ static inline void atomic_##op(int i, atomic_t *v)			\
 }
 
 #define ATOMIC_OP_RETURN(op, c_op)					\
-static inline int atomic_##op##_return(int i, atomic_t *v)		\
+static inline int generic_atomic_##op##_return(int i, atomic_t *v)	\
 {									\
 	unsigned long flags;						\
 	int ret;							\
@@ -78,7 +86,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v)		\
 }
 
 #define ATOMIC_FETCH_OP(op, c_op)					\
-static inline int atomic_fetch_##op(int i, atomic_t *v)			\
+static inline int generic_atomic_fetch_##op(int i, atomic_t *v)		\
 {									\
 	unsigned long flags;						\
 	int ret;							\
@@ -112,10 +120,55 @@ ATOMIC_OP(xor, ^)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
+#undef __ga_cmpxchg
+#undef __ga_xchg
+
+#ifdef CONFIG_ARCH_ATOMIC
+
+#define arch_atomic_add_return			generic_atomic_add_return
+#define arch_atomic_sub_return			generic_atomic_sub_return
+
+#define arch_atomic_fetch_add			generic_atomic_fetch_add
+#define arch_atomic_fetch_sub			generic_atomic_fetch_sub
+#define arch_atomic_fetch_and			generic_atomic_fetch_and
+#define arch_atomic_fetch_or			generic_atomic_fetch_or
+#define arch_atomic_fetch_xor			generic_atomic_fetch_xor
+
+#define arch_atomic_add				generic_atomic_add
+#define arch_atomic_sub				generic_atomic_sub
+#define arch_atomic_and				generic_atomic_and
+#define arch_atomic_or				generic_atomic_or
+#define arch_atomic_xor				generic_atomic_xor
+
+#define arch_atomic_read(v)			READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i)			WRITE_ONCE(((v)->counter), (i))
+
+#define arch_atomic_xchg(ptr, v)		(arch_xchg(&(ptr)->counter, (v)))
+#define arch_atomic_cmpxchg(v, old, new)	(arch_cmpxchg(&((v)->counter), (old), (new)))
+
+#else /* CONFIG_ARCH_ATOMIC */
+
+#define atomic_add_return		generic_atomic_add_return
+#define atomic_sub_return		generic_atomic_sub_return
+
+#define atomic_fetch_add		generic_atomic_fetch_add
+#define atomic_fetch_sub		generic_atomic_fetch_sub
+#define atomic_fetch_and		generic_atomic_fetch_and
+#define atomic_fetch_or			generic_atomic_fetch_or
+#define atomic_fetch_xor		generic_atomic_fetch_xor
+
+#define atomic_add			generic_atomic_add
+#define atomic_sub			generic_atomic_sub
+#define atomic_and			generic_atomic_and
+#define atomic_or			generic_atomic_or
+#define atomic_xor			generic_atomic_xor
+
 #define atomic_read(v)			READ_ONCE((v)->counter)
 #define atomic_set(v, i)		WRITE_ONCE(((v)->counter), (i))
 
 #define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
 
+#endif /* CONFIG_ARCH_ATOMIC */
+
 #endif /* __ASM_GENERIC_ATOMIC_H */
-- 
2.11.0
