Message-ID: <162202827822.29796.2868576462985035147.tip-bot2@tip-bot2>
Date: Wed, 26 May 2021 11:24:38 -0000
From: "tip-bot2 for Mark Rutland" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Mark Rutland <mark.rutland@....com>, Arnd Bergmann <arnd@...db.de>,
Boqun Feng <boqun.feng@...il.com>,
Peter Zijlstra <peterz@...radead.org>,
Will Deacon <will@...nel.org>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: locking/core] locking/atomic: cmpxchg: support ARCH_ATOMIC

The following commit has been merged into the locking/core branch of tip:

Commit-ID:     82b993e8249ae3cb29c1b6eb8f6548f5748508b7
Gitweb:        https://git.kernel.org/tip/82b993e8249ae3cb29c1b6eb8f6548f5748508b7
Author:        Mark Rutland <mark.rutland@....com>
AuthorDate:    Tue, 25 May 2021 15:02:11 +01:00
Committer:     Peter Zijlstra <peterz@...radead.org>
CommitterDate: Wed, 26 May 2021 13:20:50 +02:00

locking/atomic: cmpxchg: support ARCH_ATOMIC

We'd like all architectures to convert to ARCH_ATOMIC, as this will
enable functionality such as instrumentation to be provided
generically, and once all architectures are converted it will be
possible to make significant cleanups to the atomic headers.

A number of architectures use asm-generic/cmpxchg.h or
asm-generic/cmpxchg-local.h, and it's impractical to convert the headers
and all these architectures in one go. To make it possible to convert
them one-by-one, let's make the asm-generic implementation function as
either cmpxchg*() or arch_cmpxchg*() depending on whether ARCH_ATOMIC is
selected. To do this, the generic implementations are prefixed as
generic_cmpxchg_*(), and preprocessor definitions map
cmpxchg_*()/arch_cmpxchg_*() onto these as appropriate, as sketched
below.
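
Condensed, the resulting shape is as follows (a sketch using the names
from the diff below; the real header handles xchg() and the 64-bit
variants the same way):

  /* One real implementation, kept under a generic_ name: */
  #define generic_cmpxchg_local(ptr, o, n)                          \
          ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr),       \
                  (unsigned long)(o), (unsigned long)(n),           \
                  sizeof(*(ptr))))

  /* The user-visible name is then chosen at preprocessing time: */
  #ifdef CONFIG_ARCH_ATOMIC
  #ifndef arch_cmpxchg_local
  #define arch_cmpxchg_local generic_cmpxchg_local  /* converted arches */
  #endif
  #else
  #ifndef cmpxchg_local
  #define cmpxchg_local generic_cmpxchg_local       /* unconverted arches */
  #endif
  #endif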

Once all users are moved over to ARCH_ATOMIC the ifdeffery in the header
can be simplified and/or removed entirely.

For existing users (none of which select ARCH_ATOMIC), there should be
no functional change as a result of this patch, as the expansion traced
below illustrates.
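
On an unconverted architecture using this header, a call such as
cmpxchg(&v, old, new) expands after this patch as (a hand-traced
sketch; v, old and new are placeholders):

  cmpxchg(&v, old, new)
    -> cmpxchg_local(&v, old, new)          /* same entry point as before */
    -> generic_cmpxchg_local(&v, old, new)  /* the new intermediate name */
    -> __generic_cmpxchg_local(&v, (unsigned long)old,
                               (unsigned long)new, sizeof(v))
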
Signed-off-by: Mark Rutland <mark.rutland@....com>
Cc: Arnd Bergmann <arnd@...db.de>
Cc: Boqun Feng <boqun.feng@...il.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Will Deacon <will@...nel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-13-mark.rutland@arm.com
---
include/asm-generic/cmpxchg.h | 61 ++++++++++++++++++++++++----------
1 file changed, 44 insertions(+), 17 deletions(-)

diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index b9d54c7..98c9311 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -14,16 +14,14 @@
 #include <linux/types.h>
 #include <linux/irqflags.h>
 
-#ifndef xchg
-
 /*
  * This function doesn't exist, so you'll get a linker error if
  * something tries to do an invalidly-sized xchg().
  */
-extern void __xchg_called_with_bad_pointer(void);
+extern void __generic_xchg_called_with_bad_pointer(void);
 
 static inline
-unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
 {
 	unsigned long ret, flags;
 
@@ -75,35 +73,64 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 #endif /* CONFIG_64BIT */
 
 	default:
-		__xchg_called_with_bad_pointer();
+		__generic_xchg_called_with_bad_pointer();
 		return x;
 	}
 }
 
-#define xchg(ptr, x) ({ \
-	((__typeof__(*(ptr))) \
-		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
+#define generic_xchg(ptr, x) ({ \
+	((__typeof__(*(ptr))) \
+		__generic_xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
 })
 
-#endif /* xchg */
-
 /*
  * Atomic compare and exchange.
  */
 #include <asm-generic/cmpxchg-local.h>
 
-#ifndef cmpxchg_local
-#define cmpxchg_local(ptr, o, n) ({ \
-	((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o),\
-			(unsigned long)(n), sizeof(*(ptr)))); \
+#define generic_cmpxchg_local(ptr, o, n) ({ \
+	((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o), \
+			(unsigned long)(n), sizeof(*(ptr)))); \
 })
+
+#define generic_cmpxchg64_local(ptr, o, n) \
+	__generic_cmpxchg64_local((ptr), (o), (n))
+
+
+#ifdef CONFIG_ARCH_ATOMIC
+
+#ifndef arch_xchg
+#define arch_xchg generic_xchg
+#endif
+
+#ifndef arch_cmpxchg_local
+#define arch_cmpxchg_local generic_cmpxchg_local
+#endif
+
+#ifndef arch_cmpxchg64_local
+#define arch_cmpxchg64_local generic_cmpxchg64_local
+#endif
+
+#define arch_cmpxchg arch_cmpxchg_local
+#define arch_cmpxchg64 arch_cmpxchg64_local
+
+#else /* CONFIG_ARCH_ATOMIC */
+
+#ifndef xchg
+#define xchg generic_xchg
+#endif
+
+#ifndef cmpxchg_local
+#define cmpxchg_local generic_cmpxchg_local
 #endif
 
 #ifndef cmpxchg64_local
-#define cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+#define cmpxchg64_local generic_cmpxchg64_local
 #endif
 
-#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n))
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define cmpxchg cmpxchg_local
+#define cmpxchg64 cmpxchg64_local
+
+#endif /* CONFIG_ARCH_ATOMIC */
 
 #endif /* __ASM_GENERIC_CMPXCHG_H */
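
For an architecture that does convert, the intended usage (a
hypothetical sketch; "foo" is an invented architecture, not part of
this patch) is to select ARCH_ATOMIC and keep using the generic header:

  /* arch/foo/include/asm/cmpxchg.h (hypothetical) */
  #include <asm-generic/cmpxchg.h>

With CONFIG_ARCH_ATOMIC=y the header then provides arch_xchg() and
arch_cmpxchg*(), and the plain xchg()/cmpxchg*() are supplied on top of
those by the instrumented wrappers pulled in via <linux/atomic.h>.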