Message-Id: <20231104091615.4884-5-wuqiang.matt@bytedance.com>
Date: Sat, 4 Nov 2023 17:16:15 +0800
From: "wuqiang.matt" <wuqiang.matt@...edance.com>
To: vgupta@...nel.org, bcain@...cinc.com, jonas@...thpole.se,
stefan.kristiansson@...nalahti.fi, shorne@...il.com,
geert@...ux-m68k.org, andi.shyti@...ux.intel.com, mingo@...nel.org,
palmer@...osinc.com, andrzej.hajda@...el.com, arnd@...db.de,
peterz@...radead.org, mhiramat@...nel.org
Cc: linux-snps-arc@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-hexagon@...r.kernel.org, linux-openrisc@...r.kernel.org,
linux-trace-kernel@...r.kernel.org, mattwu@....com,
linux@...ck-us.net, "wuqiang.matt" <wuqiang.matt@...edance.com>,
kernel test robot <lkp@...el.com>
Subject: [PATCH v2 4/4] locking/atomic: hexagon: arch_cmpxchg[64]_local undefined

For architectures that support native cmpxchg, implement
arch_cmpxchg[64]_local with the native variants for the supported
data sizes; otherwise fall back to __generic_cmpxchg[64]_local().
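
For example, a hypothetical caller on hexagon (the variables here
are illustrative only, not part of this patch):

	int v = 1;
	long long v64 = 1;

	/* 4-byte: served by the native, memw_locked-based __cmpxchg_32() */
	arch_cmpxchg_local(&v, 1, 2);

	/* 8-byte: no native variant, falls back to __generic_cmpxchg64_local() */
	arch_cmpxchg64_local(&v64, 1LL, 2LL);
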
Reported-by: kernel test robot <lkp@...el.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202310272207.tLPflya4-lkp@intel.com/
Signed-off-by: wuqiang.matt <wuqiang.matt@...edance.com>
---
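Note for reviewers (not part of the commit message): the fallback path
relies on __generic_cmpxchg_local()/__generic_cmpxchg64_local() from
asm-generic/cmpxchg-local.h. Roughly sketched, the generic helper
emulates the operation by disabling interrupts around a plain
load/compare/store, so it is atomic only with respect to the local CPU,
which is exactly the contract of the _local variants. The 4-byte case
is approximately:

	/* simplified paraphrase of __generic_cmpxchg_local(), size == 4 */
	unsigned long flags, prev;

	raw_local_irq_save(flags);	/* local-CPU atomicity only */
	prev = *(u32 *)ptr;
	if (prev == old)
		*(u32 *)ptr = (u32)new;
	raw_local_irq_restore(flags);
	return prev;

Using the native __cmpxchg_32() where the data size allows avoids this
interrupt-disable window.
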
arch/hexagon/include/asm/cmpxchg.h | 51 +++++++++++++++++++++++++++++-
1 file changed, 50 insertions(+), 1 deletion(-)

diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index bf6cf5579cf4..2b5e5bbaf807 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -8,6 +8,8 @@
#ifndef _ASM_CMPXCHG_H
#define _ASM_CMPXCHG_H

+#include <linux/build_bug.h>
+
/*
* __arch_xchg - atomically exchange a register and a memory location
* @x: value to swap
@@ -51,13 +53,15 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
* variable casting.
*/

-#define arch_cmpxchg(ptr, old, new) \
+#define __cmpxchg_32(ptr, old, new) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
__typeof__(*(ptr)) __oldval = 0; \
\
+ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+ \
asm volatile( \
"1: %0 = memw_locked(%1);\n" \
" { P0 = cmp.eq(%0,%2);\n" \
@@ -72,4 +76,49 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
__oldval; \
})

+#define __cmpxchg(ptr, old, val, size) \
+({ \
+ __typeof__(*(ptr)) oldval; \
+ \
+ switch (size) { \
+ case 4: \
+ oldval = __cmpxchg_32(ptr, old, val); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ oldval = val; \
+ break; \
+ } \
+ \
+ oldval; \
+})
+
+#define arch_cmpxchg(ptr, o, n) __cmpxchg((ptr), (o), (n), sizeof(*(ptr)))
+
+/*
+ * Always make arch_cmpxchg[64]_local available: use the native cmpxchg
+ * where the data size allows, generic_cmpxchg[64]_local otherwise.
+ */
+#include <asm-generic/cmpxchg-local.h>
+
+#define arch_cmpxchg_local(ptr, old, val) \
+({ \
+ __typeof__(*(ptr)) retval; \
+ int size = sizeof(*(ptr)); \
+ \
+ switch (size) { \
+ case 4: \
+ retval = __cmpxchg_32(ptr, old, val); \
+ break; \
+ default: \
+ retval = __generic_cmpxchg_local(ptr, old, \
+ val, size); \
+ break; \
+ } \
+ \
+ retval; \
+})
+
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+
#endif /* _ASM_CMPXCHG_H */
--
2.40.1