Message-Id: <20250820-riscv-altn-helper-wip-v1-6-c3c626c1f7e6@iscas.ac.cn>
Date: Wed, 20 Aug 2025 21:44:50 +0800
From: Vivian Wang <wangruikang@...as.ac.cn>
To: Paul Walmsley <paul.walmsley@...ive.com>,
Palmer Dabbelt <palmer@...belt.com>, Albert Ou <aou@...s.berkeley.edu>,
Alexandre Ghiti <alex@...ti.fr>, Yury Norov <yury.norov@...il.com>,
Rasmus Villemoes <linux@...musvillemoes.dk>
Cc: Vivian Wang <wangruikang@...as.ac.cn>, Vivian Wang <uwu@...m.page>,
linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: [PATCH 6/6] riscv: cmpxchg: Convert to use_alternative_likely

Use use_alternative_likely() to check for RISCV_ISA_EXT_ZAWRS, replacing
the open-coded "asm goto with ALTERNATIVE" construct and its no_zawrs
label with an ordinary if block.

The "likely" variant is used to match the behavior of the original
implementation, ALTERNATIVE("j %l[no_zawrs]", "nop", ...): the unpatched
code jumps to the non-Zawrs fallback, and the jump is patched to a nop
when Zawrs is available.
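
For context, a minimal sketch of what such a helper can look like,
modeled on the existing riscv_has_extension_likely() pattern; the actual
helper is introduced earlier in this series, and the name and parameters
shown here are assumptions for illustration only:

/*
 * Hypothetical sketch, not the definition from this series. The
 * ALTERNATIVE-patched branch is wrapped in an asm goto and the result
 * is returned as a bool, so callers can write an ordinary if ()
 * instead of jumping to a label. The unpatched code takes the "false"
 * path; the jump is patched to a nop (returning true) when the given
 * extension is present. Assumes <asm/alternative.h> is available.
 */
static __always_inline bool use_alternative_likely(const unsigned long vendor,
						   const unsigned long patch_id)
{
	asm goto(ALTERNATIVE("j %l[l_no]", "nop", %[vendor], %[patch_id], 1)
		 :
		 : [vendor] "i" (vendor), [patch_id] "i" (patch_id)
		 :
		 : l_no);
	return true;
l_no:
	return false;
}
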
Signed-off-by: Vivian Wang <wangruikang@...as.ac.cn>
---
 arch/riscv/include/asm/cmpxchg.h | 125 +++++++++++++++++++--------------------
 1 file changed, 61 insertions(+), 64 deletions(-)

diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 0b749e7102162477432f7cf9a34768fbdf2e8cc7..1ef6e9de5f6d2721d325fa07f2e636ebc951dc7e 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -370,74 +370,71 @@ static __always_inline void __cmpwait(volatile void *ptr,
 	u32 *__ptr32b;
 	ulong __s, __val, __mask;
 
-	asm goto(ALTERNATIVE("j %l[no_zawrs]", "nop",
-			     0, RISCV_ISA_EXT_ZAWRS, 1)
-		 : : : : no_zawrs);
-
-	switch (size) {
-	case 1:
-		__ptr32b = (u32 *)((ulong)(ptr) & ~0x3);
-		__s = ((ulong)(ptr) & 0x3) * BITS_PER_BYTE;
-		__val = val << __s;
-		__mask = 0xff << __s;
-
-		asm volatile(
-		"	lr.w	%0, %1\n"
-		"	and	%0, %0, %3\n"
-		"	xor	%0, %0, %2\n"
-		"	bnez	%0, 1f\n"
-			ZAWRS_WRS_NTO "\n"
-		"1:"
-		: "=&r" (tmp), "+A" (*(__ptr32b))
-		: "r" (__val), "r" (__mask)
-		: "memory");
-		break;
-	case 2:
-		__ptr32b = (u32 *)((ulong)(ptr) & ~0x3);
-		__s = ((ulong)(ptr) & 0x2) * BITS_PER_BYTE;
-		__val = val << __s;
-		__mask = 0xffff << __s;
-
-		asm volatile(
-		"	lr.w	%0, %1\n"
-		"	and	%0, %0, %3\n"
-		"	xor	%0, %0, %2\n"
-		"	bnez	%0, 1f\n"
-			ZAWRS_WRS_NTO "\n"
-		"1:"
-		: "=&r" (tmp), "+A" (*(__ptr32b))
-		: "r" (__val), "r" (__mask)
-		: "memory");
-		break;
-	case 4:
-		asm volatile(
-		"	lr.w	%0, %1\n"
-		"	xor	%0, %0, %2\n"
-		"	bnez	%0, 1f\n"
-			ZAWRS_WRS_NTO "\n"
-		"1:"
-		: "=&r" (tmp), "+A" (*(u32 *)ptr)
-		: "r" (val));
-		break;
+	if (use_alternative_likely(0, RISCV_ISA_EXT_ZAWRS)) {
+		switch (size) {
+		case 1:
+			__ptr32b = (u32 *)((ulong)(ptr) & ~0x3);
+			__s = ((ulong)(ptr) & 0x3) * BITS_PER_BYTE;
+			__val = val << __s;
+			__mask = 0xff << __s;
+
+			asm volatile(
+			"	lr.w	%0, %1\n"
+			"	and	%0, %0, %3\n"
+			"	xor	%0, %0, %2\n"
+			"	bnez	%0, 1f\n"
+				ZAWRS_WRS_NTO "\n"
+			"1:"
+			: "=&r" (tmp), "+A" (*(__ptr32b))
+			: "r" (__val), "r" (__mask)
+			: "memory");
+			break;
+		case 2:
+			__ptr32b = (u32 *)((ulong)(ptr) & ~0x3);
+			__s = ((ulong)(ptr) & 0x2) * BITS_PER_BYTE;
+			__val = val << __s;
+			__mask = 0xffff << __s;
+
+			asm volatile(
+			"	lr.w	%0, %1\n"
+			"	and	%0, %0, %3\n"
+			"	xor	%0, %0, %2\n"
+			"	bnez	%0, 1f\n"
+				ZAWRS_WRS_NTO "\n"
+			"1:"
+			: "=&r" (tmp), "+A" (*(__ptr32b))
+			: "r" (__val), "r" (__mask)
+			: "memory");
+			break;
+		case 4:
+			asm volatile(
+			"	lr.w	%0, %1\n"
+			"	xor	%0, %0, %2\n"
+			"	bnez	%0, 1f\n"
+				ZAWRS_WRS_NTO "\n"
+			"1:"
+			: "=&r" (tmp), "+A" (*(u32 *)ptr)
+			: "r" (val));
+			break;
 #if __riscv_xlen == 64
-	case 8:
-		asm volatile(
-		"	lr.d	%0, %1\n"
-		"	xor	%0, %0, %2\n"
-		"	bnez	%0, 1f\n"
-			ZAWRS_WRS_NTO "\n"
-		"1:"
-		: "=&r" (tmp), "+A" (*(u64 *)ptr)
-		: "r" (val));
-		break;
+		case 8:
+			asm volatile(
+			"	lr.d	%0, %1\n"
+			"	xor	%0, %0, %2\n"
+			"	bnez	%0, 1f\n"
+				ZAWRS_WRS_NTO "\n"
+			"1:"
+			: "=&r" (tmp), "+A" (*(u64 *)ptr)
+			: "r" (val));
+			break;
 #endif
-	default:
-		BUILD_BUG();
-	}
+		default:
+			BUILD_BUG();
+		}
 
-	return;
+		return;
+	}
 
-no_zawrs:
 	asm volatile(RISCV_PAUSE : : : "memory");
 }
--
2.50.1