Date:   Fri, 27 Mar 2020 17:52:41 +0000 (UTC)
From:   Christophe Leroy <christophe.leroy@....fr>
To:     Benjamin Herrenschmidt <benh@...nel.crashing.org>,
        Paul Mackerras <paulus@...ba.org>,
        Michael Ellerman <mpe@...erman.id.au>,
        segher@...nel.crashing.org
Cc:     linux-kernel@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org
Subject: [RFC PATCH] powerpc: Use ppu_intrinsics.h instead of opencoding

ppu_intrinsics.h already provides helpers for sync(), isync(),
dcbX() and friends.

Use them instead of open-coding the equivalent inline assembly.
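
For reference, the helpers in the GCC-provided ppu_intrinsics.h take the
same operands as the open-coded asm they replace; a minimal sketch of the
kind of usage this enables (flush_line() is only an illustrative name, not
something added by this patch):

#include <ppu_intrinsics.h>

/* Illustrative only: flush a cache line and order the flush against
 * later accesses, using the header-provided intrinsics rather than
 * open-coded inline assembly. */
static inline void flush_line(void *addr)
{
	__dcbf(addr);	/* dcbf 0,addr: flush the data cache block */
	__sync();	/* sync: full memory barrier */
}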

Signed-off-by: Christophe Leroy <christophe.leroy@....fr>
---
 arch/powerpc/include/asm/barrier.h  | 11 +++++++----
 arch/powerpc/include/asm/cache.h    | 25 ++++++-------------------
 arch/powerpc/include/asm/checksum.h |  4 +++-
 arch/powerpc/include/asm/synch.h    | 12 ++++--------
 4 files changed, 20 insertions(+), 32 deletions(-)

diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 123adcefd40f..392c91519220 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -7,6 +7,10 @@
 
 #include <asm/asm-const.h>
 
+#ifndef __ASSEMBLY__
+#include <ppu_intrinsics.h>
+#endif
+
 /*
  * Memory barrier.
  * The sync instruction guarantees that all memory accesses initiated
@@ -31,9 +35,9 @@
  * However, on CPUs that don't support lwsync, lwsync actually maps to a
  * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
  */
-#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
-#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
-#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
+#define mb()   __sync()
+#define rmb()  __sync()
+#define wmb()  __sync()
 
 /* The sub-arch has lwsync */
 #if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
@@ -42,7 +46,6 @@
 #    define SMPWMB      eieio
 #endif
 
-#define __lwsync()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
 #define dma_rmb()	__lwsync()
 #define dma_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
 
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 72b81015cebe..5b5e9a63060a 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -34,6 +34,8 @@
 #define IFETCH_ALIGN_BYTES	(1 << IFETCH_ALIGN_SHIFT)
 
 #if !defined(__ASSEMBLY__)
+#include <ppu_intrinsics.h>
+
 #ifdef CONFIG_PPC64
 
 struct ppc_cache_info {
@@ -111,31 +113,16 @@ extern void _set_L3CR(unsigned long);
 #define _set_L3CR(val)	do { } while(0)
 #endif
 
-static inline void dcbz(void *addr)
-{
-	__asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory");
-}
+#define dcbz	__dcbz
+#define dcbf	__dcbf
+#define dcbst	__dcbst
+#define icbi	__icbi
 
 static inline void dcbi(void *addr)
 {
 	__asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory");
 }
 
-static inline void dcbf(void *addr)
-{
-	__asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory");
-}
-
-static inline void dcbst(void *addr)
-{
-	__asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
-}
-
-static inline void icbi(void *addr)
-{
-	asm volatile ("icbi 0, %0" : : "r"(addr) : "memory");
-}
-
 static inline void iccci(void *addr)
 {
 	asm volatile ("iccci 0, %0" : : "r"(addr) : "memory");
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 9cce06194dcc..16abea7c3c64 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -8,6 +8,8 @@
 
 #include <linux/bitops.h>
 #include <linux/in6.h>
+
+#include <ppu_intrinsics.h>
 /*
  * Computes the checksum of a memory block at src, length len,
  * and adds in "sum" (32-bit), while copying the block to dst.
@@ -42,7 +44,7 @@ static inline __sum16 csum_fold(__wsum sum)
 	unsigned int tmp;
 
 	/* swap the two 16-bit halves of sum */
-	__asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
+	tmp = __rlwinm(sum, 16, 0, 31);
 	/* if there is a carry from adding the two 16-bit halves,
 	   it will carry from the lower half into the upper half,
 	   giving us the correct sum in the upper half. */
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index aca70fb43147..44020f89854e 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -7,19 +7,15 @@
 #include <asm/asm-const.h>
 
 #ifndef __ASSEMBLY__
+#include <ppu_intrinsics.h>
+
 extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
 extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
 			     void *fixup_end);
 
-static inline void eieio(void)
-{
-	__asm__ __volatile__ ("eieio" : : : "memory");
-}
+#define eieio __eieio
+#define isync __isync
 
-static inline void isync(void)
-{
-	__asm__ __volatile__ ("isync" : : : "memory");
-}
 #endif /* __ASSEMBLY__ */
 
 #if defined(__powerpc64__)
-- 
2.25.0
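
For the checksum.h hunk, __rlwinm(sum, 16, 0, 31) rotates the 32-bit sum
left by 16 with a full 0..31 mask, i.e. it swaps the two 16-bit halves.
A minimal, portable sketch of the folding step, assuming that behaviour
(fold_model() is a hypothetical name used only for this illustration):

#include <stdint.h>
#include <stdio.h>

/* Portable model of csum_fold(): swap the 16-bit halves of the 32-bit
 * partial sum, add, and keep the complemented upper half.  A carry out
 * of the low half lands in the high half, as the comment in csum_fold()
 * describes. */
static uint16_t fold_model(uint32_t sum)
{
	uint32_t tmp = (sum << 16) | (sum >> 16);	/* rlwinm tmp,sum,16,0,31 */

	return (uint16_t)(~(sum + tmp) >> 16);
}

int main(void)
{
	/* halves 0x0001 + 0xffff overflow; the carry folds back in,
	 * giving 0x0001, so the final checksum is ~0x0001 = 0xfffe */
	printf("0x%04x\n", fold_model(0x0001ffffu));
	return 0;
}

Running this prints 0xfffe, matching a by-hand one's-complement fold of
0x0001ffff.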
