Message-Id: <1206259443-13210-17-git-send-email-joe@perches.com>
Date:	Sun, 23 Mar 2008 01:01:51 -0700
From:	Joe Perches <joe@...ches.com>
To:	Ingo Molnar <mingo@...e.hu>, Thomas Gleixner <tglx@...utronix.de>
Cc:	linux-kernel@...r.kernel.org
Subject: [PATCH 016/148] include/asm-x86/cmpxchg_32.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <joe@...ches.com>
---
 include/asm-x86/cmpxchg_32.h |  253 ++++++++++++++++++++++--------------------
 1 files changed, 132 insertions(+), 121 deletions(-)

diff --git a/include/asm-x86/cmpxchg_32.h b/include/asm-x86/cmpxchg_32.h
index 959fad0..bf5a69d 100644
--- a/include/asm-x86/cmpxchg_32.h
+++ b/include/asm-x86/cmpxchg_32.h
@@ -8,9 +8,12 @@
  *       you need to test for the feature in boot_cpu_data.
  */
 
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+#define xchg(ptr, v)							\
+	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
 
-struct __xchg_dummy { unsigned long a[100]; };
+struct __xchg_dummy {
+	unsigned long a[100];
+};
 #define __xg(x) ((struct __xchg_dummy *)(x))
 
 /*
@@ -27,72 +30,74 @@ struct __xchg_dummy { unsigned long a[100]; };
  * of the instruction set reference 24319102.pdf. We need
  * the reader side to see the coherent 64bit value.
  */
-static inline void __set_64bit (unsigned long long * ptr,
-		unsigned int low, unsigned int high)
+static inline void __set_64bit(unsigned long long *ptr,
+			       unsigned int low, unsigned int high)
 {
-	__asm__ __volatile__ (
-		"\n1:\t"
-		"movl (%0), %%eax\n\t"
-		"movl 4(%0), %%edx\n\t"
-		LOCK_PREFIX "cmpxchg8b (%0)\n\t"
-		"jnz 1b"
-		: /* no outputs */
-		:	"D"(ptr),
-			"b"(low),
-			"c"(high)
-		:	"ax","dx","memory");
+	asm volatile("\n1:\t"
+		     "movl (%0), %%eax\n\t"
+		     "movl 4(%0), %%edx\n\t"
+		     LOCK_PREFIX "cmpxchg8b (%0)\n\t"
+		     "jnz 1b"
+		     : /* no outputs */
+		     : "D"(ptr),
+		       "b"(low),
+		       "c"(high)
+		     : "ax", "dx", "memory");
 }
 
-static inline void __set_64bit_constant (unsigned long long *ptr,
-						 unsigned long long value)
+static inline void __set_64bit_constant(unsigned long long *ptr,
+					unsigned long long value)
 {
-	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
+	__set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
 }
-#define ll_low(x)	*(((unsigned int*)&(x))+0)
-#define ll_high(x)	*(((unsigned int*)&(x))+1)
 
-static inline void __set_64bit_var (unsigned long long *ptr,
-			 unsigned long long value)
+#define ll_low(x)	*(((unsigned int *)&(x)) + 0)
+#define ll_high(x)	*(((unsigned int *)&(x)) + 1)
+
+static inline void __set_64bit_var(unsigned long long *ptr,
+				   unsigned long long value)
 {
-	__set_64bit(ptr,ll_low(value), ll_high(value));
+	__set_64bit(ptr, ll_low(value), ll_high(value));
 }
 
-#define set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit_constant(ptr, value) : \
- __set_64bit_var(ptr, value) )
+#define set_64bit(ptr, value)			\
+	(__builtin_constant_p((value))		\
+	 ? __set_64bit_constant((ptr), (value))	\
+	 : __set_64bit_var((ptr), (value)))
 
-#define _set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
- __set_64bit(ptr, ll_low(value), ll_high(value)) )
+#define _set_64bit(ptr, value)						\
+	(__builtin_constant_p(value)					\
+	 ? __set_64bit(ptr, (unsigned int)(value),			\
+		       (unsigned int)((value) >> 32))			\
+	 : __set_64bit(ptr, ll_low((value)), ll_high((value))))
 
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
  *	  but generally the primitive is invalid, *ptr is output argument. --ANK
  */
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+				   int size)
 {
 	switch (size) {
-		case 1:
-			__asm__ __volatile__("xchgb %b0,%1"
-				:"=q" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
-			break;
-		case 2:
-			__asm__ __volatile__("xchgw %w0,%1"
-				:"=r" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
-			break;
-		case 4:
-			__asm__ __volatile__("xchgl %0,%1"
-				:"=r" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
-			break;
+	case 1:
+		asm volatile("xchgb %b0,%1"
+			     : "=q" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
+		break;
+	case 2:
+		asm volatile("xchgw %w0,%1"
+			     : "=r" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
+		break;
+	case 4:
+		asm volatile("xchgl %0,%1"
+			     : "=r" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
+		break;
 	}
 	return x;
 }
@@ -105,24 +110,27 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 
 #ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
-#define cmpxchg(ptr, o, n)						     \
-	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	     \
-					(unsigned long)(n), sizeof(*(ptr))))
-#define sync_cmpxchg(ptr, o, n)						     \
-	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),	     \
-					(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg_local(ptr, o, n)					     \
-	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	     \
-					(unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n)						\
+	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
+				       (unsigned long)(n),		\
+				       sizeof(*(ptr))))
+#define sync_cmpxchg(ptr, o, n)						\
+	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),	\
+					    (unsigned long)(n),		\
+					    sizeof(*(ptr))))
+#define cmpxchg_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
+					     (unsigned long)(n),	\
+					     sizeof(*(ptr))))
 #endif
 
 #ifdef CONFIG_X86_CMPXCHG64
-#define cmpxchg64(ptr, o, n)						      \
-	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o),      \
-					(unsigned long long)(n)))
-#define cmpxchg64_local(ptr, o, n)					      \
-	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o),\
-					(unsigned long long)(n)))
+#define cmpxchg64(ptr, o, n)						\
+	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
+					 (unsigned long long)(n)))
+#define cmpxchg64_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
+					       (unsigned long long)(n)))
 #endif
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
@@ -131,22 +139,22 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	unsigned long prev;
 	switch (size) {
 	case 1:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
+			     : "=a"(prev)
+			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 2:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 4:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	}
 	return old;
@@ -158,85 +166,88 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
  * isn't.
  */
 static inline unsigned long __sync_cmpxchg(volatile void *ptr,
-					    unsigned long old,
-					    unsigned long new, int size)
+					   unsigned long old,
+					   unsigned long new, int size)
 {
 	unsigned long prev;
 	switch (size) {
 	case 1:
-		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("lock; cmpxchgb %b1,%2"
+			     : "=a"(prev)
+			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 2:
-		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("lock; cmpxchgw %w1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 4:
-		__asm__ __volatile__("lock; cmpxchgl %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("lock; cmpxchgl %1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	}
 	return old;
 }
 
 static inline unsigned long __cmpxchg_local(volatile void *ptr,
-			unsigned long old, unsigned long new, int size)
+					    unsigned long old,
+					    unsigned long new, int size)
 {
 	unsigned long prev;
 	switch (size) {
 	case 1:
-		__asm__ __volatile__("cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("cmpxchgb %b1,%2"
+			     : "=a"(prev)
+			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 2:
-		__asm__ __volatile__("cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("cmpxchgw %w1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 4:
-		__asm__ __volatile__("cmpxchgl %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("cmpxchgl %1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	}
 	return old;
 }
 
 static inline unsigned long long __cmpxchg64(volatile void *ptr,
-			unsigned long long old, unsigned long long new)
+					     unsigned long long old,
+					     unsigned long long new)
 {
 	unsigned long long prev;
-	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
-			     : "=A"(prev)
-			     : "b"((unsigned long)new),
-			       "c"((unsigned long)(new >> 32)),
-			       "m"(*__xg(ptr)),
-			       "0"(old)
-			     : "memory");
+	asm volatile(LOCK_PREFIX "cmpxchg8b %3"
+		     : "=A"(prev)
+		     : "b"((unsigned long)new),
+		       "c"((unsigned long)(new >> 32)),
+		       "m"(*__xg(ptr)),
+		       "0"(old)
+		     : "memory");
 	return prev;
 }
 
 static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
-			unsigned long long old, unsigned long long new)
+						   unsigned long long old,
+						   unsigned long long new)
 {
 	unsigned long long prev;
-	__asm__ __volatile__("cmpxchg8b %3"
-			     : "=A"(prev)
-			     : "b"((unsigned long)new),
-			       "c"((unsigned long)(new >> 32)),
-			       "m"(*__xg(ptr)),
-			       "0"(old)
-			     : "memory");
+	asm volatile("cmpxchg8b %3"
+		     : "=A"(prev)
+		     : "b"((unsigned long)new),
+		       "c"((unsigned long)(new >> 32)),
+		       "m"(*__xg(ptr)),
+		       "0"(old)
+		     : "memory");
 	return prev;
 }
 
@@ -252,7 +263,7 @@ extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
 extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
 
 static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
-				      unsigned long new, int size)
+					unsigned long new, int size)
 {
 	switch (size) {
 	case 1:
-- 
1.5.4.rc2
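
[Editorial note, not part of the patch: the comment blocks in the diff above describe the idiom __set_64bit() relies on, i.e. re-reading the current 64-bit value and retrying cmpxchg8b until the store lands, so readers always see a coherent 64-bit value. Below is a minimal userspace sketch of that same read-then-compare-and-swap-until-it-sticks pattern, written with C11 atomics rather than this header's inline assembly. It is illustrative only; the name set_64bit_like is hypothetical.]

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace analogue of __set_64bit(): keep retrying the compare-and-swap
 * until no other writer intervened, so readers only ever observe a
 * coherent 64-bit value, never a half-written one. */
static void set_64bit_like(_Atomic uint64_t *ptr, uint64_t value)
{
	uint64_t old = atomic_load_explicit(ptr, memory_order_relaxed);

	/* On failure, atomic_compare_exchange_weak() reloads 'old' with the
	 * current contents of *ptr, much like the "jnz 1b" retry loop in
	 * __set_64bit() reloads eax:edx before retrying cmpxchg8b. */
	while (!atomic_compare_exchange_weak(ptr, &old, value))
		;
}

int main(void)
{
	_Atomic uint64_t v = 0;

	set_64bit_like(&v, 0x1122334455667788ULL);
	printf("%llx\n", (unsigned long long)atomic_load(&v));
	return 0;
}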

