Date:	Sun, 23 Mar 2008 01:03:38 -0700
From:	Joe Perches <joe@...ches.com>
To:	Ingo Molnar <mingo@...e.hu>, Thomas Gleixner <tglx@...utronix.de>
Cc:	linux-kernel@...r.kernel.org
Subject: [PATCH 123/148] include/asm-x86/sync_bitops.h: checkpatch cleanups - formatting only


Signed-off-by: Joe Perches <joe@...ches.com>
---
 include/asm-x86/sync_bitops.h |   56 ++++++++++++++++++++--------------------
 1 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h
index bc249f4..f1078a5 100644
--- a/include/asm-x86/sync_bitops.h
+++ b/include/asm-x86/sync_bitops.h
@@ -13,7 +13,7 @@
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
 
-#define ADDR (*(volatile long *) addr)
+#define ADDR (*(volatile long *)addr)
 
 /**
  * sync_set_bit - Atomically set a bit in memory
@@ -26,12 +26,12 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void sync_set_bit(int nr, volatile unsigned long * addr)
+static inline void sync_set_bit(int nr, volatile unsigned long *addr)
 {
-	__asm__ __volatile__("lock; btsl %1,%0"
-			     :"+m" (ADDR)
-			     :"Ir" (nr)
-			     : "memory");
+	asm volatile("lock; btsl %1,%0"
+		     : "+m" (ADDR)
+		     : "Ir" (nr)
+		     : "memory");
 }
 
 /**
@@ -44,12 +44,12 @@ static inline void sync_set_bit(int nr, volatile unsigned long * addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
+static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
 {
-	__asm__ __volatile__("lock; btrl %1,%0"
-			     :"+m" (ADDR)
-			     :"Ir" (nr)
-			     : "memory");
+	asm volatile("lock; btrl %1,%0"
+		     : "+m" (ADDR)
+		     : "Ir" (nr)
+		     : "memory");
 }
 
 /**
@@ -61,12 +61,12 @@ static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void sync_change_bit(int nr, volatile unsigned long * addr)
+static inline void sync_change_bit(int nr, volatile unsigned long *addr)
 {
-	__asm__ __volatile__("lock; btcl %1,%0"
-			     :"+m" (ADDR)
-			     :"Ir" (nr)
-			     : "memory");
+	asm volatile("lock; btcl %1,%0"
+		     : "+m" (ADDR)
+		     : "Ir" (nr)
+		     : "memory");
 }
 
 /**
@@ -77,13 +77,13 @@ static inline void sync_change_bit(int nr, volatile unsigned long * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
+static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	__asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit),"+m" (ADDR)
-			     :"Ir" (nr) : "memory");
+	asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
+		     : "=r" (oldbit), "+m" (ADDR)
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
 
@@ -95,13 +95,13 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
+static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	__asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit),"+m" (ADDR)
-			     :"Ir" (nr) : "memory");
+	asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0"
+		     : "=r" (oldbit), "+m" (ADDR)
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
 
@@ -113,13 +113,13 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr)
+static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	__asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit),"+m" (ADDR)
-			     :"Ir" (nr) : "memory");
+	asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0"
+		     : "=r" (oldbit), "+m" (ADDR)
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
 
-- 
1.5.4.rc2
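
As an aside for anyone reading the lock-prefixed routines being reformatted
above: in the test-and-* variants, the bt* instruction loads the old bit
value into the carry flag, and the following "sbbl %0,%0" (subtract with
borrow of a register from itself) materializes that flag as 0 or -1. Below
is a minimal stand-alone sketch of the same idiom, not part of the patch;
demo_test_and_set_bit and the main() driver are hypothetical names for
illustration, and it assumes an x86 target with GCC-style inline asm,
built outside the kernel:

#include <stdio.h>

static inline int demo_test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	/* lock; btsl: atomically set bit nr of *addr, old bit -> CF;
	 * sbbl r,r: r = r - r - CF, i.e. 0 if CF was clear, -1 if set. */
	asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
		     : "=r" (oldbit), "+m" (*(volatile long *)addr)
		     : "Ir" (nr) : "memory");
	return oldbit;
}

int main(void)
{
	volatile unsigned long word = 0;

	printf("%d\n", demo_test_and_set_bit(3, &word)); /* 0: bit was clear */
	printf("%d\n", demo_test_and_set_bit(3, &word)); /* -1: bit was set  */
	printf("%#lx\n", (unsigned long)word);           /* 0x8              */
	return 0;
}

Callers only test the result for zero versus nonzero, so returning -1
rather than 1 for a previously set bit is fine and saves an instruction
over a setc/movzbl sequence.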

