lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Date:	Wed, 27 Jul 2011 13:21:19 +0100
From:	Will Newton <will.newton@...il.com>
To:	Linux Kernel list <linux-kernel@...r.kernel.org>
Cc:	Arnd Bergmann <arnd@...db.de>, stable@...nel.org
Subject: [PATCH] bitops: Use volatile in generic atomic bitops.

The generic atomic bitops currently explicitly cast away the
volatile qualifier from the pointer passed to them. This allows
the access to the bitfield to happen outside of the critical
section, thus making the bitops no longer interrupt-safe. Remove
this cast and add a volatile keyword to make sure all accesses to
the bitfield happen inside the critical section.

Signed-off-by: Will Newton <will.newton@...tec.com>
---
 include/asm-generic/bitops/atomic.h |   12 ++++++------
 1 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/asm-generic/bitops/atomic.h
b/include/asm-generic/bitops/atomic.h
index ecc44a8..57e4b1f 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -65,7 +65,7 @@ extern arch_spinlock_t
__atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 static inline void set_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	volatile unsigned long *p = addr + BIT_WORD(nr);
 	unsigned long flags;

 	_atomic_spin_lock_irqsave(p, flags);
@@ -86,7 +86,7 @@ static inline void set_bit(int nr, volatile unsigned
long *addr)
 static inline void clear_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	volatile unsigned long *p = addr + BIT_WORD(nr);
 	unsigned long flags;

 	_atomic_spin_lock_irqsave(p, flags);
@@ -107,7 +107,7 @@ static inline void clear_bit(int nr, volatile
unsigned long *addr)
 static inline void change_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	volatile unsigned long *p = addr + BIT_WORD(nr);
 	unsigned long flags;

 	_atomic_spin_lock_irqsave(p, flags);
@@ -127,7 +127,7 @@ static inline void change_bit(int nr, volatile
unsigned long *addr)
 static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	volatile unsigned long *p = addr + BIT_WORD(nr);
 	unsigned long old;
 	unsigned long flags;

@@ -151,7 +151,7 @@ static inline int test_and_set_bit(int nr,
volatile unsigned long *addr)
 static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	volatile unsigned long *p = addr + BIT_WORD(nr);
 	unsigned long old;
 	unsigned long flags;

@@ -174,7 +174,7 @@ static inline int test_and_clear_bit(int nr,
volatile unsigned long *addr)
 static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	unsigned long mask = BIT_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	volatile unsigned long *p = addr + BIT_WORD(nr);
 	unsigned long old;
 	unsigned long flags;

-- 
1.7.3.4

View attachment "0001-bitops-Use-volatile-in-generic-atomic-bitops.patch" of type "text/x-patch" (3162 bytes)

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ