Message-Id: <20100811235455.972827670@clark.site>
Date: Wed, 11 Aug 2010 16:53:45 -0700
From: Greg KH <gregkh@...e.de>
To: linux-kernel@...r.kernel.org, stable@...nel.org
Cc: stable-review@...nel.org, torvalds@...ux-foundation.org,
akpm@...ux-foundation.org, alan@...rguk.ukuu.org.uk,
"H. Peter Anvin" <hpa@...or.com>,
Glauber Costa <glommer@...hat.com>,
Avi Kivity <avi@...hat.com>,
Peter Palfrader <peter@...frader.org>,
Zachary Amsden <zamsden@...hat.com>,
Marcelo Tosatti <mtosatti@...hat.com>
Subject: [004/111] x86: Add memory modify constraints to xchg() and cmpxchg()

2.6.32-stable review patch. If anyone has any objections, please let us know.

------------------

From: H. Peter Anvin <hpa@...or.com>

commit 113fc5a6e8c2288619ff7e8187a6f556b7e0d372 upstream.

xchg() and cmpxchg() modify their memory operands, not merely read
them. For some versions of gcc the "memory" clobber has apparently
dealt with the situation, but not for all.

Originally-by: Linus Torvalds <torvalds@...ux-foundation.org>
Signed-off-by: H. Peter Anvin <hpa@...or.com>
Cc: Glauber Costa <glommer@...hat.com>
Cc: Avi Kivity <avi@...hat.com>
Cc: Peter Palfrader <peter@...frader.org>
Cc: Greg KH <gregkh@...e.de>
Cc: Alan Cox <alan@...rguk.ukuu.org.uk>
Cc: Zachary Amsden <zamsden@...hat.com>
Cc: Marcelo Tosatti <mtosatti@...hat.com>
LKML-Reference: <4C4F7277.8050306@...or.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...e.de>
---
arch/x86/include/asm/cmpxchg_32.h | 86 ++++++++++++++++++-------------------
arch/x86/include/asm/cmpxchg_64.h | 88 ++++++++++++++++++++------------------
2 files changed, 89 insertions(+), 85 deletions(-)
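
For illustration only (not part of the patch): a minimal sketch contrasting
the two constraint styles for the 4-byte xchg case, compiled with gcc on
x86. In the old style the memory operand is listed as an input ("m"), so
gcc is told the location is only read and must infer the write from the
"memory" clobber alone; in the new style it is a read-write output ("+m"),
which matches what xchg actually does. The wrapper names my_xchg_old() and
my_xchg_new() are hypothetical, not taken from the kernel.

static inline unsigned int my_xchg_old(unsigned int x, volatile unsigned int *ptr)
{
	/* Old style: memory operand declared input-only; the write to
	 * *ptr is conveyed only by the "memory" clobber. */
	asm volatile("xchgl %0,%1"
		     : "=r" (x)
		     : "m" (*ptr), "0" (x)
		     : "memory");
	return x;
}

static inline unsigned int my_xchg_new(unsigned int x, volatile unsigned int *ptr)
{
	/* New style: "+m" marks the memory operand as read-write. */
	asm volatile("xchgl %0,%1"
		     : "=r" (x), "+m" (*ptr)
		     : "0" (x)
		     : "memory");
	return x;
}

Per the commit message above, some gcc versions handled the first form
correctly via the clobber alone, but not all; the explicit "+m" removes
the ambiguity.
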
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -34,12 +34,12 @@ static inline void __set_64bit(unsigned
unsigned int low, unsigned int high)
{
asm volatile("\n1:\t"
- "movl (%0), %%eax\n\t"
- "movl 4(%0), %%edx\n\t"
- LOCK_PREFIX "cmpxchg8b (%0)\n\t"
+ "movl (%1), %%eax\n\t"
+ "movl 4(%1), %%edx\n\t"
+ LOCK_PREFIX "cmpxchg8b %0\n\t"
"jnz 1b"
- : /* no outputs */
- : "D"(ptr),
+ : "=m"(*ptr)
+ : "D" (ptr),
"b"(low),
"c"(high)
: "ax", "dx", "memory");
@@ -82,20 +82,20 @@ static inline unsigned long __xchg(unsig
switch (size) {
case 1:
asm volatile("xchgb %b0,%1"
- : "=q" (x)
- : "m" (*__xg(ptr)), "0" (x)
+ : "=q" (x), "+m" (*__xg(ptr))
+ : "0" (x)
: "memory");
break;
case 2:
asm volatile("xchgw %w0,%1"
- : "=r" (x)
- : "m" (*__xg(ptr)), "0" (x)
+ : "=r" (x), "+m" (*__xg(ptr))
+ : "0" (x)
: "memory");
break;
case 4:
asm volatile("xchgl %0,%1"
- : "=r" (x)
- : "m" (*__xg(ptr)), "0" (x)
+ : "=r" (x), "+m" (*__xg(ptr))
+ : "0" (x)
: "memory");
break;
}
@@ -139,21 +139,21 @@ static inline unsigned long __cmpxchg(vo
unsigned long prev;
switch (size) {
case 1:
- asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile(LOCK_PREFIX "cmpxchgb %b2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "q"(new), "0"(old)
: "memory");
return prev;
case 2:
- asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile(LOCK_PREFIX "cmpxchgw %w2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
case 4:
- asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile(LOCK_PREFIX "cmpxchgl %2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
}
@@ -172,21 +172,21 @@ static inline unsigned long __sync_cmpxc
unsigned long prev;
switch (size) {
case 1:
- asm volatile("lock; cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("lock; cmpxchgb %b2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "q"(new), "0"(old)
: "memory");
return prev;
case 2:
- asm volatile("lock; cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("lock; cmpxchgw %w2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
case 4:
- asm volatile("lock; cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("lock; cmpxchgl %2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
}
@@ -200,21 +200,21 @@ static inline unsigned long __cmpxchg_lo
unsigned long prev;
switch (size) {
case 1:
- asm volatile("cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("cmpxchgb %b2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "q"(new), "0"(old)
: "memory");
return prev;
case 2:
- asm volatile("cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("cmpxchgw %w2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
case 4:
- asm volatile("cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("cmpxchgl %2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
}
@@ -226,11 +226,10 @@ static inline unsigned long long __cmpxc
unsigned long long new)
{
unsigned long long prev;
- asm volatile(LOCK_PREFIX "cmpxchg8b %3"
- : "=A"(prev)
+ asm volatile(LOCK_PREFIX "cmpxchg8b %1"
+ : "=A"(prev), "+m" (*__xg(ptr))
: "b"((unsigned long)new),
"c"((unsigned long)(new >> 32)),
- "m"(*__xg(ptr)),
"0"(old)
: "memory");
return prev;
@@ -241,11 +240,10 @@ static inline unsigned long long __cmpxc
unsigned long long new)
{
unsigned long long prev;
- asm volatile("cmpxchg8b %3"
- : "=A"(prev)
+ asm volatile("cmpxchg8b %1"
+ : "=A"(prev), "+m"(*__xg(ptr))
: "b"((unsigned long)new),
"c"((unsigned long)(new >> 32)),
- "m"(*__xg(ptr)),
"0"(old)
: "memory");
return prev;
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -26,26 +26,26 @@ static inline unsigned long __xchg(unsig
switch (size) {
case 1:
asm volatile("xchgb %b0,%1"
- : "=q" (x)
- : "m" (*__xg(ptr)), "0" (x)
+ : "=q" (x), "+m" (*__xg(ptr))
+ : "0" (x)
: "memory");
break;
case 2:
asm volatile("xchgw %w0,%1"
- : "=r" (x)
- : "m" (*__xg(ptr)), "0" (x)
+ : "=r" (x), "+m" (*__xg(ptr))
+ : "0" (x)
: "memory");
break;
case 4:
asm volatile("xchgl %k0,%1"
- : "=r" (x)
- : "m" (*__xg(ptr)), "0" (x)
+ : "=r" (x), "+m" (*__xg(ptr))
+ : "0" (x)
: "memory");
break;
case 8:
asm volatile("xchgq %0,%1"
- : "=r" (x)
- : "m" (*__xg(ptr)), "0" (x)
+ : "=r" (x), "+m" (*__xg(ptr))
+ : "0" (x)
: "memory");
break;
}
@@ -66,27 +66,27 @@ static inline unsigned long __cmpxchg(vo
unsigned long prev;
switch (size) {
case 1:
- asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile(LOCK_PREFIX "cmpxchgb %b2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "q"(new), "0"(old)
: "memory");
return prev;
case 2:
- asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile(LOCK_PREFIX "cmpxchgw %w2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
case 4:
- asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile(LOCK_PREFIX "cmpxchgl %k2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
case 8:
- asm volatile(LOCK_PREFIX "cmpxchgq %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile(LOCK_PREFIX "cmpxchgq %2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
}
@@ -105,21 +105,27 @@ static inline unsigned long __sync_cmpxc
unsigned long prev;
switch (size) {
case 1:
- asm volatile("lock; cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("lock; cmpxchgb %b2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "q"(new), "0"(old)
: "memory");
return prev;
case 2:
- asm volatile("lock; cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("lock; cmpxchgw %w2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
case 4:
- asm volatile("lock; cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("lock; cmpxchgl %k2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
+ : "memory");
+ return prev;
+ case 8:
+ asm volatile("lock; cmpxchgq %2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
}
@@ -133,27 +139,27 @@ static inline unsigned long __cmpxchg_lo
unsigned long prev;
switch (size) {
case 1:
- asm volatile("cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("cmpxchgb %b2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "q"(new), "0"(old)
: "memory");
return prev;
case 2:
- asm volatile("cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("cmpxchgw %w2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
case 4:
- asm volatile("cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("cmpxchgl %k2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
case 8:
- asm volatile("cmpxchgq %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ asm volatile("cmpxchgq %2,%1"
+ : "=a"(prev), "+m"(*__xg(ptr))
+ : "r"(new), "0"(old)
: "memory");
return prev;
}