Message-Id: <4F16E3AC020000780006D796@nat28.tlf.novell.com>
Date:	Wed, 18 Jan 2012 14:22:20 +0000
From:	"Jan Beulich" <JBeulich@...e.com>
To:	<mingo@...e.hu>, <tglx@...utronix.de>, <hpa@...or.com>
Cc:	"eric.dumazet@...il.com" <eric.dumazet@...il.com>,
	<luca@...a-barbieri.com>, <linux-kernel@...r.kernel.org>
Subject: [PATCH 1/2] ix86: adjust asm constraints in atomic64 wrappers

Eric pointed out overly restrictive constraints in atomic64_set(), but
there are issues throughout the file. In the cited case, %ebx and %ecx
are inputs only (they don't get changed by either of the two low-level
implementations), and the same was true elsewhere.
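
For illustration (not part of the patch; an i386-targeted, non-PIC
sketch with made-up names): read-write "+b"/"+c" constraints force GCC
to assume %ebx/%ecx come back modified, while plain "b"/"c" inputs let
it keep those values live across the asm:

static inline void store_pair(unsigned long long *p,
			      unsigned int lo, unsigned int hi)
{
	/* the asm reads %ebx/%ecx but never writes them, so they
	 * are inputs only - the situation in atomic64_set() */
	asm volatile("movl %%ebx, (%0)\n\t"
		     "movl %%ecx, 4(%0)"
		     : /* no outputs */
		     : "r" (p), "b" (lo), "c" (hi)
		     : "memory");
}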

Further, in many cases early-clobber indicators were missing.
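
A minimal sketch of the failure mode an early-clobber ("&") prevents
(illustrative, not from the patch): the asm writes the %eax:%edx pair
before it has finished reading the pointer, so without "&" GCC may
legally allocate the input to %eax or %edx:

static inline long long load_pair(const unsigned long long *p)
{
	long long r;

	/* "=&A" keeps the input out of %eax/%edx, which the first
	 * movl would otherwise destroy before the second movl reads
	 * through the pointer */
	asm volatile("movl (%1), %%eax\n\t"
		     "movl 4(%1), %%edx"
		     : "=&A" (r)
		     : "r" (p)
		     : "memory");
	return r;
}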

Finally, the previous implementation rolled a custom
alternative-instruction macro from scratch rather than using
alternative_call(), which was introduced by the very commit that the
description of the change in question refers to. Adjusting this has
the benefit of not hiding the referenced symbols from the compiler; it
does, however, require them to be declared in the header rather than
only in the exporting source file (which, as a desirable side effect,
allows that exporting source file to become a real 5-line stub).
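
Roughly, the visibility difference (a simplified sketch; the real
alternative_call() also emits the .altinstructions metadata used for
runtime patching, and slow_impl is a made-up name):

extern void slow_impl(void);

static inline void hidden_ref(void)
{
	/* symbol appears only inside the template string; the
	 * compiler never sees a reference to slow_impl */
	asm volatile("call slow_impl");
}

static inline void visible_ref(void)
{
	/* symbol passed as an "i" operand, as the patch's
	 * "call %P[func]" does: a genuine reference the compiler
	 * and linker can track */
	asm volatile("call %P[func]" : : [func] "i" (slow_impl));
}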

This patch does not eliminate the overly restrictive memory clobbers,
however: Doing so would occasionally make the compiler set up a second
register for accessing the memory object (to satisfy the added "m"
constraint), and it's not clear which of the two non-optimal
alternatives is better.
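
The trade-off, as a hypothetical i386 sketch (atomic64_sketch_t and
both helpers are made up; the call itself is elided from the asm):

typedef struct { long long counter; } atomic64_sketch_t;

/* variant the patch keeps: blanket "memory" clobber - pessimistic,
 * since all memory is assumed touched, but no extra register */
static inline void add_clobber(long long i, atomic64_sketch_t *v)
{
	asm volatile("" : "+A" (i), "+c" (v) : : "memory");
}

/* rejected variant: a precise "m" operand - the compiler knows
 * exactly which object is accessed, but may have to set up a
 * second register just to form the address of v->counter */
static inline void add_m(long long i, atomic64_sketch_t *v)
{
	asm volatile("" : "+A" (i), "+c" (v), "+m" (v->counter));
}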

Reported-by: Eric Dumazet <eric.dumazet@...il.com>
Signed-off-by: Jan Beulich <jbeulich@...e.com>
Cc: Luca Barbieri <luca@...a-barbieri.com>

---
 arch/x86/include/asm/alternative.h |    6 +
 arch/x86/include/asm/atomic64_32.h |  145 +++++++++++++++++++------------------
 arch/x86/lib/atomic64_32.c         |   60 ---------------
 3 files changed, 87 insertions(+), 124 deletions(-)

--- tip-i386-atomic64.orig/arch/x86/include/asm/alternative.h
+++ tip-i386-atomic64/arch/x86/include/asm/alternative.h
@@ -145,6 +145,12 @@ static inline int alternatives_text_rese
  */
 #define ASM_OUTPUT2(a...) a
 
+/*
+ * use this macro if you need clobbers but no inputs in
+ * alternative_{input,io,call}()
+ */
+#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
+
 struct paravirt_patch_site;
 #ifdef CONFIG_PARAVIRT
 void apply_paravirt(struct paravirt_patch_site *start,
--- tip-i386-atomic64.orig/arch/x86/include/asm/atomic64_32.h
+++ tip-i386-atomic64/arch/x86/include/asm/atomic64_32.h
@@ -14,13 +14,50 @@ typedef struct {
 
 #define ATOMIC64_INIT(val)	{ (val) }
 
+#ifndef __ATOMIC64_EXPORT
+/*
+ * Don't declare these as functions, even though they are - due to their
+ * non-standard calling conventions they can't be called by C code anyway.
+ */
+#define __ATOMIC64_EXPORT(sym) extern const atomic64_t atomic64_##sym[]
+#endif
+
 #ifdef CONFIG_X86_CMPXCHG64
-#define ATOMIC64_ALTERNATIVE_(f, g) "call atomic64_" #g "_cx8"
+#define __alternative_atomic64(f, g, out, in...) \
+	asm volatile("call %P[func]" \
+		     : out : [func] "i" (atomic64_##g##_cx8), ## in)
+
+#define ATOMIC64_EXPORT(sym) __ATOMIC64_EXPORT(sym##_cx8)
 #else
-#define ATOMIC64_ALTERNATIVE_(f, g) ALTERNATIVE("call atomic64_" #f "_386", "call atomic64_" #g "_cx8", X86_FEATURE_CX8)
+#define __alternative_atomic64(f, g, out, in...) \
+	alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
+			 X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)
+
+#define ATOMIC64_EXPORT(sym) __ATOMIC64_EXPORT(sym##_cx8); \
+		__ATOMIC64_EXPORT(sym##_386)
+
+__ATOMIC64_EXPORT(add_386);
+__ATOMIC64_EXPORT(sub_386);
+__ATOMIC64_EXPORT(inc_386);
+__ATOMIC64_EXPORT(dec_386);
 #endif
 
-#define ATOMIC64_ALTERNATIVE(f) ATOMIC64_ALTERNATIVE_(f, f)
+#define alternative_atomic64(f, out, in...) \
+	__alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
+
+ATOMIC64_EXPORT(read);
+ATOMIC64_EXPORT(set);
+ATOMIC64_EXPORT(xchg);
+ATOMIC64_EXPORT(add_return);
+ATOMIC64_EXPORT(sub_return);
+ATOMIC64_EXPORT(inc_return);
+ATOMIC64_EXPORT(dec_return);
+ATOMIC64_EXPORT(dec_if_positive);
+ATOMIC64_EXPORT(inc_not_zero);
+ATOMIC64_EXPORT(add_unless);
+
+#undef ATOMIC64_EXPORT
+#undef __ATOMIC64_EXPORT
 
 /**
  * atomic64_cmpxchg - cmpxchg atomic64 variable
@@ -50,11 +87,9 @@ static inline long long atomic64_xchg(at
 	long long o;
 	unsigned high = (unsigned)(n >> 32);
 	unsigned low = (unsigned)n;
-	asm volatile(ATOMIC64_ALTERNATIVE(xchg)
-		     : "=A" (o), "+b" (low), "+c" (high)
-		     : "S" (v)
-		     : "memory"
-		     );
+	alternative_atomic64(xchg, "=&A" (o),
+			     "S" (v), "b" (low), "c" (high)
+			     : "memory");
 	return o;
 }
 
@@ -69,11 +104,9 @@ static inline void atomic64_set(atomic64
 {
 	unsigned high = (unsigned)(i >> 32);
 	unsigned low = (unsigned)i;
-	asm volatile(ATOMIC64_ALTERNATIVE(set)
-		     : "+b" (low), "+c" (high)
-		     : "S" (v)
-		     : "eax", "edx", "memory"
-		     );
+	alternative_atomic64(set, /* no output */,
+			     "S" (v), "b" (low), "c" (high)
+			     : "eax", "edx", "memory");
 }
 
 /**
@@ -85,10 +118,7 @@ static inline void atomic64_set(atomic64
 static inline long long atomic64_read(const atomic64_t *v)
 {
 	long long r;
-	asm volatile(ATOMIC64_ALTERNATIVE(read)
-		     : "=A" (r), "+c" (v)
-		     : : "memory"
-		     );
+	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
 	return r;
  }
 
@@ -101,10 +131,9 @@ static inline long long atomic64_read(co
  */
 static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-	asm volatile(ATOMIC64_ALTERNATIVE(add_return)
-		     : "+A" (i), "+c" (v)
-		     : : "memory"
-		     );
+	alternative_atomic64(add_return,
+			     ASM_OUTPUT2("+A" (i), "+c" (v)),
+			     ASM_NO_INPUT_CLOBBER("memory"));
 	return i;
 }
 
@@ -113,32 +142,25 @@ static inline long long atomic64_add_ret
  */
 static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 {
-	asm volatile(ATOMIC64_ALTERNATIVE(sub_return)
-		     : "+A" (i), "+c" (v)
-		     : : "memory"
-		     );
+	alternative_atomic64(sub_return,
+			     ASM_OUTPUT2("+A" (i), "+c" (v)),
+			     ASM_NO_INPUT_CLOBBER("memory"));
 	return i;
 }
 
 static inline long long atomic64_inc_return(atomic64_t *v)
 {
 	long long a;
-	asm volatile(ATOMIC64_ALTERNATIVE(inc_return)
-		     : "=A" (a)
-		     : "S" (v)
-		     : "memory", "ecx"
-		     );
+	alternative_atomic64(inc_return, "=&A" (a),
+			     "S" (v) : "memory", "ecx");
 	return a;
 }
 
 static inline long long atomic64_dec_return(atomic64_t *v)
 {
 	long long a;
-	asm volatile(ATOMIC64_ALTERNATIVE(dec_return)
-		     : "=A" (a)
-		     : "S" (v)
-		     : "memory", "ecx"
-		     );
+	alternative_atomic64(dec_return, "=&A" (a),
+			     "S" (v) : "memory", "ecx");
 	return a;
 }
 
@@ -151,10 +173,9 @@ static inline long long atomic64_dec_ret
  */
 static inline long long atomic64_add(long long i, atomic64_t *v)
 {
-	asm volatile(ATOMIC64_ALTERNATIVE_(add, add_return)
-		     : "+A" (i), "+c" (v)
-		     : : "memory"
-		     );
+	__alternative_atomic64(add, add_return,
+			       ASM_OUTPUT2("+A" (i), "+c" (v)),
+			       ASM_NO_INPUT_CLOBBER("memory"));
 	return i;
 }
 
@@ -167,10 +188,9 @@ static inline long long atomic64_add(lon
  */
 static inline long long atomic64_sub(long long i, atomic64_t *v)
 {
-	asm volatile(ATOMIC64_ALTERNATIVE_(sub, sub_return)
-		     : "+A" (i), "+c" (v)
-		     : : "memory"
-		     );
+	__alternative_atomic64(sub, sub_return,
+			       ASM_OUTPUT2("+A" (i), "+c" (v)),
+			       ASM_NO_INPUT_CLOBBER("memory"));
 	return i;
 }
 
@@ -196,10 +216,8 @@ static inline int atomic64_sub_and_test(
  */
 static inline void atomic64_inc(atomic64_t *v)
 {
-	asm volatile(ATOMIC64_ALTERNATIVE_(inc, inc_return)
-		     : : "S" (v)
-		     : "memory", "eax", "ecx", "edx"
-		     );
+	__alternative_atomic64(inc, inc_return, /* no output */,
+			       "S" (v) : "memory", "eax", "ecx", "edx");
 }
 
 /**
@@ -210,10 +228,8 @@ static inline void atomic64_inc(atomic64
  */
 static inline void atomic64_dec(atomic64_t *v)
 {
-	asm volatile(ATOMIC64_ALTERNATIVE_(dec, dec_return)
-		     : : "S" (v)
-		     : "memory", "eax", "ecx", "edx"
-		     );
+	__alternative_atomic64(dec, dec_return, /* no output */,
+			       "S" (v) : "memory", "eax", "ecx", "edx");
 }
 
 /**
@@ -263,15 +279,16 @@ static inline int atomic64_add_negative(
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
+ * Returns non-zero if the add was done, zero otherwise.
  */
 static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	unsigned low = (unsigned)u;
 	unsigned high = (unsigned)(u >> 32);
-	asm volatile(ATOMIC64_ALTERNATIVE(add_unless) "\n\t"
-		     : "+A" (a), "+c" (v), "+S" (low), "+D" (high)
-		     : : "memory");
+	alternative_atomic64(add_unless,
+			     ASM_OUTPUT2("+A" (a), "+c" (v),
+					 "+S" (low), "+D" (high)),
+			     ASM_NO_INPUT_CLOBBER("memory"));
 	return (int)a;
 }
 
@@ -279,26 +296,20 @@ static inline int atomic64_add_unless(at
 static inline int atomic64_inc_not_zero(atomic64_t *v)
 {
 	int r;
-	asm volatile(ATOMIC64_ALTERNATIVE(inc_not_zero)
-		     : "=a" (r)
-		     : "S" (v)
-		     : "ecx", "edx", "memory"
-		     );
+	alternative_atomic64(inc_not_zero, "=&a" (r),
+			     "S" (v) : "ecx", "edx", "memory");
 	return r;
 }
 
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
 	long long r;
-	asm volatile(ATOMIC64_ALTERNATIVE(dec_if_positive)
-		     : "=A" (r)
-		     : "S" (v)
-		     : "ecx", "memory"
-		     );
+	alternative_atomic64(dec_if_positive, "=&A" (r),
+			     "S" (v) : "ecx", "memory");
 	return r;
 }
 
-#undef ATOMIC64_ALTERNATIVE
-#undef ATOMIC64_ALTERNATIVE_
+#undef alternative_atomic64
+#undef __alternative_atomic64
 
 #endif /* _ASM_X86_ATOMIC64_32_H */
--- tip-i386-atomic64.orig/arch/x86/lib/atomic64_32.c
+++ tip-i386-atomic64/arch/x86/lib/atomic64_32.c
@@ -1,59 +1,5 @@
-#include <linux/compiler.h>
-#include <linux/module.h>
-#include <linux/types.h>
+#define __ATOMIC64_EXPORT(sym) extern const atomic64_t atomic64_##sym[]; \
+	EXPORT_SYMBOL(atomic64_##sym)
 
-#include <asm/processor.h>
-#include <asm/cmpxchg.h>
+#include <linux/export.h>
 #include <linux/atomic.h>
-
-long long atomic64_read_cx8(long long, const atomic64_t *v);
-EXPORT_SYMBOL(atomic64_read_cx8);
-long long atomic64_set_cx8(long long, const atomic64_t *v);
-EXPORT_SYMBOL(atomic64_set_cx8);
-long long atomic64_xchg_cx8(long long, unsigned high);
-EXPORT_SYMBOL(atomic64_xchg_cx8);
-long long atomic64_add_return_cx8(long long a, atomic64_t *v);
-EXPORT_SYMBOL(atomic64_add_return_cx8);
-long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
-EXPORT_SYMBOL(atomic64_sub_return_cx8);
-long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
-EXPORT_SYMBOL(atomic64_inc_return_cx8);
-long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
-EXPORT_SYMBOL(atomic64_dec_return_cx8);
-long long atomic64_dec_if_positive_cx8(atomic64_t *v);
-EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
-int atomic64_inc_not_zero_cx8(atomic64_t *v);
-EXPORT_SYMBOL(atomic64_inc_not_zero_cx8);
-int atomic64_add_unless_cx8(atomic64_t *v, long long a, long long u);
-EXPORT_SYMBOL(atomic64_add_unless_cx8);
-
-#ifndef CONFIG_X86_CMPXCHG64
-long long atomic64_read_386(long long, const atomic64_t *v);
-EXPORT_SYMBOL(atomic64_read_386);
-long long atomic64_set_386(long long, const atomic64_t *v);
-EXPORT_SYMBOL(atomic64_set_386);
-long long atomic64_xchg_386(long long, unsigned high);
-EXPORT_SYMBOL(atomic64_xchg_386);
-long long atomic64_add_return_386(long long a, atomic64_t *v);
-EXPORT_SYMBOL(atomic64_add_return_386);
-long long atomic64_sub_return_386(long long a, atomic64_t *v);
-EXPORT_SYMBOL(atomic64_sub_return_386);
-long long atomic64_inc_return_386(long long a, atomic64_t *v);
-EXPORT_SYMBOL(atomic64_inc_return_386);
-long long atomic64_dec_return_386(long long a, atomic64_t *v);
-EXPORT_SYMBOL(atomic64_dec_return_386);
-long long atomic64_add_386(long long a, atomic64_t *v);
-EXPORT_SYMBOL(atomic64_add_386);
-long long atomic64_sub_386(long long a, atomic64_t *v);
-EXPORT_SYMBOL(atomic64_sub_386);
-long long atomic64_inc_386(long long a, atomic64_t *v);
-EXPORT_SYMBOL(atomic64_inc_386);
-long long atomic64_dec_386(long long a, atomic64_t *v);
-EXPORT_SYMBOL(atomic64_dec_386);
-long long atomic64_dec_if_positive_386(atomic64_t *v);
-EXPORT_SYMBOL(atomic64_dec_if_positive_386);
-int atomic64_inc_not_zero_386(atomic64_t *v);
-EXPORT_SYMBOL(atomic64_inc_not_zero_386);
-int atomic64_add_unless_386(atomic64_t *v, long long a, long long u);
-EXPORT_SYMBOL(atomic64_add_unless_386);
-#endif

