Message-ID: <6a9e1c01f21ba64aefc7082f660615508c3df8f3.1741988314.git.jpoimboe@kernel.org>
Date: Fri, 14 Mar 2025 14:41:21 -0700
From: Josh Poimboeuf <jpoimboe@...nel.org>
To: x86@...nel.org
Cc: linux-kernel@...r.kernel.org,
	Linus Torvalds <torvalds@...ux-foundation.org>,
	Peter Zijlstra <peterz@...radead.org>,
	Borislav Petkov <bp@...en8.de>,
	"H. Peter Anvin" <hpa@...or.com>,
	Uros Bizjak <ubizjak@...il.com>,
	Andrew Cooper <andrew.cooper3@...rix.com>,
	Ingo Molnar <mingo@...nel.org>
Subject: [PATCH 08/20] x86/asm: Replace ASM_{OUTPUT,INPUT}() with ARG()

Replace ASM_OUTPUT() and ASM_INPUT() with a single ARG() macro.  ARG()
provides more visual separation and vertical alignment, making it
easier to distinguish the outputs, inputs and clobbers from one
another.  It will also come in handy for other inline asm wrappers.
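
As an illustration, here is a minimal sketch of the intended usage (the
MY_ASM() wrapper and the bsr example are hypothetical, not part of this
patch).  Because the commas inside ARG(...) are protected by its
parentheses, each operand list travels through a wrapper macro as
exactly one preprocessor argument and expands back in place:

	#define ARG(x...) x	/* as added to <asm/asm.h> by this patch */

	/* Hypothetical wrapper: each operand list arrives as one argument. */
	#define MY_ASM(insn, outputs, inputs, clobbers)			\
		asm volatile(insn : outputs : inputs : clobbers)

	static inline unsigned int my_bsr(unsigned int x)
	{
		unsigned int r;

		MY_ASM("bsr %[in], %[out]",
		       ARG([out] "=r" (r)),	/* outputs  */
		       ARG([in] "rm" (x)),	/* inputs   */
		       ARG("cc"));		/* clobbers */
		return r;
	}

At the call site the outputs, inputs and clobbers then line up
vertically, much like the ':' separators do in a raw asm statement, and
an empty list can be passed explicitly as ARG().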

Signed-off-by: Josh Poimboeuf <jpoimboe@...nel.org>
---
 arch/x86/include/asm/apic.h          |  2 +-
 arch/x86/include/asm/asm.h           | 11 ++--
 arch/x86/include/asm/atomic64_32.h   | 93 +++++++++++++++-------------
 arch/x86/include/asm/page_64.h       | 12 ++--
 arch/x86/include/asm/segment.h       |  5 +-
 arch/x86/include/asm/special_insns.h |  2 +-
 6 files changed, 67 insertions(+), 58 deletions(-)

diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index ecf1b229f09b..6526bad6ec81 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -100,7 +100,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
 
 	alternative_io("movl %[val], %[mem]",
 		       "xchgl %[val], %[mem]", X86_BUG_11AP,
-		       ASM_OUTPUT([val] "+r" (v), [mem] "+m" (*addr)));
+		       ARG([val] "+r" (v), [mem] "+m" (*addr)));
 }
 
 static inline u32 native_apic_mem_read(u32 reg)
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 619817841f4c..9f0f830628f9 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -212,11 +212,14 @@ static __always_inline __pure void *rip_rel_ptr(void *p)
 #define __COMMA(...)		, ##__VA_ARGS__
 
 /*
- * Combine multiple asm inline constraint args into a single arg for passing to
- * another macro.
+ * ARG() can be used to bundle multiple arguments into a single argument for
+ * passing to a macro.
+ *
+ * For inline asm constraint operands, this is recommended even for single
+ * operands as it provides visual separation and vertical alignment similar to
+ * the ':' characters in an inline asm statement.
  */
-#define ASM_OUTPUT(x...)	x
-#define ASM_INPUT(x...)		x
+#define ARG(x...) x
 
 /*
  * This output constraint should be used for any inline asm which has a "call"
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index ab838205c1c6..8775f84222e6 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -59,9 +59,11 @@ static __always_inline s64 arch_atomic64_read_nonatomic(const atomic64_t *v)
 #define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
 #else
 #define __alternative_atomic64(f, g, out, in, clobbers...)		\
-	alternative_call(atomic64_##f##_386, atomic64_##g##_cx8,	\
-			 X86_FEATURE_CX8, ASM_OUTPUT(out),		\
-			 ASM_INPUT(in), clobbers)
+	alternative_call(atomic64_##f##_386,				\
+			 atomic64_##g##_cx8, X86_FEATURE_CX8,		\
+			 ARG(out),					\
+			 ARG(in),					\
+			 ARG(clobbers))
 
 #define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
 	ATOMIC64_DECL_ONE(sym##_386)
@@ -73,7 +75,7 @@ ATOMIC64_DECL_ONE(dec_386);
 #endif
 
 #define alternative_atomic64(f, out, in, clobbers...) \
-	__alternative_atomic64(f, f, ASM_OUTPUT(out), ASM_INPUT(in), clobbers)
+	__alternative_atomic64(f, f, ARG(out), ARG(in), ARG(clobbers))
 
 ATOMIC64_DECL(read);
 ATOMIC64_DECL(set);
@@ -109,9 +111,9 @@ static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
 	unsigned high = (unsigned)(n >> 32);
 	unsigned low = (unsigned)n;
 	alternative_atomic64(xchg,
-			     "=&A" (o),
-			     ASM_INPUT("S" (v), "b" (low), "c" (high)),
-			     "memory");
+			     ARG("=&A" (o)),
+			     ARG("S" (v), "b" (low), "c" (high)),
+			     ARG("memory"));
 	return o;
 }
 #define arch_atomic64_xchg arch_atomic64_xchg
@@ -121,24 +123,27 @@ static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
 	unsigned high = (unsigned)(i >> 32);
 	unsigned low = (unsigned)i;
 	alternative_atomic64(set,
-			     /* no output */,
-			     ASM_INPUT("S" (v), "b" (low), "c" (high)),
-			     "eax", "edx", "memory");
+			     ARG(),
+			     ARG("S" (v), "b" (low), "c" (high)),
+			     ARG("eax", "edx", "memory"));
 }
 
 static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
 {
 	s64 r;
-	alternative_atomic64(read, "=&A" (r), "c" (v), "memory");
+	alternative_atomic64(read,
+			     ARG("=&A" (r)),
+			     ARG("c" (v)),
+			     ARG("memory"));
 	return r;
 }
 
 static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 {
 	alternative_atomic64(add_return,
-			     ASM_OUTPUT("+A" (i), "+c" (v)),
-			     /* no input */,
-			     "memory");
+			     ARG("+A" (i), "+c" (v)),
+			     ARG(),
+			     ARG("memory"));
 	return i;
 }
 #define arch_atomic64_add_return arch_atomic64_add_return
@@ -146,9 +151,9 @@ static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
 {
 	alternative_atomic64(sub_return,
-			     ASM_OUTPUT("+A" (i), "+c" (v)),
-			     /* no input */,
-			     "memory");
+			     ARG("+A" (i), "+c" (v)),
+			     ARG(),
+			     ARG("memory"));
 	return i;
 }
 #define arch_atomic64_sub_return arch_atomic64_sub_return
@@ -157,9 +162,9 @@ static __always_inline s64 arch_atomic64_inc_return(atomic64_t *v)
 {
 	s64 a;
 	alternative_atomic64(inc_return,
-			     "=&A" (a),
-			     "S" (v),
-			     "memory", "ecx");
+			     ARG("=&A" (a)),
+			     ARG("S" (v)),
+			     ARG("memory", "ecx"));
 	return a;
 }
 #define arch_atomic64_inc_return arch_atomic64_inc_return
@@ -168,9 +173,9 @@ static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
 {
 	s64 a;
 	alternative_atomic64(dec_return,
-			     "=&A" (a),
-			     "S" (v),
-			     "memory", "ecx");
+			     ARG("=&A" (a)),
+			     ARG("S" (v)),
+			     ARG("memory", "ecx"));
 	return a;
 }
 #define arch_atomic64_dec_return arch_atomic64_dec_return
@@ -178,34 +183,34 @@ static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
 static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
 {
 	__alternative_atomic64(add, add_return,
-			       ASM_OUTPUT("+A" (i), "+c" (v)),
-			       /* no input */,
-			       "memory");
+			       ARG("+A" (i), "+c" (v)),
+			       ARG(),
+			       ARG("memory"));
 }
 
 static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
 {
 	__alternative_atomic64(sub, sub_return,
-			       ASM_OUTPUT("+A" (i), "+c" (v)),
-			       /* no input */,
-			       "memory");
+			       ARG("+A" (i), "+c" (v)),
+			       ARG(),
+			       ARG("memory"));
 }
 
 static __always_inline void arch_atomic64_inc(atomic64_t *v)
 {
 	__alternative_atomic64(inc, inc_return,
-			       /* no output */,
-			       "S" (v),
-			       "memory", "eax", "ecx", "edx");
+			       ARG(),
+			       ARG("S" (v)),
+			       ARG("memory", "eax", "ecx", "edx"));
 }
 #define arch_atomic64_inc arch_atomic64_inc
 
 static __always_inline void arch_atomic64_dec(atomic64_t *v)
 {
 	__alternative_atomic64(dec, dec_return,
-			       /* no output */,
-			       "S" (v),
-			       "memory", "eax", "ecx", "edx");
+			       ARG(),
+			       ARG("S" (v)),
+			       ARG("memory", "eax", "ecx", "edx"));
 }
 #define arch_atomic64_dec arch_atomic64_dec
 
@@ -214,9 +219,9 @@ static __always_inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
 	unsigned low = (unsigned)u;
 	unsigned high = (unsigned)(u >> 32);
 	alternative_atomic64(add_unless,
-			     ASM_OUTPUT("+A" (a), "+c" (low), "+D" (high)),
-			     "S" (v),
-			     "memory");
+			     ARG("+A" (a), "+c" (low), "+D" (high)),
+			     ARG("S" (v)),
+			     ARG("memory"));
 	return (int)a;
 }
 #define arch_atomic64_add_unless arch_atomic64_add_unless
@@ -225,9 +230,9 @@ static __always_inline int arch_atomic64_inc_not_zero(atomic64_t *v)
 {
 	int r;
 	alternative_atomic64(inc_not_zero,
-			     "=&a" (r),
-			     "S" (v),
-			     "ecx", "edx", "memory");
+			     ARG("=&a" (r)),
+			     ARG("S" (v)),
+			     ARG("ecx", "edx", "memory"));
 	return r;
 }
 #define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
@@ -236,9 +241,9 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
 	s64 r;
 	alternative_atomic64(dec_if_positive,
-			     "=&A" (r),
-			     "S" (v),
-			     "ecx", "memory");
+			     ARG("=&A" (r)),
+			     ARG("S" (v)),
+			     ARG("ecx", "memory"));
 	return r;
 }
 #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index db3003acd41e..0604e9d49221 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -54,9 +54,9 @@ static inline void clear_page(void *page)
 	alternative_call_2(clear_page_orig,
 			   clear_page_rep, X86_FEATURE_REP_GOOD,
 			   clear_page_erms, X86_FEATURE_ERMS,
-			   "=D" (page),
-			   "D" (page),
-			   "cc", "memory", "rax", "rcx");
+			   ARG("=D" (page)),
+			   ARG("D" (page)),
+			   ARG("cc", "memory", "rax", "rcx"));
 }
 
 void copy_page(void *to, void *from);
@@ -87,9 +87,9 @@ static __always_inline unsigned long task_size_max(void)
 
 	alternative_io("movq %[small], %[ret]",
 		       "movq %[large], %[ret]", X86_FEATURE_LA57,
-			[ret] "=r" (ret),
-			[small] "i" ((1ul << 47)-PAGE_SIZE),
-			[large] "i" ((1ul << 56)-PAGE_SIZE));
+			ARG([ret] "=r" (ret)),
+			ARG([small] "i" ((1ul << 47)-PAGE_SIZE),
+			    [large] "i" ((1ul << 56)-PAGE_SIZE)));
 
 	return ret;
 }
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 9d6411c65920..32b1aa9f721b 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -254,10 +254,11 @@ static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
 	 *
 	 * If RDPID is available, use it.
 	 */
-	alternative_io ("lsl %[seg],%[p]",
+	alternative_io("lsl %[seg],%[p]",
 			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
 			X86_FEATURE_RDPID,
-			[p] "=a" (p), [seg] "r" (__CPUNODE_SEG));
+			ARG([p] "=a" (p)),
+			ARG([seg] "r" (__CPUNODE_SEG)));
 
 	if (cpu)
 		*cpu = (p & VDSO_CPUNODE_MASK);
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 9c1cc0ef8f3c..a6a3f4c95f03 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -178,7 +178,7 @@ static inline void clflushopt(volatile void *__p)
 {
 	alternative_io(".byte 0x3e; clflush %[val]",
 		       ".byte 0x66; clflush %[val]", X86_FEATURE_CLFLUSHOPT,
-		       [val] "+m" (*(volatile char __force *)__p));
+		       ARG([val] "+m" (*(volatile char __force *)__p)));
 }
 
 static inline void clwb(volatile void *__p)
-- 
2.48.1

