Message-ID: <174617666745.22196.15200555517786851071.tip-bot2@tip-bot2>
Date: Fri, 02 May 2025 09:04:27 -0000
From: "tip-bot2 for Xin Li (Intel)" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: "Xin Li (Intel)" <xin@...or.com>, Ingo Molnar <mingo@...nel.org>,
 Dave Hansen <dave.hansen@...ux.intel.com>,
 "Peter Zijlstra (Intel)" <peterz@...radead.org>,
 Andy Lutomirski <luto@...nel.org>, Brian Gerst <brgerst@...il.com>,
 Juergen Gross <jgross@...e.com>, "H. Peter Anvin" <hpa@...or.com>,
 Linus Torvalds <torvalds@...ux-foundation.org>,
 Kees Cook <keescook@...omium.org>, Borislav Petkov <bp@...en8.de>,
 Thomas Gleixner <tglx@...utronix.de>, Josh Poimboeuf <jpoimboe@...hat.com>,
 Uros Bizjak <ubizjak@...il.com>, x86@...nel.org, linux-kernel@...r.kernel.org
Subject: [tip: x86/merge] x86/msr: Move rdtsc{,_ordered}() to <asm/tsc.h>

The following commit has been merged into the x86/merge branch of tip:

Commit-ID:     288a4ff0ad29d1a9391b8a111a4b6da51da3aa85
Gitweb:        https://git.kernel.org/tip/288a4ff0ad29d1a9391b8a111a4b6da51da3aa85
Author:        Xin Li (Intel) <xin@...or.com>
AuthorDate:    Fri, 02 May 2025 10:20:14 +02:00
Committer:     Ingo Molnar <mingo@...nel.org>
CommitterDate: Fri, 02 May 2025 10:24:39 +02:00

x86/msr: Move rdtsc{,_ordered}() to <asm/tsc.h>

Relocate rdtsc{,_ordered}() from <asm/msr.h> to <asm/tsc.h>.

[ mingo: Do not remove the <asm/tsc.h> inclusion from <asm/msr.h>
         just yet, to reduce -next breakages. We can do this later
	 on, separately, shortly before the next -rc1. ]

Signed-off-by: Xin Li (Intel) <xin@...or.com>
Signed-off-by: Ingo Molnar <mingo@...nel.org>
Acked-by: Dave Hansen <dave.hansen@...ux.intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Cc: Andy Lutomirski <luto@...nel.org>
Cc: Brian Gerst <brgerst@...il.com>
Cc: Juergen Gross <jgross@...e.com>
Cc: H. Peter Anvin <hpa@...or.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Kees Cook <keescook@...omium.org>
Cc: Borislav Petkov <bp@...en8.de>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Josh Poimboeuf <jpoimboe@...hat.com>
Cc: Uros Bizjak <ubizjak@...il.com>
Link: https://lore.kernel.org/r/20250427092027.1598740-3-xin@zytor.com
---
 arch/x86/include/asm/msr.h | 54 +------------------------------------
 arch/x86/include/asm/tsc.h | 55 +++++++++++++++++++++++++++++++++++++-
 2 files changed, 55 insertions(+), 54 deletions(-)
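
[ Editorial usage note, not part of the patch: a minimal sketch of a
  hypothetical in-kernel caller after this move. The helper name
  measure_cycles() and its callback are invented for illustration; the
  point is only that rdtsc{,_ordered}() now come from <asm/tsc.h>, while
  <asm/msr.h> transitionally still pulls that header in, per the note in
  the commit message above. ]

        /* Hypothetical caller, for illustration only. */
        #include <linux/types.h>
        #include <asm/tsc.h>            /* new home of rdtsc{,_ordered}() */

        static u64 measure_cycles(void (*fn)(void))
        {
                u64 start, end;

                start = rdtsc_ordered();        /* ordered like a load from a global counter */
                fn();
                end = rdtsc_ordered();

                return end - start;
        }
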

diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 35a78d2..f5c0969 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -153,60 +153,6 @@ native_write_msr_safe(u32 msr, u32 low, u32 high)
 extern int rdmsr_safe_regs(u32 regs[8]);
 extern int wrmsr_safe_regs(u32 regs[8]);
 
-/**
- * rdtsc() - returns the current TSC without ordering constraints
- *
- * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
- * only ordering constraint it supplies is the ordering implied by
- * "asm volatile": it will put the RDTSC in the place you expect.  The
- * CPU can and will speculatively execute that RDTSC, though, so the
- * results can be non-monotonic if compared on different CPUs.
- */
-static __always_inline u64 rdtsc(void)
-{
-	EAX_EDX_DECLARE_ARGS(val, low, high);
-
-	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
-
-	return EAX_EDX_VAL(val, low, high);
-}
-
-/**
- * rdtsc_ordered() - read the current TSC in program order
- *
- * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
- * It is ordered like a load to a global in-memory counter.  It should
- * be impossible to observe non-monotonic rdtsc_unordered() behavior
- * across multiple CPUs as long as the TSC is synced.
- */
-static __always_inline u64 rdtsc_ordered(void)
-{
-	EAX_EDX_DECLARE_ARGS(val, low, high);
-
-	/*
-	 * The RDTSC instruction is not ordered relative to memory
-	 * access.  The Intel SDM and the AMD APM are both vague on this
-	 * point, but empirically an RDTSC instruction can be
-	 * speculatively executed before prior loads.  An RDTSC
-	 * immediately after an appropriate barrier appears to be
-	 * ordered as a normal load, that is, it provides the same
-	 * ordering guarantees as reading from a global memory location
-	 * that some other imaginary CPU is updating continuously with a
-	 * time stamp.
-	 *
-	 * Thus, use the preferred barrier on the respective CPU, aiming for
-	 * RDTSCP as the default.
-	 */
-	asm volatile(ALTERNATIVE_2("rdtsc",
-				   "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
-				   "rdtscp", X86_FEATURE_RDTSCP)
-			: EAX_EDX_RET(val, low, high)
-			/* RDTSCP clobbers ECX with MSR_TSC_AUX. */
-			:: "ecx");
-
-	return EAX_EDX_VAL(val, low, high);
-}
-
 static inline u64 native_read_pmc(int counter)
 {
 	EAX_EDX_DECLARE_ARGS(val, low, high);
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 94408a7..4f7f09f 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -5,10 +5,65 @@
 #ifndef _ASM_X86_TSC_H
 #define _ASM_X86_TSC_H
 
+#include <asm/asm.h>
 #include <asm/cpufeature.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
 
+/**
+ * rdtsc() - returns the current TSC without ordering constraints
+ *
+ * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
+ * only ordering constraint it supplies is the ordering implied by
+ * "asm volatile": it will put the RDTSC in the place you expect.  The
+ * CPU can and will speculatively execute that RDTSC, though, so the
+ * results can be non-monotonic if compared on different CPUs.
+ */
+static __always_inline u64 rdtsc(void)
+{
+	EAX_EDX_DECLARE_ARGS(val, low, high);
+
+	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
+
+	return EAX_EDX_VAL(val, low, high);
+}
+
+/**
+ * rdtsc_ordered() - read the current TSC in program order
+ *
+ * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
+ * It is ordered like a load to a global in-memory counter.  It should
+ * be impossible to observe non-monotonic rdtsc_unordered() behavior
+ * across multiple CPUs as long as the TSC is synced.
+ */
+static __always_inline u64 rdtsc_ordered(void)
+{
+	EAX_EDX_DECLARE_ARGS(val, low, high);
+
+	/*
+	 * The RDTSC instruction is not ordered relative to memory
+	 * access.  The Intel SDM and the AMD APM are both vague on this
+	 * point, but empirically an RDTSC instruction can be
+	 * speculatively executed before prior loads.  An RDTSC
+	 * immediately after an appropriate barrier appears to be
+	 * ordered as a normal load, that is, it provides the same
+	 * ordering guarantees as reading from a global memory location
+	 * that some other imaginary CPU is updating continuously with a
+	 * time stamp.
+	 *
+	 * Thus, use the preferred barrier on the respective CPU, aiming for
+	 * RDTSCP as the default.
+	 */
+	asm volatile(ALTERNATIVE_2("rdtsc",
+				   "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
+				   "rdtscp", X86_FEATURE_RDTSCP)
+			: EAX_EDX_RET(val, low, high)
+			/* RDTSCP clobbers ECX with MSR_TSC_AUX. */
+			:: "ecx");
+
+	return EAX_EDX_VAL(val, low, high);
+}
+
 /*
  * Standard way to access the cycle counter.
  */
