lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20240103031409.2504051-11-dapeng1.mi@linux.intel.com>
Date: Wed,  3 Jan 2024 11:14:08 +0800
From: Dapeng Mi <dapeng1.mi@...ux.intel.com>
To: Sean Christopherson <seanjc@...gle.com>,
	Paolo Bonzini <pbonzini@...hat.com>,
	Jim Mattson <jmattson@...gle.com>
Cc: kvm@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	Zhenyu Wang <zhenyuw@...ux.intel.com>,
	Zhang Xiong <xiong.y.zhang@...el.com>,
	Mingwei Zhang <mizhang@...gle.com>,
	Like Xu <like.xu.linux@...il.com>,
	Jinrong Liang <cloudliang@...cent.com>,
	Dapeng Mi <dapeng1.mi@...el.com>,
	Dapeng Mi <dapeng1.mi@...ux.intel.com>
Subject: [kvm-unit-tests Patch v3 10/11] x86: pmu: Add IBPB indirect jump asm blob

Currently the lower boundary of branch misses event is set to 0.
Strictly speaking 0 shouldn't be a valid count since it can't tell us whether
the branch misses event counter works correctly or is even disabled. Whereas
it's also possible and reasonable that the branch misses event count is 0,
especially for such a simple loop() program with an advanced branch
predictor.

To eliminate such ambiguity and make branch misses event verification
more accurate, an extra IBPB indirect jump asm blob is appended and the
IBPB command is leveraged to clear the branch target buffer and force to
cause a branch miss for the indirect jump.

Suggested-by: Jim Mattson <jmattson@...gle.com>
Signed-off-by: Dapeng Mi <dapeng1.mi@...ux.intel.com>
---
 x86/pmu.c | 56 +++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 42 insertions(+), 14 deletions(-)

diff --git a/x86/pmu.c b/x86/pmu.c
index 8fd3db0fbf81..c8d4a0dcd362 100644
--- a/x86/pmu.c
+++ b/x86/pmu.c
@@ -27,14 +27,26 @@
 	"nop; nop; nop; nop; nop; nop; nop;\n\t"	\
 	"loop 1b;\n\t"
 
-/*Enable GLOBAL_CTRL + disable GLOBAL_CTRL + clflush/mfence instructions */
-#define PRECISE_EXTRA_INSTRNS  (2 + 4 + 2)
+#define IBPB_JMP_INSTRNS      7
+#define IBPB_JMP_BRANCHES     1
+#define IBPB_JMP_ASM(_wrmsr)				\
+	"mov $1, %%eax; xor %%edx, %%edx;\n\t"		\
+	"mov $73, %%ecx;\n\t"				\
+	_wrmsr "\n\t"					\
+	"lea 2f, %%rax;\n\t"				\
+	"jmp *%%rax;\n\t"				\
+	"nop;\n\t"					\
+	"2: nop;\n\t"
+
+/* GLOBAL_CTRL enable + disable + clflush/mfence + IBPB_JMP */
+#define PRECISE_EXTRA_INSTRNS  (2 + 4 + 2 + IBPB_JMP_INSTRNS)
 #define PRECISE_LOOP_INSTRNS   (N * LOOP_INSTRNS + PRECISE_EXTRA_INSTRNS)
-#define PRECISE_LOOP_BRANCHES  (N)
-#define PRECISE_LOOP_ASM(_clflush)					\
+#define PRECISE_LOOP_BRANCHES  (N + IBPB_JMP_BRANCHES)
+#define PRECISE_LOOP_ASM(_clflush, _wrmsr)				\
 	"wrmsr;\n\t"							\
 	"mov %%ecx, %%edi; mov %%ebx, %%ecx;\n\t"			\
 	LOOP_ASM(_clflush)						\
+	IBPB_JMP_ASM(_wrmsr)						\
 	"mov %%edi, %%ecx; xor %%eax, %%eax; xor %%edx, %%edx;\n\t"	\
 	"wrmsr;\n\t"
 
@@ -74,30 +86,42 @@ char *buf;
 static struct pmu_event *gp_events;
 static unsigned int gp_events_size;
 
-#define _loop_asm(_clflush)					\
+#define _loop_asm(_clflush, _wrmsr)				\
 do {								\
 	asm volatile(LOOP_ASM(_clflush)				\
+		     IBPB_JMP_ASM(_wrmsr)			\
 		     : "=c"(tmp), "=r"(tmp2), "=r"(tmp3)	\
-		     : "0"(N), "1"(buf));			\
+		     : "0"(N), "1"(buf)				\
+		     : "eax", "edx");				\
 } while (0)
 
-#define _precise_loop_asm(_clflush)				\
+#define _precise_loop_asm(_clflush, _wrmsr)			\
 do {								\
-	asm volatile(PRECISE_LOOP_ASM(_clflush)			\
+	asm volatile(PRECISE_LOOP_ASM(_clflush, _wrmsr)		\
 		     : "=b"(tmp), "=r"(tmp2), "=r"(tmp3)	\
 		     : "a"(eax), "d"(edx), "c"(global_ctl),	\
 		       "0"(N), "1"(buf)				\
 		     : "edi");					\
 } while (0)
 
+static int has_ibpb(void)
+{
+	return this_cpu_has(X86_FEATURE_SPEC_CTRL) ||
+	       this_cpu_has(X86_FEATURE_AMD_IBPB);
+}
+
 static inline void __loop(void)
 {
 	unsigned long tmp, tmp2, tmp3;
 
-	if (this_cpu_has(X86_FEATURE_CLFLUSH))
-		_loop_asm("clflush (%1)");
+	if (this_cpu_has(X86_FEATURE_CLFLUSH) && has_ibpb())
+		_loop_asm("clflush (%1)", "wrmsr");
+	else if (this_cpu_has(X86_FEATURE_CLFLUSH))
+		_loop_asm("clflush (%1)", "nop");
+	else if (has_ibpb())
+		_loop_asm("nop", "wrmsr");
 	else
-		_loop_asm("nop");
+		_loop_asm("nop", "nop");
 }
 
 /*
@@ -114,10 +138,14 @@ static inline void __precise_count_loop(u64 cntrs)
 	u32 eax = cntrs & (BIT_ULL(32) - 1);
 	u32 edx = cntrs >> 32;
 
-	if (this_cpu_has(X86_FEATURE_CLFLUSH))
-		_precise_loop_asm("clflush (%1)");
+	if (this_cpu_has(X86_FEATURE_CLFLUSH) && has_ibpb())
+		_precise_loop_asm("clflush (%1)", "wrmsr");
+	else if (this_cpu_has(X86_FEATURE_CLFLUSH))
+		_precise_loop_asm("clflush (%1)", "nop");
+	else if (has_ibpb())
+		_precise_loop_asm("nop", "wrmsr");
 	else
-		_precise_loop_asm("nop");
+		_precise_loop_asm("nop", "nop");
 }
 
 static inline void loop(u64 cntrs)
-- 
2.34.1


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ