Message-Id: <1593703107-8852-4-git-send-email-mihai.carabas@oracle.com>
Date:   Thu,  2 Jul 2020 18:18:23 +0300
From:   Mihai Carabas <mihai.carabas@...cle.com>
To:     linux-kernel@...r.kernel.org
Cc:     tglx@...utronix.de, mingo@...hat.com, bp@...en8.de, x86@...nel.org,
        boris.ostrovsky@...cle.com, konrad.wilk@...cle.com,
        Mihai Carabas <mihai.carabas@...cle.com>
Subject: [PATCH RFC 3/7] x86: kernel: cpu: bugs.c: modify static_cpu_has to boot_cpu_has

The usage of static_cpu_has() in bugs.c is counter-productive: the code is
executed only once at boot, yet extra effort is spent patching each call
site and keeping the alternatives in a special section, so there is both a
space and a time cost.
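
For illustration, a minimal sketch of what the runtime test amounts to;
the type, array size and names below are simplified stand-ins, not the
kernel's exact definitions:

struct cpuinfo_sketch {
	unsigned int x86_capability[32];	/* CPU feature bitmap */
};

static struct cpuinfo_sketch boot_cpu_data_sketch;

/* Roughly what boot_cpu_has() reduces to: a load plus a bit test. */
static inline int boot_cpu_has_sketch(unsigned int bit)
{
	return !!(boot_cpu_data_sketch.x86_capability[bit / 32] &
		  (1u << (bit % 32)));
}

static_cpu_has(), by contrast, emits code that the alternatives machinery
patches once during boot, removing the test from the fast path at the
price of the patching work and .altinstructions space noted above.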

Quote from _static_cpu_has definition:
/*
 * Static testing of CPU features. Used the same as boot_cpu_has(). It
 * statically patches the target code for additional performance. Use
 * static_cpu_has() only in fast paths, where every cycle counts. Which
 * means that the boot_cpu_has() variant is already fast enough for the
 * majority of cases and you should stick to using it as it is generally
 * only two instructions: a RIP-relative MOV and a TEST.
 */

There are two other places where static_cpu_has() is used that might be
considered fast paths: __speculation_ctrl_update() and vmx_l1d_flush().

Given these facts, change static_cpu_has() to boot_cpu_has() in order to
bypass the alternative instructions, which cannot be updated at runtime
for now.
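
To make the runtime aspect concrete, a hypothetical sketch reusing the
simplified types above: boot_cpu_has() re-reads the capability bitmap on
every call, so clearing a bit is visible on the next test, while an
already-patched static_cpu_has() site keeps whatever was baked in at boot.

/* Hypothetical helper, illustrative only. */
static inline void clear_cpu_cap_sketch(unsigned int bit)
{
	boot_cpu_data_sketch.x86_capability[bit / 32] &= ~(1u << (bit % 32));
}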

Signed-off-by: Mihai Carabas <mihai.carabas@...cle.com>
---
 arch/x86/kernel/cpu/bugs.c | 18 +++++++++---------
 arch/x86/kernel/process.c  |  8 ++++----
 arch/x86/kvm/vmx/vmx.c     |  2 +-
 3 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 1760598..21b9df3 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -168,8 +168,8 @@ void __ref check_bugs(void)
 		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
 
 		/* SSBD controlled in MSR_SPEC_CTRL */
-		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
-		    static_cpu_has(X86_FEATURE_AMD_SSBD))
+		if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+		    boot_cpu_has(X86_FEATURE_AMD_SSBD))
 			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 
 		/* Conditional STIBP enabled? */
@@ -186,8 +186,8 @@ void __ref check_bugs(void)
 	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
 	 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
 	 */
-	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
-	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
+	if (!boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
+	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD))
 		return;
 
 	/*
@@ -195,7 +195,7 @@ void __ref check_bugs(void)
 	 * virtual MSR value. If its not permanently enabled, evaluate
 	 * current's TIF_SSBD thread flag.
 	 */
-	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
+	if (boot_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
 		hostval = SPEC_CTRL_SSBD;
 	else
 		hostval = ssbd_tif_to_spec_ctrl(ti->flags);
@@ -1164,8 +1164,8 @@ static enum ssb_mitigation __ssb_select_mitigation(void)
 	 * bit in the mask to allow guests to use the mitigation even in the
 	 * case where the host does not enable it.
 	 */
-	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
-	    static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+	    boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
 		x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
 	}
 
@@ -1181,8 +1181,8 @@ static enum ssb_mitigation __ssb_select_mitigation(void)
 		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
 		 * use a completely different MSR and bit dependent on family.
 		 */
-		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
-		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+		if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
+		    !boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
 			x86_amd_ssb_disable();
 		} else {
 			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index f362ce0..6362e0c 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -546,14 +546,14 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
 	lockdep_assert_irqs_disabled();
 
 	/* Handle change of TIF_SSBD depending on the mitigation method. */
-	if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) {
 		if (tif_diff & _TIF_SSBD)
 			amd_set_ssb_virt_state(tifn);
-	} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+	} else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
 		if (tif_diff & _TIF_SSBD)
 			amd_set_core_ssb_state(tifn);
-	} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
-		   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+	} else if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+		   boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
 		updmsr |= !!(tif_diff & _TIF_SSBD);
 		msr |= ssbd_tif_to_spec_ctrl(tifn);
 	}
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index cb22f33..f08ef38 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6145,7 +6145,7 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 
 	vcpu->stat.l1d_flush++;
 
-	if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+	if (boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
 		wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
 		return;
 	}
-- 
1.8.3.1
