Message-ID: <20260120130735.GCaW9-F_Kj5-fgQRRs@fat_crate.local>
Date: Tue, 20 Jan 2026 14:07:35 +0100
From: Borislav Petkov <bp@...en8.de>
To: David Kaplan <david.kaplan@....com>
Cc: Thomas Gleixner <tglx@...utronix.de>,
Peter Zijlstra <peterz@...radead.org>,
Josh Poimboeuf <jpoimboe@...nel.org>,
Pawan Gupta <pawan.kumar.gupta@...ux.intel.com>,
Ingo Molnar <mingo@...hat.com>,
Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
"H . Peter Anvin" <hpa@...or.com>, Alexander Graf <graf@...zon.com>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
linux-kernel@...r.kernel.org
Subject: Re: [RFC PATCH 08/56] x86/bugs: Reset SSB mitigations

On Mon, Oct 13, 2025 at 09:33:56AM -0500, David Kaplan wrote:
> @@ -2916,6 +2937,8 @@ void x86_spec_ctrl_setup_ap(void)
>  
>  	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
>  		x86_amd_ssb_disable();
> +	else
> +		x86_amd_ssb_enable();

I'm assuming we need this for the case where we do the alternatives patching
and then some CPUs come online later, so they have to get SSBD set properly
there...

In any case, lemme suggest a simplification (I hope I've gotten the booleans
right):
---
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 6b25192560f0..e78e010b4752 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -221,24 +221,20 @@ x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
 }
 EXPORT_SYMBOL_FOR_KVM(x86_virt_spec_ctrl);
 
-static void x86_amd_ssb_disable(void)
+static void x86_amd_ssb_toggle(bool disable)
 {
-	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
+	u64 msrval = x86_amd_ls_cfg_base;
+	u64 msrvirt = 0;
 
-	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
-		wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
-	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
-		wrmsrq(MSR_AMD64_LS_CFG, msrval);
-}
-
-static void x86_amd_ssb_enable(void)
-{
-	u64 msrval = x86_amd_ls_cfg_base;
+	if (disable) {
+		msrval |= x86_amd_ls_cfg_ssbd_mask;
+		msrvirt = SPEC_CTRL_SSBD;
+	}
 
 	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
-		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, 0);
+		wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, msrvirt);
 	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
-		wrmsrl(MSR_AMD64_LS_CFG, msrval);
+		wrmsrq(MSR_AMD64_LS_CFG, msrval);
 }
 
 #undef pr_fmt
@@ -2524,7 +2520,7 @@ static void __init ssb_apply_mitigation(void)
 	 */
 	if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
 	    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
-		x86_amd_ssb_disable();
+		x86_amd_ssb_toggle(true);
 	} else {
 		x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
 		update_spec_ctrl(x86_spec_ctrl_base);
@@ -2785,10 +2781,7 @@ void x86_spec_ctrl_setup_ap(void)
 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		update_spec_ctrl(x86_spec_ctrl_base);
 
-	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
-		x86_amd_ssb_disable();
-	else
-		x86_amd_ssb_enable();
+	x86_amd_ssb_toggle(ssb_mode == SPEC_STORE_BYPASS_DISABLE);
 }
 
 bool itlb_multihit_kvm_mitigation;
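
IOW, after the diff above the consolidated helper would read roughly like this
(untested, purely for illustration; it is just the new side of the first hunk
spelled out, with the wrmsrl() calls from the removed enable path converted to
wrmsrq()):

/* Program both SSBD control schemes from a single mask setup. */
static void x86_amd_ssb_toggle(bool disable)
{
	u64 msrval = x86_amd_ls_cfg_base;
	u64 msrvirt = 0;

	if (disable) {
		/* Request SSB disable in both the LS_CFG and VIRT_SPEC_CTRL encodings. */
		msrval |= x86_amd_ls_cfg_ssbd_mask;
		msrvirt = SPEC_CTRL_SSBD;
	}

	/* Prefer the virtualized SSBD interface when it is advertised. */
	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, msrvirt);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrq(MSR_AMD64_LS_CFG, msrval);
}

x86_spec_ctrl_setup_ap() then simply passes
(ssb_mode == SPEC_STORE_BYPASS_DISABLE) as the argument, as in the last hunk.
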
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette