Message-ID: <20241105215455.359471-7-david.kaplan@amd.com>
Date: Tue, 5 Nov 2024 15:54:26 -0600
From: David Kaplan <david.kaplan@....com>
To: Thomas Gleixner <tglx@...utronix.de>, Borislav Petkov <bp@...en8.de>,
Peter Zijlstra <peterz@...radead.org>, Josh Poimboeuf <jpoimboe@...nel.org>,
Pawan Gupta <pawan.kumar.gupta@...ux.intel.com>, Ingo Molnar
<mingo@...hat.com>, Dave Hansen <dave.hansen@...ux.intel.com>,
<x86@...nel.org>, "H . Peter Anvin" <hpa@...or.com>
CC: <linux-kernel@...r.kernel.org>
Subject: [PATCH v2 06/35] x86/bugs: Restructure mmio mitigation
Restructure the mmio mitigation to use select/update/apply functions,
creating consistent vulnerability handling.
Signed-off-by: David Kaplan <david.kaplan@....com>
---
arch/x86/kernel/cpu/bugs.c | 55 +++++++++++++++++++++++++++-----------
1 file changed, 39 insertions(+), 16 deletions(-)
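
[Editor's note, not part of the patch: below is a minimal standalone sketch of
the select/update/apply phasing this series converges on. All names here
(demo_select_mitigation() and friends) are hypothetical illustrations, not the
kernel's code; they only show the ordering that cpu_select_mitigations()
enforces: every bug selects its preferred mitigation first, then all update
functions run so related bugs can react to each other's choices, and only then
is anything applied.]

/* Illustrative userspace sketch; enum values and helpers are made up. */
#include <stdbool.h>
#include <stdio.h>

enum demo_mitigation { DEMO_MITIGATION_OFF, DEMO_MITIGATION_VERW };

static enum demo_mitigation demo_mitigation = DEMO_MITIGATION_OFF;
static bool other_bug_selected_verw;	/* stand-in for e.g. MDS/TAA state */

/* Phase 1: choose based only on this bug's own enumeration/cmdline. */
static void demo_select_mitigation(void)
{
	demo_mitigation = DEMO_MITIGATION_VERW;
}

/* Phase 2: revisit the choice once every bug has selected, since
 * several bugs may share the same underlying VERW-based mitigation. */
static void demo_update_mitigation(void)
{
	if (other_bug_selected_verw)
		demo_mitigation = DEMO_MITIGATION_VERW;
	printf("demo: %s\n",
	       demo_mitigation == DEMO_MITIGATION_VERW ? "VERW" : "off");
}

/* Phase 3: actually enable the chosen mitigation (feature bits,
 * static branches, SMT policy, ...). */
static void demo_apply_mitigation(void)
{
	if (demo_mitigation == DEMO_MITIGATION_OFF)
		return;
	/* enable mitigation here */
}

int main(void)
{
	demo_select_mitigation();
	demo_update_mitigation();
	demo_apply_mitigation();
	return 0;
}

[The point of the split, as in this patch: mmio_update_mitigation() can check
whether any sibling bug wants a VERW-based clear before mmio_apply_mitigation()
commits to anything.]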
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c676804dfd84..1332b70e48f8 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -68,6 +68,8 @@ static void __init taa_select_mitigation(void);
static void __init taa_update_mitigation(void);
static void __init taa_apply_mitigation(void);
static void __init mmio_select_mitigation(void);
+static void __init mmio_update_mitigation(void);
+static void __init mmio_apply_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void);
@@ -190,6 +192,7 @@ void __init cpu_select_mitigations(void)
l1tf_select_mitigation();
mds_select_mitigation();
taa_select_mitigation();
+ mmio_select_mitigation();
md_clear_select_mitigation();
srbds_select_mitigation();
l1d_flush_select_mitigation();
@@ -207,9 +210,11 @@ void __init cpu_select_mitigations(void)
*/
mds_update_mitigation();
taa_update_mitigation();
+ mmio_update_mitigation();
mds_apply_mitigation();
taa_apply_mitigation();
+ mmio_apply_mitigation();
}
/*
@@ -505,6 +510,40 @@ static void __init mmio_select_mitigation(void)
return;
}
+ if (mmio_mitigation == MMIO_MITIGATION_OFF)
+ return;
+
+ /*
+ * Check if the system has the right microcode.
+ *
+ * CPU Fill buffer clear mitigation is enumerated by either an explicit
+ * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
+ * affected systems.
+ */
+ if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
+ (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
+ boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
+ !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
+ mmio_mitigation = MMIO_MITIGATION_VERW;
+ else
+ mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
+}
+
+static void __init mmio_update_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+ return;
+
+ if (mitigate_any_verw())
+ mmio_mitigation = MMIO_MITIGATION_VERW;
+
+ pr_info("%s\n", mmio_strings[mmio_mitigation]);
+ if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+ pr_info("Unknown: No mitigations\n");
+}
+
+static void __init mmio_apply_mitigation(void)
+{
if (mmio_mitigation == MMIO_MITIGATION_OFF)
return;
@@ -533,21 +572,6 @@ static void __init mmio_select_mitigation(void)
if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
static_branch_enable(&mds_idle_clear);
- /*
- * Check if the system has the right microcode.
- *
- * CPU Fill buffer clear mitigation is enumerated by either an explicit
- * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
- * affected systems.
- */
- if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
- (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
- boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
- !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
- mmio_mitigation = MMIO_MITIGATION_VERW;
- else
- mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
-
if (mmio_nosmt || cpu_mitigations_auto_nosmt())
cpu_smt_disable(false);
}
@@ -670,7 +694,6 @@ static void __init md_clear_update_mitigation(void)
static void __init md_clear_select_mitigation(void)
{
- mmio_select_mitigation();
rfds_select_mitigation();
/*
--
2.34.1