Message-ID: <20250513164523.094d87a1@canb.auug.org.au>
Date: Tue, 13 May 2025 16:45:23 +1000
From: Stephen Rothwell <sfr@...b.auug.org.au>
To: Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar <mingo@...nel.org>,
"H. Peter Anvin" <hpa@...or.com>, Peter Zijlstra <peterz@...radead.org>
Cc: "Borislav Petkov (AMD)" <bp@...en8.de>, Dave Hansen
<dave.hansen@...ux.intel.com>, David Kaplan <david.kaplan@....com>, Linux
Kernel Mailing List <linux-kernel@...r.kernel.org>, Linux Next Mailing List
<linux-next@...r.kernel.org>, Pawan Gupta
<pawan.kumar.gupta@...ux.intel.com>
Subject: linux-next: manual merge of the tip tree with Linus' tree
Hi all,

Today's linux-next merge of the tip tree got a conflict in:

  arch/x86/kernel/cpu/bugs.c

between commits:

  073fdbe02c69 ("x86/bhi: Do not set BHI_DIS_S in 32-bit mode")
  f4818881c47f ("x86/its: Enable Indirect Target Selection mitigation")

from Linus' tree and commits:

  559c758bc722 ("x86/bugs: Restructure MDS mitigation")
  bdd7fce7a816 ("x86/bugs: Restructure TAA mitigation")
  4a5a04e61d7f ("x86/bugs: Restructure MMIO mitigation")
  203d81f8e167 ("x86/bugs: Restructure RFDS mitigation")
  2178ac58e176 ("x86/bugs: Restructure SRBDS mitigation")
  9dcad2fb31bd ("x86/bugs: Restructure GDS mitigation")
  46d5925b8eb8 ("x86/bugs: Restructure spectre_v1 mitigation")
  e3b78a7ad5ea ("x86/bugs: Restructure retbleed mitigation")
  ddfca9430a61 ("x86/bugs: Restructure spectre_v2_user mitigation")
  efe313827c98 ("x86/bugs: Restructure BHI mitigation")
  480e803dacf8 ("x86/bugs: Restructure spectre_v2 mitigation")
  5ece59a2fca6 ("x86/bugs: Restructure SSB mitigation")
  d43ba2dc8eee ("x86/bugs: Restructure L1TF mitigation")
  1f4bb068b498 ("x86/bugs: Restructure SRSO mitigation")
  4e2c719782a8 ("x86/cpu: Help users notice when running old Intel microcode")

from the tip tree.

I fixed it up (see below - but probably not completely) and can carry
the fix as necessary. This is now fixed as far as linux-next is
concerned, but any non-trivial conflicts should be mentioned to your
upstream maintainer when your tree is submitted for merging. You may
also want to consider cooperating with the maintainer of the
conflicting tree to minimise any particularly complex conflicts.
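
For reference, the tip tree commits above split each mitigation into
the select/update/apply phases documented in the new comment block in
the diff below. A minimal sketch of that shape (the "foo" bug and the
X86_BUG_FOO/X86_FEATURE_FOO_MITIGATED bits are made up for
illustration, this is not actual kernel code):

enum foo_mitigation {
	FOO_MITIGATION_AUTO,
	FOO_MITIGATION_OFF,
	FOO_MITIGATION_FULL,
};

/* Compile-time default is AUTO; the command line may override it. */
static enum foo_mitigation foo_mitigation __ro_after_init = FOO_MITIGATION_AUTO;

/* Phase 1: choose a mitigation, honouring command line options. */
static void __init foo_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_FOO) || cpu_mitigations_off())
		foo_mitigation = FOO_MITIGATION_OFF;
	else if (foo_mitigation == FOO_MITIGATION_AUTO)
		foo_mitigation = FOO_MITIGATION_FULL;
}

/* Phase 2 (optional): revisit the choice once every bug has selected. */
static void __init foo_update_mitigation(void)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE)
		foo_mitigation = FOO_MITIGATION_OFF;
}

/* Phase 3: actually enable whatever was finally chosen. */
static void __init foo_apply_mitigation(void)
{
	if (foo_mitigation == FOO_MITIGATION_FULL)
		setup_force_cpu_cap(X86_FEATURE_FOO_MITIGATED);
}

cpu_select_mitigations() runs every *_select_mitigation() first, then
the *_update_mitigation() calls (whose ordering carries the
cross-mitigation dependencies noted in the comments), and finally the
*_apply_mitigation() calls.
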
--
Cheers,
Stephen Rothwell
diff --cc arch/x86/kernel/cpu/bugs.c
index 8596ce85026c,a938fb4add65..000000000000
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@@ -34,22 -34,63 +34,64 @@@
#include "cpu.h"
+ /*
+ * Speculation Vulnerability Handling
+ *
+ * Each vulnerability is handled with the following functions:
+ * <vuln>_select_mitigation() -- Selects a mitigation to use. This should
+ * take into account all relevant command line
+ * options.
+ * <vuln>_update_mitigation() -- This is called after all vulnerabilities have
+ * selected a mitigation, in case the selection
+ * may want to change based on other choices
+ * made. This function is optional.
+ * <vuln>_apply_mitigation() -- Enable the selected mitigation.
+ *
+ * The compile-time mitigation in all cases should be AUTO. An explicit
+ * command-line option can override AUTO. If no such option is
+ * provided, <vuln>_select_mitigation() will override AUTO to the best
+ * mitigation option.
+ */
+
static void __init spectre_v1_select_mitigation(void);
+ static void __init spectre_v1_apply_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
+ static void __init spectre_v2_update_mitigation(void);
+ static void __init spectre_v2_apply_mitigation(void);
static void __init retbleed_select_mitigation(void);
+ static void __init retbleed_update_mitigation(void);
+ static void __init retbleed_apply_mitigation(void);
static void __init spectre_v2_user_select_mitigation(void);
+ static void __init spectre_v2_user_update_mitigation(void);
+ static void __init spectre_v2_user_apply_mitigation(void);
static void __init ssb_select_mitigation(void);
+ static void __init ssb_apply_mitigation(void);
static void __init l1tf_select_mitigation(void);
+ static void __init l1tf_apply_mitigation(void);
static void __init mds_select_mitigation(void);
- static void __init md_clear_update_mitigation(void);
- static void __init md_clear_select_mitigation(void);
+ static void __init mds_update_mitigation(void);
+ static void __init mds_apply_mitigation(void);
static void __init taa_select_mitigation(void);
+ static void __init taa_update_mitigation(void);
+ static void __init taa_apply_mitigation(void);
static void __init mmio_select_mitigation(void);
+ static void __init mmio_update_mitigation(void);
+ static void __init mmio_apply_mitigation(void);
+ static void __init rfds_select_mitigation(void);
+ static void __init rfds_update_mitigation(void);
+ static void __init rfds_apply_mitigation(void);
static void __init srbds_select_mitigation(void);
+ static void __init srbds_apply_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void);
+ static void __init srso_update_mitigation(void);
+ static void __init srso_apply_mitigation(void);
static void __init gds_select_mitigation(void);
+ static void __init gds_apply_mitigation(void);
+static void __init its_select_mitigation(void);
+ static void __init bhi_select_mitigation(void);
+ static void __init bhi_update_mitigation(void);
+ static void __init bhi_apply_mitigation(void);
/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
@@@ -172,22 -204,56 +213,57 @@@ void __init cpu_select_mitigations(void
spectre_v2_user_select_mitigation();
ssb_select_mitigation();
l1tf_select_mitigation();
- md_clear_select_mitigation();
+ mds_select_mitigation();
+ taa_select_mitigation();
+ mmio_select_mitigation();
+ rfds_select_mitigation();
srbds_select_mitigation();
l1d_flush_select_mitigation();
-
- /*
- * srso_select_mitigation() depends and must run after
- * retbleed_select_mitigation().
- */
srso_select_mitigation();
gds_select_mitigation();
+ bhi_select_mitigation();
+ its_select_mitigation();
+
+ /*
+ * After mitigations are selected, some may need to update their
+ * choices.
+ */
+ spectre_v2_update_mitigation();
+ /*
+ * retbleed_update_mitigation() relies on the state set by
+ * spectre_v2_update_mitigation(); specifically it wants to know about
+ * spectre_v2=ibrs.
+ */
+ retbleed_update_mitigation();
+
+ /*
+ * spectre_v2_user_update_mitigation() depends on
+ * retbleed_update_mitigation(), specifically the STIBP
+ * selection is forced for UNRET or IBPB.
+ */
+ spectre_v2_user_update_mitigation();
+ mds_update_mitigation();
+ taa_update_mitigation();
+ mmio_update_mitigation();
+ rfds_update_mitigation();
+ bhi_update_mitigation();
+ /* srso_update_mitigation() depends on retbleed_update_mitigation(). */
+ srso_update_mitigation();
+
+ spectre_v1_apply_mitigation();
+ spectre_v2_apply_mitigation();
+ retbleed_apply_mitigation();
+ spectre_v2_user_apply_mitigation();
+ ssb_apply_mitigation();
+ l1tf_apply_mitigation();
+ mds_apply_mitigation();
+ taa_apply_mitigation();
+ mmio_apply_mitigation();
+ rfds_apply_mitigation();
+ srbds_apply_mitigation();
+ srso_apply_mitigation();
+ gds_apply_mitigation();
+ bhi_apply_mitigation();
}
/*
@@@ -1173,169 -1323,8 +1333,147 @@@ static void __init retbleed_apply_mitig
if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
(retbleed_nosmt || cpu_mitigations_auto_nosmt()))
cpu_smt_disable(false);
-
- /*
- * Let IBRS trump all on Intel without affecting the effects of the
- * retbleed= cmdline option except for call depth based stuffing
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
- switch (spectre_v2_enabled) {
- case SPECTRE_V2_IBRS:
- retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
- break;
- case SPECTRE_V2_EIBRS:
- case SPECTRE_V2_EIBRS_RETPOLINE:
- case SPECTRE_V2_EIBRS_LFENCE:
- retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
- break;
- default:
- if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
- pr_err(RETBLEED_INTEL_MSG);
- }
- }
-
- pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
}
+#undef pr_fmt
+#define pr_fmt(fmt) "ITS: " fmt
+
+enum its_mitigation_cmd {
+ ITS_CMD_OFF,
+ ITS_CMD_ON,
+ ITS_CMD_VMEXIT,
+ ITS_CMD_RSB_STUFF,
+};
+
+enum its_mitigation {
+ ITS_MITIGATION_OFF,
+ ITS_MITIGATION_VMEXIT_ONLY,
+ ITS_MITIGATION_ALIGNED_THUNKS,
+ ITS_MITIGATION_RETPOLINE_STUFF,
+};
+
+static const char * const its_strings[] = {
+ [ITS_MITIGATION_OFF] = "Vulnerable",
+ [ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected",
+ [ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks",
+ [ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB",
+};
+
+static enum its_mitigation its_mitigation __ro_after_init = ITS_MITIGATION_ALIGNED_THUNKS;
+
+static enum its_mitigation_cmd its_cmd __ro_after_init =
+ IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_CMD_ON : ITS_CMD_OFF;
+
+static int __init its_parse_cmdline(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
+ pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
+ return 0;
+ }
+
+ if (!strcmp(str, "off")) {
+ its_cmd = ITS_CMD_OFF;
+ } else if (!strcmp(str, "on")) {
+ its_cmd = ITS_CMD_ON;
+ } else if (!strcmp(str, "force")) {
+ its_cmd = ITS_CMD_ON;
+ setup_force_cpu_bug(X86_BUG_ITS);
+ } else if (!strcmp(str, "vmexit")) {
+ its_cmd = ITS_CMD_VMEXIT;
+ } else if (!strcmp(str, "stuff")) {
+ its_cmd = ITS_CMD_RSB_STUFF;
+ } else {
+ pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
+ }
+
+ return 0;
+}
+early_param("indirect_target_selection", its_parse_cmdline);
+
+static void __init its_select_mitigation(void)
+{
+ enum its_mitigation_cmd cmd = its_cmd;
+
+ if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) {
+ its_mitigation = ITS_MITIGATION_OFF;
+ return;
+ }
+
+ /* Retpoline+CDT mitigates ITS, bail out */
+ if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+ boot_cpu_has(X86_FEATURE_CALL_DEPTH)) {
+ its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
+ goto out;
+ }
+
+ /* Exit early to avoid irrelevant warnings */
+ if (cmd == ITS_CMD_OFF) {
+ its_mitigation = ITS_MITIGATION_OFF;
+ goto out;
+ }
+ if (spectre_v2_enabled == SPECTRE_V2_NONE) {
+ pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
+ its_mitigation = ITS_MITIGATION_OFF;
+ goto out;
+ }
+ if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
+ !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
+ pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
+ its_mitigation = ITS_MITIGATION_OFF;
+ goto out;
+ }
+ if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
+ pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
+ its_mitigation = ITS_MITIGATION_OFF;
+ goto out;
+ }
+ if (boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
+ pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
+ its_mitigation = ITS_MITIGATION_OFF;
+ goto out;
+ }
+
+ if (cmd == ITS_CMD_RSB_STUFF &&
+ (!boot_cpu_has(X86_FEATURE_RETPOLINE) || !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING))) {
+ pr_err("RSB stuff mitigation not supported, using default\n");
+ cmd = ITS_CMD_ON;
+ }
+
+ switch (cmd) {
+ case ITS_CMD_OFF:
+ its_mitigation = ITS_MITIGATION_OFF;
+ break;
+ case ITS_CMD_VMEXIT:
+ if (boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) {
+ its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
+ goto out;
+ }
+ fallthrough;
+ case ITS_CMD_ON:
+ its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
+ if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
+ setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ set_return_thunk(its_return_thunk);
+ break;
+ case ITS_CMD_RSB_STUFF:
+ its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
+ set_return_thunk(call_depth_return_thunk);
+ if (retbleed_mitigation == RETBLEED_MITIGATION_NONE) {
+ retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
+ pr_info("Retbleed mitigation updated to stuffing\n");
+ }
+ break;
+ }
+out:
+ pr_info("%s\n", its_strings[its_mitigation]);
+}
+
#undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt
@@@ -2833,8 -2806,52 +2955,52 @@@ static void __init srso_apply_mitigatio
if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
- if (srso_mitigation != SRSO_MITIGATION_NONE)
- pr_info("%s\n", srso_strings[srso_mitigation]);
+ if (srso_mitigation == SRSO_MITIGATION_NONE) {
+ if (boot_cpu_has(X86_FEATURE_SBPB))
+ x86_pred_cmd = PRED_CMD_SBPB;
+ return;
+ }
+
+ switch (srso_mitigation) {
+ case SRSO_MITIGATION_SAFE_RET:
+ case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
+ /*
+ * Enable the return thunk for generated code
+ * like ftrace, static_call, etc.
+ */
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ setup_force_cpu_cap(X86_FEATURE_UNRET);
+
+ if (boot_cpu_data.x86 == 0x19) {
+ setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
- x86_return_thunk = srso_alias_return_thunk;
++ set_return_thunk(srso_alias_return_thunk);
+ } else {
+ setup_force_cpu_cap(X86_FEATURE_SRSO);
- x86_return_thunk = srso_return_thunk;
++ set_return_thunk(srso_return_thunk);
+ }
+ break;
+ case SRSO_MITIGATION_IBPB:
+ setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+ /*
+ * IBPB on entry already obviates the need for
+ * software-based untraining so clear those in case some
+ * other mitigation like Retbleed has selected them.
+ */
+ setup_clear_cpu_cap(X86_FEATURE_UNRET);
+ setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+ fallthrough;
+ case SRSO_MITIGATION_IBPB_ON_VMEXIT:
+ setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+ /*
+ * There is no need for RSB filling: entry_ibpb() ensures
+ * all predictions, including the RSB, are invalidated,
+ * regardless of IBPB implementation.
+ */
+ setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+ break;
+ default:
+ break;
+ }
}
#undef pr_fmt
@@@ -2949,11 -2963,14 +3112,19 @@@ static ssize_t rfds_show_state(char *bu
return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
}
+static ssize_t its_show_state(char *buf)
+{
+ return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
+}
+
+ static ssize_t old_microcode_show_state(char *buf)
+ {
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return sysfs_emit(buf, "Unknown: running under hypervisor");
+
+ return sysfs_emit(buf, "Vulnerable\n");
+ }
+
static char *stibp_state(void)
{
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
@@@ -3136,9 -3152,9 +3306,12 @@@ static ssize_t cpu_show_common(struct d
case X86_BUG_RFDS:
return rfds_show_state(buf);
+ case X86_BUG_ITS:
+ return its_show_state(buf);
+
+ case X86_BUG_OLD_MICROCODE:
+ return old_microcode_show_state(buf);
+
default:
break;
}
@@@ -3219,10 -3232,10 +3389,15 @@@ ssize_t cpu_show_reg_file_data_sampling
return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
}
+ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
+}
++
+ ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+ return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE);
+ }
#endif
void __warn_thunk(void)
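
As an aside, the new "indirect_target_selection=" option in the diff is
wired up via the kernel's standard early_param() hook, like the other
bug mitigation switches. A stripped-down sketch of the same pattern,
continuing the hypothetical "foo" example from before the diff (not the
actual code above):

#include <linux/cache.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/string.h>

static int __init foo_parse_cmdline(char *str)
{
	/* "foo" was given on the command line with no value */
	if (!str)
		return -EINVAL;

	/* An explicit option overrides the compile-time AUTO default. */
	if (!strcmp(str, "off"))
		foo_mitigation = FOO_MITIGATION_OFF;
	else if (!strcmp(str, "on"))
		foo_mitigation = FOO_MITIGATION_FULL;
	else
		pr_err("Ignoring unknown foo option (%s).\n", str);

	return 0;
}
/* early_param() hooks run before cpu_select_mitigations(). */
early_param("foo", foo_parse_cmdline);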
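
The sysfs reporting side follows the same cpu_show_*() pattern as the
existing bugs: a small helper formats the currently selected mitigation
with sysfs_emit(), as its_show_state() does above. Continuing the same
hypothetical "foo" sketch:

/* Human-readable state strings, indexed by the selected mitigation. */
static const char * const foo_strings[] = {
	[FOO_MITIGATION_AUTO]	= "Vulnerable",
	[FOO_MITIGATION_OFF]	= "Vulnerable",
	[FOO_MITIGATION_FULL]	= "Mitigation: Full",
};

static ssize_t foo_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", foo_strings[foo_mitigation]);
}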