Message-ID: <nycvar.YFH.7.76.1809041624420.15880@cbobk.fhfr.pm>
Date:   Tue, 4 Sep 2018 16:25:20 +0200 (CEST)
From:   Jiri Kosina <jikos@...nel.org>
To:     Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Josh Poimboeuf <jpoimboe@...hat.com>,
        Andrea Arcangeli <aarcange@...hat.com>,
        "Woodhouse, David" <dwmw@...zon.co.uk>,
        Oleg Nesterov <oleg@...hat.com>,
        Tim Chen <tim.c.chen@...ux.intel.com>
cc:     linux-kernel@...r.kernel.org, x86@...nel.org
Subject: [PATCH v3 3/3] x86/speculation: Enable cross-hyperthread spectre v2
 STIBP mitigation

From: Jiri Kosina <jkosina@...e.cz>

STIBP (Single Thread Indirect Branch Predictors) is a feature provided by
certain Intel microcode updates / CPUs. Once enabled, it prevents one
hyperthread from influencing the indirect branch prediction of its sibling.
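
For context (not part of the patch): STIBP is controlled by a bit in the
IA32_SPEC_CTRL MSR (0x48); SPEC_CTRL_STIBP is bit 1 of that register, and
toggling it is exactly what the enable/disable helpers below do. As a rough
illustration only, the bit can be inspected from userspace via the msr
driver (needs root and "modprobe msr"; the file name check_stibp.c is made
up for this sketch):

/* check_stibp.c - illustrative sketch only, not part of the patch.
 * Reads IA32_SPEC_CTRL (MSR 0x48) on CPU 0 through /dev/cpu/0/msr and
 * reports whether the STIBP bit (bit 1) is currently set.  The read
 * fails with EIO on CPUs that do not implement this MSR.
 */
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_SPEC_CTRL  0x48
#define SPEC_CTRL_STIBP     (1ULL << 1)

int main(void)
{
    uint64_t val;
    int fd = open("/dev/cpu/0/msr", O_RDONLY);

    if (fd < 0) {
        perror("open /dev/cpu/0/msr");
        return 1;
    }
    /* For the msr device, the MSR index is passed as the file offset. */
    if (pread(fd, &val, sizeof(val), MSR_IA32_SPEC_CTRL) != sizeof(val)) {
        perror("pread");
        close(fd);
        return 1;
    }
    close(fd);
    printf("IA32_SPEC_CTRL = 0x%" PRIx64 ", STIBP %s\n",
           val, (val & SPEC_CTRL_STIBP) ? "set" : "clear");
    return 0;
}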

Enable this feature if all of the following hold (a rough userspace check of
the first two conditions is sketched after the list):

- the CPU is vulnerable to spectre v2
- the CPU supports SMT and has SMT siblings online
- spectre_v2 mitigation autoselection is enabled (default)
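
As a quick sanity check (again not part of the patch), the first two
conditions are visible from userspace through sysfs; a minimal sketch,
assuming a kernel that already exposes the SMT control files
(/sys/devices/system/cpu/smt/*, merged in 4.18) and using an illustrative
file name:

/* check_stibp_conditions.c - illustrative sketch only, not part of the
 * patch.  Prints the reported spectre_v2 mitigation status and whether
 * SMT siblings are currently active.  The STIBP capability itself is
 * checked in-kernel via X86_FEATURE_STIBP; the MSR sketch above can
 * serve as a rough proxy for it.
 */
#include <stdio.h>
#include <string.h>

static void show(const char *label, const char *path)
{
    char buf[256] = "";
    FILE *f = fopen(path, "r");

    if (f) {
        if (fgets(buf, sizeof(buf), f))
            buf[strcspn(buf, "\n")] = '\0';
        fclose(f);
    }
    printf("%-12s %s\n", label, buf[0] ? buf : "(unavailable)");
}

int main(void)
{
    show("spectre_v2:", "/sys/devices/system/cpu/vulnerabilities/spectre_v2");
    show("SMT active:", "/sys/devices/system/cpu/smt/active");
    return 0;
}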

Following earlier discussion, this patch leaves STIBP enabled all the time,
as doing a wrmsr on every kernel boundary crossing would be prohibitively
expensive. This could perhaps be optimized later if needed (e.g. disabling
it when going NOHZ, or experimenting with disabling it in idle).

Note: the code could be made less awkward if it were guaranteed that STIBP
can be kept enabled on a primary thread while its SMT sibling is offline,
without potentially imposing a performance penalty. This doesn't seem to be
documented anywhere though, so better safe than sorry: always flip STIBP on
both the primary and sibling threads on hotplug transitions.

Cc: stable@...r.kernel.org
Signed-off-by: Jiri Kosina <jkosina@...e.cz>
---
 arch/x86/kernel/cpu/bugs.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++
 kernel/cpu.c               | 13 +++++++++-
 2 files changed, 75 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 40bdaea97fe7..ba3df0a49a2e 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -325,6 +325,56 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 	return cmd;
 }
 
+static bool stibp_needed(void)
+{
+	if (spectre_v2_enabled == SPECTRE_V2_NONE)
+		return false;
+
+	if (cpu_smt_control != CPU_SMT_ENABLED)
+		return false;
+
+	if (!boot_cpu_has(X86_FEATURE_STIBP))
+		return false;
+
+	return true;
+}
+
+/*
+ * The read-modify-write of the MSR doesn't need any race protection here,
+ * as we're running in atomic context.
+ */
+static void enable_stibp(void *info)
+{
+	u64 mask;
+	rdmsrl(MSR_IA32_SPEC_CTRL, mask);
+	mask |= SPEC_CTRL_STIBP;
+	wrmsrl(MSR_IA32_SPEC_CTRL, mask);
+}
+
+static void disable_stibp(void *info)
+{
+	u64 mask;
+	rdmsrl(MSR_IA32_SPEC_CTRL, mask);
+	mask &= ~SPEC_CTRL_STIBP;
+	wrmsrl(MSR_IA32_SPEC_CTRL, mask);
+}
+
+void arch_smt_enable_errata(void)
+{
+	if (stibp_needed()) {
+		pr_info("Spectre v2 cross-process SMT mitigation: Enabling STIBP\n");
+		on_each_cpu(enable_stibp, NULL, 1);
+	}
+}
+
+void arch_smt_disable_errata(void)
+{
+	if (stibp_needed()) {
+		pr_info("Spectre v2 cross-process SMT mitigation: Disabling STIBP\n");
+		on_each_cpu(disable_stibp, NULL, 1);
+	}
+}
+
 static void __init spectre_v2_select_mitigation(void)
 {
 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -424,6 +474,9 @@ static void __init spectre_v2_select_mitigation(void)
 		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
 		pr_info("Enabling Restricted Speculation for firmware calls\n");
 	}
+
+	/* Enable STIBP on BP if needed */
+	arch_smt_enable_errata();
 }
 
 #undef pr_fmt
@@ -655,6 +708,16 @@ void x86_spec_ctrl_setup_ap(void)
 
 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
 		x86_amd_ssb_disable();
+
+	/*
+	 * If we are here during system bootup, enable STIBP.
+	 *
+	 * If we are here because of SMT hotplug, STIBP will be enabled by the
+	 * SMT control code (enabling here would not be sufficient, as it
+	 * needs to happen on primary threads as well).
+	 */
+	if (stibp_needed() && system_state < SYSTEM_RUNNING)
+		enable_stibp(NULL);
 }
 
 #undef pr_fmt
diff --git a/kernel/cpu.c b/kernel/cpu.c
index aa7fe85ad62e..d3613d546829 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -2025,17 +2025,27 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
 	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
 }
 
+/*
+ * Architectures that need SMT-specific errata handling during SMT hotplug
+ * should override these.
+ */
+void __weak arch_smt_enable_errata(void) { }
+void __weak arch_smt_disable_errata(void) { }
+
 static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
 {
 	int cpu, ret = 0;
 
 	cpu_maps_update_begin();
+	arch_smt_disable_errata();
 	for_each_online_cpu(cpu) {
 		if (topology_is_primary_thread(cpu))
 			continue;
 		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
-		if (ret)
+		if (ret) {
+			arch_smt_enable_errata();
 			break;
+		}
 		/*
 		 * As this needs to hold the cpu maps lock it's impossible
 		 * to call device_offline() because that ends up calling
@@ -2073,6 +2083,7 @@ static int cpuhp_smt_enable(void)
 		/* See comment in cpuhp_smt_disable() */
 		cpuhp_online_cpu_device(cpu);
 	}
+	arch_smt_enable_errata();
 	cpu_maps_update_done();
 	return ret;
 }

-- 
Jiri Kosina
SUSE Labs
