Date:   Thu,  2 Jul 2020 18:18:25 +0300
From:   Mihai Carabas <mihai.carabas@...cle.com>
To:     linux-kernel@...r.kernel.org
Cc:     tglx@...utronix.de, mingo@...hat.com, bp@...en8.de, x86@...nel.org,
        boris.ostrovsky@...cle.com, konrad.wilk@...cle.com,
        Mihai Carabas <mihai.carabas@...cle.com>
Subject: [PATCH RFC 5/7] x86: microcode: late loading feature and bug evaluation

While doing microcode late loading, we need to probe all the
CPU features again after the new microcode has been loaded. Before
probing the CPU features and bugs, we need to clear the current
bug bits. The new function, cpu_clear_bug_bits, clears all the
bug bits; they are set again afterwards based on the re-probed
features.

The logic is as follows (condensed in the sketch after this list):

- for the boot CPU, call cpu_clear_bug_bits, get_cpu_cap and then
cpu_set_bug_bits

- meanwhile, all the other CPUs wait, because they need the
information about the forced caps from the boot CPU

- in the last step, every CPU calls cpu_clear_bug_bits and the bug
bits are set again by get_cpu_cap through apply_forced_caps

- also, when the microcode feature for disabling TSX is not available
at boot time, taa_select_mitigation cannot disable TSX to properly
mitigate TAA; therefore, call tsx_init on each CPU after the new
microcode has been loaded

Signed-off-by: Mihai Carabas <mihai.carabas@...cle.com>
---
 arch/x86/include/asm/microcode.h     |  3 +++
 arch/x86/kernel/cpu/common.c         | 28 +++++++++++++++++++++++++++-
 arch/x86/kernel/cpu/microcode/core.c | 26 ++++++++++++++++++++++++++
 3 files changed, 56 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 2b7cc53..7a6a5aa 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -142,4 +142,7 @@ static inline void reload_early_microcode(void)			{ }
 get_builtin_firmware(struct cpio_data *cd, const char *name)	{ return false; }
 #endif
 
+void cpu_set_bug_bits(struct cpuinfo_x86 *c);
+void cpu_clear_bug_bits(struct cpuinfo_x86 *c);
+
 #endif /* _ASM_X86_MICROCODE_H */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c11daa6..f722c1e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1101,6 +1101,32 @@ static bool cpu_matches(const struct x86_cpu_id *table, unsigned long which)
 	return m && !!(m->driver_data & which);
 }
 
+void cpu_clear_bug_bits(struct cpuinfo_x86 *c)
+{
+	int i;
+	unsigned int bugs[] = {
+		X86_BUG_SPECTRE_V1,
+		X86_BUG_SPECTRE_V2,
+		X86_BUG_SPEC_STORE_BYPASS,
+		X86_FEATURE_IBRS_ENHANCED,
+		X86_BUG_MDS,
+		X86_BUG_MSBDS_ONLY,
+		X86_BUG_SWAPGS,
+		X86_BUG_TAA,
+		X86_BUG_SRBDS,
+		X86_BUG_CPU_MELTDOWN,
+		X86_BUG_L1TF
+	};
+
+	for (i = 0; i < ARRAY_SIZE(bugs); i++)
+		clear_cpu_cap(c, bugs[i]);
+
+	if (c->cpu_index == boot_cpu_data.cpu_index) {
+		for (i = 0; i < ARRAY_SIZE(bugs); i++)
+			setup_clear_cpu_cap(bugs[i]);
+	}
+}
+
 u64 x86_read_arch_cap_msr(void)
 {
 	u64 ia32_cap = 0;
@@ -1111,7 +1137,7 @@ u64 x86_read_arch_cap_msr(void)
 	return ia32_cap;
 }
 
-static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+void cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
 	u64 ia32_cap = x86_read_arch_cap_msr();
 
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index baec68b..2cd983a 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -40,6 +40,8 @@
 #include <asm/cmdline.h>
 #include <asm/setup.h>
 
+#include "../cpu.h"
+
 #define DRIVER_VERSION	"2.2"
 
 static struct microcode_ops	*microcode_ops;
@@ -542,6 +544,20 @@ static int __wait_for_cpus(atomic_t *t, long long timeout)
 	return 0;
 }
 
+static void update_cpu_caps(struct cpuinfo_x86 *c)
+{
+	cpu_clear_bug_bits(c);
+
+	/*
+	 * If we are at late loading, we need to re-initialize tsx because
+	 * MSR_IA32_TSX_CTRL might be available as result of the microcode
+	 * update.
+	 */
+	tsx_init();
+
+	get_cpu_cap(c);
+}
+
 /*
  * Returns:
  * < 0 - on error
@@ -550,6 +566,7 @@ static int __wait_for_cpus(atomic_t *t, long long timeout)
 static int __reload_late(void *info)
 {
 	int cpu = smp_processor_id();
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	enum ucode_state err;
 	int ret = 0;
 
@@ -579,6 +596,12 @@ static int __reload_late(void *info)
 		ret = -1;
 	}
 
+	if (ret == 0 && c->cpu_index == boot_cpu_data.cpu_index) {
+		update_cpu_caps(c);
+		memcpy(&boot_cpu_data, c, sizeof(boot_cpu_data));
+		cpu_set_bug_bits(c);
+	}
+
 wait_for_siblings:
 	if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC))
 		panic("Timeout during microcode update!\n");
@@ -592,6 +615,9 @@ static int __reload_late(void *info)
 	if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu)
 		apply_microcode_local(&err);
 
+	if (ret == 0 && c->cpu_index != boot_cpu_data.cpu_index)
+		update_cpu_caps(c);
+
 	return ret;
 }
 
-- 
1.8.3.1
