Message-Id: <20230130213955.6046-8-ashok.raj@intel.com>
Date: Mon, 30 Jan 2023 13:39:53 -0800
From: Ashok Raj <ashok.raj@...el.com>
To: Borislav Petkov <bp@...en8.de>,
Thomas Gleixner <tglx@...utronix.de>
Cc: Ashok Raj <ashok.raj@...el.com>, Tony Luck <tony.luck@...el.com>,
LKML <linux-kernel@...r.kernel.org>, x86 <x86@...nel.org>,
Ingo Molnar <mingo@...nel.org>,
Dave Hansen <dave.hansen@...el.com>,
Alison Schofield <alison.schofield@...el.com>,
Reinette Chatre <reinette.chatre@...el.com>,
Tom Lendacky <thomas.lendacky@....com>,
Stefan Talpalaru <stefantalpalaru@...oo.com>,
David Woodhouse <dwmw2@...radead.org>,
Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Jonathan Corbet <corbet@....net>,
"Rafael J . Wysocki" <rafael@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Andy Lutomirski <luto@...nel.org>,
Andrew Cooper <Andrew.Cooper3@...rix.com>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
Martin Pohlack <mpohlack@...zon.de>
Subject: [Patch v3 Part2 7/9] x86/microcode: Add a generic mechanism to declare support for minrev
Intel microcode adds meta-data that reports the minimum revision which must
already be applied before a new image can be safely late loaded. However,
there is no generic mechanism for a vendor to declare such support.
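For illustration, the meta-data could carry the minimum revision roughly
as in the sketch below. The record layout and the field name min_req_ver
are assumptions made for this sketch, not the exact meta-data format
defined by this series:

    /*
     * Illustrative sketch only: this layout and the min_req_ver name
     * are assumptions, not the exact meta-data format of this series.
     */
    struct ucode_minrev_meta {
            unsigned int type;              /* meta-data record type */
            unsigned int blk_size;          /* size of this record */
            unsigned int min_req_ver;       /* lowest revision that must
                                             * already be applied */
    };

    /* A late load is only considered safe when the currently applied
     * revision is at least the declared minimum. */
    static bool minrev_ok(unsigned int cur_rev, unsigned int min_req_ver)
    {
            return min_req_ver && cur_rev >= min_req_ver;
    }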
Add generic support to the microcode core to declare such capability. This
allows late loading to be permitted only on architectures that report
support for safe late loading.
Late loading adds support for:
- New images that declare a required minimum base version before a late
  load is performed.
Tainting only happens on architectures that don't support minimum required
version reporting.
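In other words, the core's post-load decision reduces to the following
sketch (mirroring the reload_store() hunk below):

    /* Sketch of the core's post-load tainting decision: only taint when
     * the vendor driver cannot guarantee a safe late load. */
    if (load_ret == 0 && !microcode_ops->safe_late_load) {
            add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
            pr_warn("Microcode late loading tainted the kernel\n");
    }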
Add a new variable in microcode_ops to allow an architecture to declare
support for safe microcode late loading.
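As a sketch, a vendor driver opts in by setting the new field in its ops
structure once it can enforce the minimum revision requirement. The ops
and callback names below are hypothetical; this series only sets the flag
for Intel, as in the intel.c hunk:

    static struct microcode_ops microcode_vendor_ops = {
            .safe_late_load         = true, /* safe late loading supported */
            .request_microcode_fw   = request_microcode_fw,
            .collect_cpu_info       = collect_cpu_info,
            .apply_microcode        = apply_microcode_vendor,
    };

User space still triggers the load the usual way, by writing 1 to
/sys/devices/system/cpu/microcode/reload.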
Also enable CONFIG_MICROCODE_LATE_LOADING by default, now that the kernel
enforces the "minrev" requirement strictly.
Signed-off-by: Ashok Raj <ashok.raj@...el.com>
Reviewed-by: Tony Luck <tony.luck@...el.com>
Cc: LKML <linux-kernel@...r.kernel.org>
Cc: x86 <x86@...nel.org>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: Tony Luck <tony.luck@...el.com>
Cc: Dave Hansen <dave.hansen@...el.com>
Cc: Alison Schofield <alison.schofield@...el.com>
Cc: Reinette Chatre <reinette.chatre@...el.com>
Cc: Thomas Gleixner (Intel) <tglx@...utronix.de>
Cc: Tom Lendacky <thomas.lendacky@....com>
Cc: Stefan Talpalaru <stefantalpalaru@...oo.com>
Cc: David Woodhouse <dwmw2@...radead.org>
Cc: Benjamin Herrenschmidt <benh@...nel.crashing.org>
Cc: Jonathan Corbet <corbet@....net>
Cc: Rafael J. Wysocki <rafael@...nel.org>
Cc: Peter Zijlstra (Intel) <peterz@...radead.org>
Cc: Andy Lutomirski <luto@...nel.org>
Cc: Andrew Cooper <Andrew.Cooper3@...rix.com>
Cc: Boris Ostrovsky <boris.ostrovsky@...cle.com>
Cc: Martin Pohlack <mpohlack@...zon.de>
---
arch/x86/include/asm/microcode.h | 2 ++
arch/x86/kernel/cpu/microcode/core.c | 26 +++++++++++++++++++++-----
arch/x86/kernel/cpu/microcode/intel.c | 1 +
arch/x86/Kconfig | 7 ++++---
4 files changed, 28 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index d5a58bde091c..3d48143e84a9 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -33,6 +33,8 @@ enum ucode_state {
};
struct microcode_ops {
+ bool safe_late_load;
+
enum ucode_state (*request_microcode_fw) (int cpu, struct device *);
void (*microcode_fini_cpu) (int cpu);
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index bff566c05f46..be5d70396b79 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -470,6 +470,7 @@ static ssize_t reload_store(struct device *dev,
{
enum ucode_state tmp_ret = UCODE_OK;
int bsp = boot_cpu_data.cpu_index;
+ bool safe_late_load = false;
unsigned long val;
int load_ret = -1;
ssize_t ret;
@@ -484,12 +485,25 @@ static ssize_t reload_store(struct device *dev,
if (ret)
goto put;
- pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
- pr_err("You should switch to early loading, if possible.\n");
+ safe_late_load = microcode_ops->safe_late_load;
+
+ /*
+ * If safe loading indication isn't present, bail out.
+ */
+ if (!safe_late_load) {
+ pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
+ pr_err("You should switch to early loading, if possible.\n");
+ ret = -EINVAL;
+ goto put;
+ }
tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev);
if (tmp_ret != UCODE_NEW) {
- ret = size;
+ /*
+ * If loading fails for some other reason,
+ * inform user appropriately
+ */
+ ret = (tmp_ret == UCODE_ERROR) ? -EINVAL : size;
goto put;
}
@@ -505,8 +519,10 @@ static ssize_t reload_store(struct device *dev,
*/
if (load_ret == 0) {
ret = size;
- add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
- pr_warn("Microcode late loading tainted the kernel\n");
+ if (!safe_late_load) {
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+ pr_warn("Microcode late loading tainted the kernel\n");
+ }
}
return ret;
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 4b3df85f2ca6..98c92b9affa2 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -814,6 +814,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
}
static struct microcode_ops microcode_intel_ops = {
+ .safe_late_load = true,
.request_microcode_fw = request_microcode_fw,
.collect_cpu_info = collect_cpu_info,
.apply_microcode = apply_microcode_intel,
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 3604074a878b..ddc4130e6f8c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1352,15 +1352,16 @@ config MICROCODE_AMD
processors will be enabled.
config MICROCODE_LATE_LOADING
- bool "Late microcode loading (DANGEROUS)"
- default n
+ bool "Late microcode loading"
+ default y
depends on MICROCODE
help
Loading microcode late, when the system is up and executing instructions
is a tricky business and should be avoided if possible. Just the sequence
of synchronizing all cores and SMT threads is one fragile dance which does
not guarantee that cores might not softlock after the loading. Therefore,
- use this at your own risk. Late loading taints the kernel too.
+ use this at your own risk. Late loading taints the kernel unless the
+ microcode declares a minimum required base version for the update.
config X86_MSR
tristate "/dev/cpu/*/msr - Model-specific register support"
--
2.37.2