Message-Id: <20200610110037.11853-4-bp@alien8.de>
Date: Wed, 10 Jun 2020 13:00:37 +0200
From: Borislav Petkov <bp@...en8.de>
To: X86 ML <x86@...nel.org>
Cc: Tom Lendacky <thomas.lendacky@....com>,
LKML <linux-kernel@...r.kernel.org>,
Eric Morton <Eric.Morton@....com>
Subject: [PATCH 3/3] x86/msr: Add an MSR write callback
From: Borislav Petkov <bp@...e.de>
Add a callback which gets executed after an MSR is written from
userspace. This is needed because AMD's MSR_AMD64_LS_CFG MSR gets cached
in the kernel, so the cached value needs to be updated after the write,
otherwise the written value doesn't stick.
Reported-by: Eric Morton <Eric.Morton@....com>
Signed-off-by: Borislav Petkov <bp@...e.de>
---
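Note: below is a minimal, untested userspace sketch to poke the new path
through the msr character device (needs the msr module loaded and
CAP_SYS_RAWIO; 0xc0011020 is MSR_AMD64_LS_CFG from msr-index.h). The MSR
number is passed as the file offset, so the 8-byte pwrite() ends up in
msr_write() and, if the WRMSR succeeds, in msr_write_callback():

/*
 * Untested sketch: reread and rewrite MSR_AMD64_LS_CFG on CPU0 via
 * /dev/cpu/0/msr. The cross-CPU propagation in msr_write_callback()
 * only kicks in when the written value differs from the cached
 * x86_amd_ls_cfg_base.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_AMD64_LS_CFG	0xc0011020

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDWR);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}

	/* The MSR number is the file offset, the access size is 8 bytes. */
	if (pread(fd, &val, sizeof(val), MSR_AMD64_LS_CFG) != sizeof(val)) {
		perror("pread MSR_AMD64_LS_CFG");
		return 1;
	}

	printf("LS_CFG: 0x%016llx\n", (unsigned long long)val);

	/* Writing it back lands in msr_write() -> msr_write_callback(). */
	if (pwrite(fd, &val, sizeof(val), MSR_AMD64_LS_CFG) != sizeof(val)) {
		perror("pwrite MSR_AMD64_LS_CFG");
		return 1;
	}

	close(fd);
	return 0;
}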
arch/x86/include/asm/msr.h | 2 ++
arch/x86/kernel/cpu/bugs.c | 2 +-
arch/x86/kernel/cpu/common.c | 31 +++++++++++++++++++++++++++++++
arch/x86/kernel/msr.c | 3 +++
4 files changed, 37 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 71393a4c2104..d151eb8cbd51 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -335,6 +335,8 @@ void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);
+void msr_write_callback(int cpu, u32 reg, u32 lo, u32 hi);
+
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index ed54b3b21c39..bfaf303d6965 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -57,7 +57,7 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
* AMD specific MSR info for Speculative Store Bypass control.
* x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
*/
-u64 __ro_after_init x86_amd_ls_cfg_base;
+u64 x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
/* Control conditional STIBP in switch_to() */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index bed0cb83fe24..8d6fa73940a3 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -55,6 +55,7 @@
#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
+#include <asm/spec-ctrl.h>
#include <asm/uv/uv.h>
#include "cpu.h"
@@ -1960,3 +1961,33 @@ void arch_smt_update(void)
 	/* Check whether IPI broadcasting can be enabled */
 	apic_smt_update();
 }
+
+void msr_write_callback(int cpu, u32 reg, u32 lo, u32 hi)
+{
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+		return;
+
+	if (reg == MSR_AMD64_LS_CFG) {
+		u64 new = ((u64)hi << 32) | lo;
+		cpumask_var_t tmp_mask;
+
+		if (new == x86_amd_ls_cfg_base)
+			return;
+
+		if (WARN_ON_ONCE(!alloc_cpumask_var(&tmp_mask, GFP_KERNEL)))
+			return;
+
+		/*
+		 * Remove @cpu, which the MSR was just written on, from the
+		 * mask so that it doesn't get written again pointlessly.
+		 */
+		cpumask_xor(tmp_mask, cpu_online_mask, cpumask_of(cpu));
+
+		x86_amd_ls_cfg_base = new;
+
+		wrmsrl_val_on_cpus(tmp_mask, MSR_AMD64_LS_CFG, new);
+
+		free_cpumask_var(tmp_mask);
+	}
+}
+EXPORT_SYMBOL_GPL(msr_write_callback);
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 1547be359d7f..167125088eda 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -95,6 +95,9 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
 		err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]);
 		if (err)
 			break;
+
+		msr_write_callback(cpu, reg, data[0], data[1]);
+
 		tmp += 2;
 		bytes += 8;
 	}
--
2.21.0