Message-Id: <20200206070412.17400-3-xiaoyao.li@intel.com>
Date: Thu, 6 Feb 2020 15:04:06 +0800
From: Xiaoyao Li <xiaoyao.li@...el.com>
To: Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
hpa@...or.com, Paolo Bonzini <pbonzini@...hat.com>,
Sean Christopherson <sean.j.christopherson@...el.com>,
Andy Lutomirski <luto@...nel.org>, tony.luck@...el.com
Cc: peterz@...radead.org, fenghua.yu@...el.com, x86@...nel.org,
kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
Xiaoyao Li <xiaoyao.li@...el.com>
Subject: [PATCH v3 2/8] x86/split_lock: Ensure X86_FEATURE_SPLIT_LOCK_DETECT means the existence of the feature
When the X86_FEATURE_SPLIT_LOCK_DETECT flag is set, it should guarantee
that MSR_TEST_CTRL and the MSR_TEST_CTRL.SPLIT_LOCK_DETECT bit actually
exist. Probe both in split_lock_setup() with the fault-safe MSR accessors
and force the feature flag only when the probe succeeds, so that
split_lock_init() and __sld_msr_set() can rely on the MSR being present.
Signed-off-by: Xiaoyao Li <xiaoyao.li@...el.com>
---
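For reference, here is a stand-alone sketch (not part of the applied
diff) of the probe sequence that split_lock_setup() performs after this
patch: read MSR_TEST_CTRL with the fault-safe accessor, try to set and
then restore the SPLIT_LOCK_DETECT bit, and report the feature as usable
only if all three accesses succeed. The rdmsrl_safe()/wrmsrl_safe()
stubs below are hypothetical user-space stand-ins so the sketch compiles
on its own; in the kernel they are the existing fault-tolerant MSR
helpers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_TEST_CTRL				0x00000033
#define MSR_TEST_CTRL_SPLIT_LOCK_DETECT		(1ULL << 29)

/* Backing store for the hypothetical stand-ins below. */
static uint64_t fake_test_ctrl;

/* Hypothetical stand-in for the kernel's fault-safe MSR read: returns
 * non-zero if the access would have faulted (e.g. the MSR is missing). */
static int rdmsrl_safe(uint32_t msr, uint64_t *val)
{
	if (msr != MSR_TEST_CTRL)
		return -1;
	*val = fake_test_ctrl;
	return 0;
}

/* Hypothetical stand-in for the kernel's fault-safe MSR write. */
static int wrmsrl_safe(uint32_t msr, uint64_t val)
{
	if (msr != MSR_TEST_CTRL)
		return -1;
	fake_test_ctrl = val;
	return 0;
}

/*
 * Mirrors the probe added to split_lock_setup(): the feature counts as
 * usable only if MSR_TEST_CTRL can be read, the SPLIT_LOCK_DETECT bit
 * can be set, and the original value can be restored.
 */
static bool split_lock_detect_supported(void)
{
	uint64_t test_ctrl_val;

	if (rdmsrl_safe(MSR_TEST_CTRL, &test_ctrl_val))
		return false;

	if (wrmsrl_safe(MSR_TEST_CTRL,
			test_ctrl_val | MSR_TEST_CTRL_SPLIT_LOCK_DETECT))
		return false;

	if (wrmsrl_safe(MSR_TEST_CTRL, test_ctrl_val))
		return false;

	return true;
}

int main(void)
{
	printf("split_lock_detect %s\n",
	       split_lock_detect_supported() ? "usable" : "not usable");
	return 0;
}

Probing once at setup time is what lets __sld_msr_set() below drop the
safe MSR variants: X86_FEATURE_SPLIT_LOCK_DETECT is only forced after a
successful round-trip, so later accesses can assume the MSR is present.
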
arch/x86/kernel/cpu/intel.c | 41 +++++++++++++++++++++----------------
1 file changed, 23 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 2b3874a96bd4..49535ed81c22 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -702,7 +702,8 @@ static void init_intel(struct cpuinfo_x86 *c)
	if (tsx_ctrl_state == TSX_CTRL_DISABLE)
		tsx_disable();
-	split_lock_init();
+	if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
+		split_lock_init();
}
#ifdef CONFIG_X86_32
@@ -986,9 +987,26 @@ static inline bool match_option(const char *arg, int arglen, const char *opt)
static void __init split_lock_setup(void)
{
+	u64 test_ctrl_val;
	char arg[20];
	int i, ret;
+	/*
+	 * Use the "safe" versions of rdmsr/wrmsr here to verify that
+	 * MSR_TEST_CTRL and the MSR_TEST_CTRL.SPLIT_LOCK_DETECT bit really
+	 * exist, because there may be glitches in virtualization that leave
+	 * a guest with an incorrect view of real h/w capabilities.
+	 */
+	if (rdmsrl_safe(MSR_TEST_CTRL, &test_ctrl_val))
+		return;
+
+	if (wrmsrl_safe(MSR_TEST_CTRL,
+			test_ctrl_val | MSR_TEST_CTRL_SPLIT_LOCK_DETECT))
+		return;
+
+	if (wrmsrl_safe(MSR_TEST_CTRL, test_ctrl_val))
+		return;
+
	setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
	sld_state = sld_warn;
@@ -1022,24 +1040,19 @@ static void __init split_lock_setup(void)
* Locking is not required at the moment because only bit 29 of this
* MSR is implemented and locking would not prevent that the operation
* of one thread is immediately undone by the sibling thread.
- * Use the "safe" versions of rdmsr/wrmsr here because although code
- * checks CPUID and MSR bits to make sure the TEST_CTRL MSR should
- * exist, there may be glitches in virtualization that leave a guest
- * with an incorrect view of real h/w capabilities.
*/
-static bool __sld_msr_set(bool on)
+static void __sld_msr_set(bool on)
{
	u64 test_ctrl_val;
-	if (rdmsrl_safe(MSR_TEST_CTRL, &test_ctrl_val))
-		return false;
+	rdmsrl(MSR_TEST_CTRL, test_ctrl_val);
	if (on)
		test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
	else
		test_ctrl_val &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
-	return !wrmsrl_safe(MSR_TEST_CTRL, test_ctrl_val);
+	wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
}
static void split_lock_init(void)
@@ -1047,15 +1060,7 @@ static void split_lock_init(void)
	if (sld_state == sld_off)
		return;
-	if (__sld_msr_set(true))
-		return;
-
-	/*
-	 * If this is anything other than the boot-cpu, you've done
-	 * funny things and you get to keep whatever pieces.
-	 */
-	pr_warn("MSR fail -- disabled\n");
-	sld_state = sld_off;
+	__sld_msr_set(true);
}
bool handle_user_split_lock(unsigned long ip)
--
2.23.0