Message-Id: <20200314073414.184213-6-xiaoyao.li@intel.com>
Date: Sat, 14 Mar 2020 15:34:09 +0800
From: Xiaoyao Li <xiaoyao.li@...el.com>
To: Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
	hpa@...or.com, Paolo Bonzini <pbonzini@...hat.com>,
	Sean Christopherson <sean.j.christopherson@...el.com>,
	Andy Lutomirski <luto@...nel.org>, tony.luck@...el.com
Cc: peterz@...radead.org, fenghua.yu@...el.com,
	Arvind Sankar <nivedita@...m.mit.edu>,
	Vitaly Kuznetsov <vkuznets@...hat.com>,
	Jim Mattson <jmattson@...gle.com>, x86@...nel.org,
	kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
	Xiaoyao Li <xiaoyao.li@...el.com>
Subject: [PATCH v4 05/10] x86/split_lock: Add and export several functions for KVM

KVM will use split_lock_detect_disabled() and split_lock_detect_on() in
vmx_vcpu_run() to check whether split lock detection can be exposed to
the guest and whether the host has turned it on. Make them static inline
to avoid the extra CALL+RET in that path.

sld_msr_set() will be used when switching to and from the guest.
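
As a minimal sketch of the intended usage (illustrative only, not part
of this patch; the function name vmx_update_sld() and its enter_guest
parameter are hypothetical), a VMX caller could wrap the exported
helpers around guest entry and exit like this:

#include <asm/cpu.h>

/* Hypothetical wrapper; not the actual KVM code from this series. */
static void vmx_update_sld(bool enter_guest)
{
	/* Nothing to do unless the host has split lock detection on. */
	if (!split_lock_detect_on())
		return;

	/* Clear SLD before entering the guest; restore it on exit. */
	sld_msr_set(!enter_guest);
}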

Signed-off-by: Xiaoyao Li <xiaoyao.li@...el.com>
---
 arch/x86/include/asm/cpu.h  | 23 +++++++++++++++++++++++
 arch/x86/kernel/cpu/intel.c | 17 ++++++++---------
 2 files changed, 31 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index ff567afa6ee1..2e17315b1fed 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -41,15 +41,38 @@ unsigned int x86_family(unsigned int sig);
 unsigned int x86_model(unsigned int sig);
 unsigned int x86_stepping(unsigned int sig);
 #ifdef CONFIG_CPU_SUP_INTEL
+enum split_lock_detect_state {
+	sld_not_exist = 0,
+	sld_disable,
+	sld_kvm_only,
+	sld_warn,
+	sld_fatal,
+};
+extern enum split_lock_detect_state sld_state;
+
+static inline bool split_lock_detect_on(void)
+{
+	return (sld_state == sld_warn) || (sld_state == sld_fatal);
+}
+
+static inline bool split_lock_detect_disabled(void)
+{
+	return sld_state == sld_disable;
+}
+
 extern void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c);
 extern void switch_to_sld(unsigned long tifn);
 extern bool handle_user_split_lock(unsigned long ip);
+extern void sld_msr_set(bool on);
 #else
+static inline bool split_lock_detect_on(void) { return false; }
+static inline bool split_lock_detect_disabled(void) { return true; }
 static inline void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) {}
 static inline void switch_to_sld(unsigned long tifn) {}
 static inline bool handle_user_split_lock(unsigned long ip)
 {
 	return false;
 }
+static inline void sld_msr_set(bool on) {}
 #endif
 #endif /* _ASM_X86_CPU_H */

diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index c401d174c8db..8bfe8b07e06e 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -33,19 +33,12 @@
 #include <asm/apic.h>
 #endif
 
-enum split_lock_detect_state {
-	sld_not_exist = 0,
-	sld_disable,
-	sld_kvm_only,
-	sld_warn,
-	sld_fatal,
-};
-
 /*
  * split_lock_setup() will switch this to sld_warn on systems that support
  * split lock detect, unless there is a command line override.
  */
-static enum split_lock_detect_state sld_state = sld_not_exist;
+enum split_lock_detect_state sld_state = sld_not_exist;
+EXPORT_SYMBOL_GPL(sld_state);
 
 /*
  * Processors which have self-snooping capability can handle conflicting
@@ -1121,6 +1114,12 @@ bool handle_user_split_lock(unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(handle_user_split_lock);
 
+void sld_msr_set(bool on)
+{
+	__sld_msr_set(on);
+}
+EXPORT_SYMBOL_GPL(sld_msr_set);
+
 /*
  * This function is called only when switching between tasks with
  * different split-lock detection modes. It sets the MSR for the
--
2.20.1