Message-Id: <1556134382-58814-13-git-send-email-fenghua.yu@intel.com>
Date:   Wed, 24 Apr 2019 12:32:59 -0700
From:   Fenghua Yu <fenghua.yu@...el.com>
To:     "Thomas Gleixner" <tglx@...utronix.de>,
        "Ingo Molnar" <mingo@...hat.com>, "Borislav Petkov" <bp@...en8.de>,
        "H Peter Anvin" <hpa@...or.com>,
        "Paolo Bonzini" <pbonzini@...hat.com>,
        "Dave Hansen" <dave.hansen@...el.com>,
        "Ashok Raj" <ashok.raj@...el.com>,
        "Peter Zijlstra" <peterz@...radead.org>,
        "Ravi V Shankar" <ravi.v.shankar@...el.com>,
        "Xiaoyao Li " <xiaoyao.li@...el.com>,
        "Christopherson Sean J" <sean.j.christopherson@...el.com>,
        "Kalle Valo" <kvalo@...eaurora.org>,
        "Michael Chan" <michael.chan@...adcom.com>
Cc:     "linux-kernel" <linux-kernel@...r.kernel.org>,
        "x86" <x86@...nel.org>, kvm@...r.kernel.org,
        netdev@...r.kernel.org, linux-wireless@...r.kernel.org,
        Xiaoyao Li <xiaoyao.li@...ux.intel.com>,
        Fenghua Yu <fenghua.yu@...el.com>
Subject: [PATCH v8 12/15] kvm/vmx: Emulate MSR TEST_CTL

From: Xiaoyao Li <xiaoyao.li@...ux.intel.com>

A control bit (bit 29) in the TEST_CTL MSR (0x33) will be introduced in
future x86 processors. When bit 29 is set, the processor raises an #AC
exception for split-locked accesses at all CPLs.

Please check the latest Intel 64 and IA-32 Architectures Software
Developer's Manual for more detailed information on the MSR and
the split lock bit.
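
For reference, the MSR index and bit position described above correspond
to the following definitions used later in this patch (a sketch; the
actual defines are assumed to be added to msr-index.h earlier in this
series):

	#define MSR_TEST_CTL			0x00000033
	#define TEST_CTL_SPLIT_LOCK_DETECT	BIT(29)	/* #AC on split lock, all CPLs */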

This patch emulates MSR_TEST_CTL with vmx->msr_test_ctl and does the
following:
1. Since the guest's MSR TEST_CTL is emulated, enable the corresponding
bit in CORE_CAPABILITY to correctly report this feature to the guest.

2. Differentiate MSR_TEST_CTL between host and guest.

To avoid a costly RDMSR of TEST_CTL when switching between host and guest
at VM entry, read the per-CPU variable msr_test_ctl_cache, which caches
the MSR value, as sketched below.
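
As a rough sketch of that host-side cache (an assumption about an earlier
patch in this series, not something this patch adds; the function name
here is illustrative), the per-CPU variable is filled once from the real
MSR so the VM-entry path never needs to read it again:

	/* Assumed host-side setup for the per-CPU TEST_CTL cache. */
	DEFINE_PER_CPU(u64, msr_test_ctl_cache);

	static void split_lock_init(void)
	{
		u64 val;

		rdmsrl(MSR_TEST_CTL, val);
		this_cpu_write(msr_test_ctl_cache, val);
	}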

Signed-off-by: Xiaoyao Li <xiaoyao.li@...ux.intel.com>
Signed-off-by: Fenghua Yu <fenghua.yu@...el.com>
---
Changes in v7:
  - Add vmx->msr_test_ctl_mask to indicate the valid bits of the
  guest's MSR_TEST_CTL.
  - Add an X86_FEATURE_SPLIT_LOCK_DETECT check to determine whether
  MSR_TEST_CTL needs to be switched.
  - Use msr_test_ctl_cache to replace the costly RDMSR.
  - Minimal adjustment in kvm_get_core_capability() to make it clearer.

 arch/x86/kvm/vmx/vmx.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/vmx/vmx.h |  2 ++
 arch/x86/kvm/x86.c     | 19 ++++++++++++++++++-
 3 files changed, 62 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index b4e7d645275a..bbb9859350b5 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1663,6 +1663,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	u32 index;
 
 	switch (msr_info->index) {
+	case MSR_TEST_CTL:
+		if (!vmx->msr_test_ctl_mask)
+			return 1;
+		msr_info->data = vmx->msr_test_ctl;
+		break;
 #ifdef CONFIG_X86_64
 	case MSR_FS_BASE:
 		msr_info->data = vmcs_readl(GUEST_FS_BASE);
@@ -1797,6 +1802,12 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	u32 index;
 
 	switch (msr_index) {
+	case MSR_TEST_CTL:
+		if (!vmx->msr_test_ctl_mask ||
+		    (data & vmx->msr_test_ctl_mask) != data)
+			return 1;
+		vmx->msr_test_ctl = data;
+		break;
 	case MSR_EFER:
 		ret = kvm_set_msr_common(vcpu, msr_info);
 		break;
@@ -4106,6 +4117,16 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	}
 }
 
+static u64 vmx_get_msr_test_ctl_mask(struct kvm_vcpu *vcpu)
+{
+	u64 mask = 0;
+
+	if (vcpu->arch.core_capability & CORE_CAP_SPLIT_LOCK_DETECT)
+		mask |= TEST_CTL_SPLIT_LOCK_DETECT;
+
+	return mask;
+}
+
 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -4114,6 +4135,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 
 	vmx->rmode.vm86_active = 0;
 	vmx->spec_ctrl = 0;
+	vmx->msr_test_ctl = 0;
+	vmx->msr_test_ctl_mask = vmx_get_msr_test_ctl_mask(vcpu);
 
 	vcpu->arch.microcode_version = 0x100000000ULL;
 	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
@@ -6313,6 +6336,23 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 					msrs[i].host, false);
 }
 
+static void atomic_switch_msr_test_ctl(struct vcpu_vmx *vmx)
+{
+	u64 host_msr_test_ctl;
+
+	if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
+		return;
+
+	host_msr_test_ctl = this_cpu_read(msr_test_ctl_cache);
+
+	if (host_msr_test_ctl == vmx->msr_test_ctl) {
+		clear_atomic_switch_msr(vmx, MSR_TEST_CTL);
+	} else {
+		add_atomic_switch_msr(vmx, MSR_TEST_CTL, vmx->msr_test_ctl,
+				      host_msr_test_ctl, false);
+	}
+}
+
 static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val)
 {
 	vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
@@ -6421,6 +6461,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
 	atomic_switch_perf_msrs(vmx);
 
+	atomic_switch_msr_test_ctl(vmx);
+
 	vmx_update_hv_timer(vcpu);
 
 	/*
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index f879529906b4..8690a1295548 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -190,6 +190,8 @@ struct vcpu_vmx {
 	u64		      msr_guest_kernel_gs_base;
 #endif
 
+	u64		      msr_test_ctl;
+	u64		      msr_test_ctl_mask;
 	u64		      spec_ctrl;
 
 	u32 vm_entry_controls_shadow;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e88be97d47b9..60aaf75d0fe5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1231,7 +1231,24 @@ EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities);
 
 static u64 kvm_get_core_capability(void)
 {
-	return 0;
+	u64 data = 0;
+
+	if (boot_cpu_has(X86_FEATURE_CORE_CAPABILITY)) {
+		rdmsrl(MSR_IA32_CORE_CAPABILITY, data);
+
+		/* mask non-virtualizable functions */
+		data &= CORE_CAP_SPLIT_LOCK_DETECT;
+	} else if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
+		/*
+		 * There will be a list of FMS values that have split lock
+		 * detection but lack the CORE CAPABILITY MSR. In this case,
+		 * set CORE_CAP_SPLIT_LOCK_DETECT since we emulate
+		 * MSR CORE_CAPABILITY.
+		 */
+		data |= CORE_CAP_SPLIT_LOCK_DETECT;
+	}
+
+	return data;
 }
 
 static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
-- 
2.19.1
