Message-Id: <1511814242-12949-6-git-send-email-luwei.kang@intel.com>
Date:   Tue, 28 Nov 2017 04:23:58 +0800
From:   Luwei Kang <luwei.kang@...el.com>
To:     kvm@...r.kernel.org
Cc:     tglx@...utronix.de, mingo@...hat.com, hpa@...or.com,
        x86@...nel.org, pbonzini@...hat.com, rkrcmar@...hat.com,
        linux-kernel@...r.kernel.org, joro@...tes.org,
        Luwei Kang <luwei.kang@...el.com>
Subject: [PATCH v3 5/9] KVM: x86: Add a function to disable/enable Intel PT MSRs intercept

The Intel Processor Trace MSRs (except IA32_RTIT_CTL) are passed through
to the guest when Intel PT is enabled in the guest. Add functions to
enable/disable interception of these MSRs accordingly.

Signed-off-by: Luwei Kang <luwei.kang@...el.com>
---
 arch/x86/kvm/vmx.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 69 insertions(+)

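Note for reviewers (illustration only, not part of the patch): the helpers
below rely on the VMX MSR-bitmap layout, a 4 KiB page split into four 1 KiB
regions: read-low at 0x000, read-high at 0x400, write-low at 0x800 and
write-high at 0xc00, covering MSRs 0x00000000-0x00001fff and
0xc0000000-0xc0001fff. The standalone userspace sketch below (the helper
name and the sample MSR index are chosen only for illustration) mirrors the
same offset arithmetic that __vmx_enable_intercept_for_msr() performs with
__set_bit():

/*
 * Standalone sketch of the MSR-bitmap offset arithmetic used by
 * __vmx_enable_intercept_for_msr(); not kernel code.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2

/* Set (i.e. intercept) the read/write bits for @msr in a 4 KiB bitmap. */
static void set_intercept(uint8_t bitmap[4096], uint32_t msr, int type)
{
	uint32_t base;

	if (msr <= 0x1fff) {
		base = 0x000;		/* "low" MSR range */
	} else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
		base = 0x400;		/* "high" MSR range */
		msr &= 0x1fff;
	} else {
		return;			/* not controllable via the bitmap */
	}

	if (type & MSR_TYPE_R)
		bitmap[base + msr / 8] |= 1 << (msr % 8);		/* read bitmap */
	if (type & MSR_TYPE_W)
		bitmap[base + 0x800 + msr / 8] |= 1 << (msr % 8);	/* write bitmap */
}

int main(void)
{
	uint8_t bitmap[4096];
	uint32_t rtit_status = 0x571;	/* IA32_RTIT_STATUS */

	memset(bitmap, 0, sizeof(bitmap));
	set_intercept(bitmap, rtit_status, MSR_TYPE_R | MSR_TYPE_W);
	printf("read bit in byte 0x%03x, write bit in byte 0x%03x\n",
	       rtit_status / 8, 0x800 + rtit_status / 8);
	return 0;
}

Clearing those same bits, as the existing __vmx_disable_intercept_for_msr()
does, is what passes an MSR through to the guest.
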
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d8ad68a..87a9f52 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4976,6 +4976,41 @@ static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
 	}
 }
 
+static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
+						u32 msr, int type)
+{
+	int f = sizeof(unsigned long);
+
+	if (!cpu_has_vmx_msr_bitmap())
+		return;
+
+	/*
+	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+	 * have the write-low and read-high bitmap offsets the wrong way round.
+	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+	 */
+	if (msr <= 0x1fff) {
+		if (type & MSR_TYPE_R)
+			/* read-low */
+			__set_bit(msr, msr_bitmap + 0x000 / f);
+
+		if (type & MSR_TYPE_W)
+			/* write-low */
+			__set_bit(msr, msr_bitmap + 0x800 / f);
+
+	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+		msr &= 0x1fff;
+		if (type & MSR_TYPE_R)
+			/* read-high */
+			__set_bit(msr, msr_bitmap + 0x400 / f);
+
+		if (type & MSR_TYPE_W)
+			/* write-high */
+			__set_bit(msr, msr_bitmap + 0xc00 / f);
+
+	}
+}
+
 /*
  * If a msr is allowed by L0, we should check whether it is allowed by L1.
  * The corresponding bit will be cleared unless both of L0 and L1 allow it.
@@ -5031,6 +5066,40 @@ static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
 						msr, MSR_TYPE_R | MSR_TYPE_W);
 }
 
+static void vmx_enable_intercept_for_msr(u32 msr, bool longmode_only)
+{
+	if (!longmode_only)
+		__vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy,
+						msr, MSR_TYPE_R | MSR_TYPE_W);
+	__vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode,
+						msr, MSR_TYPE_R | MSR_TYPE_W);
+}
+
+static inline void pt_disable_intercept_for_msr(bool flag)
+{
+	u32 i, eax, ebx, ecx, edx;
+
+	cpuid_count(0x14, 1, &eax, &ebx, &ecx, &edx); /* EAX[2:0] = #addr ranges */
+
+	if (flag) {
+		vmx_disable_intercept_for_msr(MSR_IA32_RTIT_STATUS, false);
+		vmx_disable_intercept_for_msr(MSR_IA32_RTIT_OUTPUT_BASE, false);
+		vmx_disable_intercept_for_msr(MSR_IA32_RTIT_OUTPUT_MASK, false);
+		vmx_disable_intercept_for_msr(MSR_IA32_RTIT_CR3_MATCH, false);
+		for (i = 0; i < (eax & 0x7) * 2; i++)
+			vmx_disable_intercept_for_msr(MSR_IA32_RTIT_ADDR0_A + i,
+									false);
+	} else {
+		vmx_enable_intercept_for_msr(MSR_IA32_RTIT_STATUS, false);
+		vmx_enable_intercept_for_msr(MSR_IA32_RTIT_OUTPUT_BASE, false);
+		vmx_enable_intercept_for_msr(MSR_IA32_RTIT_OUTPUT_MASK, false);
+		vmx_enable_intercept_for_msr(MSR_IA32_RTIT_CR3_MATCH, false);
+		for (i = 0; i < (eax & 0x7) * 2; i++)
+			vmx_enable_intercept_for_msr(MSR_IA32_RTIT_ADDR0_A + i,
+									false);
+	}
+}
+
 static void vmx_disable_intercept_msr_x2apic(u32 msr, int type, bool apicv_active)
 {
 	if (apicv_active) {
-- 
1.8.3.1
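
For context only: this patch adds the toggling helpers but not their caller
(that wiring is done elsewhere in the series). A hypothetical hook, in which
every name except pt_disable_intercept_for_msr() is made up for illustration
and which assumes kernel context for the u64/bool types, could flip the
intercepts when the guest toggles TraceEn (bit 0) in its IA32_RTIT_CTL
shadow:

/* Hypothetical caller sketch, not part of this patch. */
#define RTIT_CTL_TRACEEN_BIT	(1ULL << 0)	/* TraceEn, IA32_RTIT_CTL bit 0 */

static void pt_update_intercepts(u64 old_ctl, u64 new_ctl)
{
	bool was_on = old_ctl & RTIT_CTL_TRACEEN_BIT;
	bool now_on = new_ctl & RTIT_CTL_TRACEEN_BIT;

	if (was_on == now_on)
		return;

	/* true: pass the PT MSRs through; false: intercept them again */
	pt_disable_intercept_for_msr(now_on);
}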
