Date:   Wed,  6 Dec 2017 14:43:03 +0300
From:   Jan Dakinevich <jan.dakinevich@...tuozzo.com>
To:     linux-kernel@...r.kernel.org
Cc:     Jan Dakinevich <jan.dakinevich@...tuozzo.com>,
        "Denis V . Lunev" <den@...tuozzo.com>,
        Roman Kagan <rkagan@...tuozzo.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>,
        Arnaldo Carvalho de Melo <acme@...nel.org>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Jiri Olsa <jolsa@...hat.com>,
        Namhyung Kim <namhyung@...nel.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        "H. Peter Anvin" <hpa@...or.com>, x86@...nel.org,
        Paolo Bonzini <pbonzini@...hat.com>,
        Radim Krčmář <rkrcmar@...hat.com>,
        Andi Kleen <ak@...ux.intel.com>,
        Kan Liang <kan.liang@...el.com>,
        Stephane Eranian <eranian@...gle.com>,
        Colin King <colin.king@...onical.com>,
        Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
        Jin Yao <yao.jin@...ux.intel.com>, kvm@...r.kernel.org
Subject: [PATCH RFC 2/2] KVM: x86/vPMU: ignore access to LBR-related MSRs

Windows Server 2016 Essentials attempts (for a yet unknown reason) to
access MSR_LBR_TOS and other LBR-related MSRs at startup.  These are
not currently handled by KVM, so the guest gets #GP and crashes.

To prevent that, identify the LBR-related MSRs pertinent to the CPU
model exposed to the guest and handle them as dummies: ignore writes
and return zero on reads.

Signed-off-by: Jan Dakinevich <jan.dakinevich@...tuozzo.com>
---
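For reviewers: below is a minimal user-space sketch of the range check
this patch adds, runnable outside the kernel.  The MSR layout used here
(TOS at 0x1c9, 32 from/to pairs starting at 0x680/0x6c0, i.e. a
Skylake-like bank) is only an illustrative assumption; in the patch the
real values are filled in by intel_pmu_lbr_fill(), introduced earlier
in this series.

#include <stdbool.h>
#include <stdio.h>

/* mirrors struct x86_pmu_lbr as used by this patch */
struct x86_pmu_lbr {
	unsigned int nr;	/* number of LBR entries, 0 if no LBR */
	unsigned int tos;	/* MSR_LBR_TOS */
	unsigned int from;	/* first FROM_IP MSR of the bank */
	unsigned int to;	/* first TO_IP MSR of the bank */
};

/* same logic as intel_is_lbr_msr() in the hunk below */
static bool is_lbr_msr(const struct x86_pmu_lbr *lbr, unsigned int msr)
{
	if (!lbr->nr)
		return false;
	return msr == lbr->tos ||
	       (msr >= lbr->from && msr < lbr->from + lbr->nr) ||
	       (msr >= lbr->to   && msr < lbr->to   + lbr->nr);
}

int main(void)
{
	/* assumed Skylake-like layout, for illustration only */
	struct x86_pmu_lbr lbr = {
		.nr = 32, .tos = 0x1c9, .from = 0x680, .to = 0x6c0,
	};

	printf("%d\n", is_lbr_msr(&lbr, 0x1c9)); /* 1: TOS */
	printf("%d\n", is_lbr_msr(&lbr, 0x69f)); /* 1: last FROM MSR */
	printf("%d\n", is_lbr_msr(&lbr, 0x6e0)); /* 0: just past TO bank */
	return 0;
}
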
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/pmu_intel.c        | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 35 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 977de5f..04324dd 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -34,6 +34,7 @@
 #include <asm/msr-index.h>
 #include <asm/asm.h>
 #include <asm/kvm_page_track.h>
+#include <asm/perf_event.h>
 
 #define KVM_MAX_VCPUS 288
 #define KVM_SOFT_MAX_VCPUS 240
@@ -416,6 +417,7 @@ struct kvm_pmu {
 	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
 	struct irq_work irq_work;
 	u64 reprogram_pmi;
+	struct x86_pmu_lbr lbr;
 };
 
 struct kvm_pmu_ops;
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index 5ab4a36..7edf191 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -142,6 +142,24 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
 	return &counters[idx];
 }
 
+static bool intel_is_lbr_msr(struct kvm_vcpu *vcpu, u32 msr)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	struct x86_pmu_lbr *lbr = &pmu->lbr;
+
+	if (!lbr->nr)
+		return false;
+
+	if (msr == lbr->tos)
+		return true;
+	if (msr >= lbr->from && msr < lbr->from + lbr->nr)
+		return true;
+	if (msr >= lbr->to && msr < lbr->to + lbr->nr)
+		return true;
+
+	return false;
+}
+
 static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -155,6 +173,10 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 		ret = pmu->version > 1;
 		break;
 	default:
+		if (intel_is_lbr_msr(vcpu, msr)) {
+			ret = true;
+			break;
+		}
 		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
 			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
 			get_fixed_pmc(pmu, msr);
@@ -183,6 +205,10 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 		*data = pmu->global_ovf_ctrl;
 		return 0;
 	default:
+		if (intel_is_lbr_msr(vcpu, msr)) {
+			*data = 0;
+			return 0;
+		}
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_fixed_pmc(pmu, msr))) {
 			*data = pmc_read_counter(pmc);
@@ -235,6 +261,8 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		}
 		break;
 	default:
+		if (intel_is_lbr_msr(vcpu, msr))
+			return 0;
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_fixed_pmc(pmu, msr))) {
 			if (!msr_info->host_initiated)
@@ -303,6 +331,11 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
 	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
 		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
+
+	entry = kvm_find_cpuid_entry(vcpu, 1, 0);
+	if (entry)
+		intel_pmu_lbr_fill(&pmu->lbr,
+			x86_family(entry->eax), x86_model(entry->eax));
 }
 
 static void intel_pmu_init(struct kvm_vcpu *vcpu)
-- 
2.1.4
