lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Sat, 23 Mar 2019 22:18:06 +0800
From:   Like Xu <like.xu@...ux.intel.com>
To:     linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc:     like.xu@...el.com, wei.w.wang@...el.com,
        Andi Kleen <ak@...ux.intel.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Kan Liang <kan.liang@...ux.intel.com>,
        Ingo Molnar <mingo@...hat.com>,
        Paolo Bonzini <pbonzini@...hat.com>
Subject: [RFC] [PATCH v2 3/5] KVM/x86/vPMU: add Intel vPMC enable/disable and save/restore support

We may not assume that a guest fixed vPMC would be assigned to a
host fixed counter, and vice versa. This issue (where the host hw->idx
has a different type than the guest hw->idx) is called cross-mapping,
and we need to preserve the semantics of the mask select and enable ctrl.

Signed-off-by: Like Xu <like.xu@...ux.intel.com>
---
 arch/x86/kvm/vmx/pmu_intel.c | 92 +++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 87 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index bb16031..0b69acc 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -133,6 +133,34 @@ static void intel_pmu_update_host_fixed_ctrl(u64 new_ctrl, u8 host_idx)
 	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, host_ctrl);
 }
 
+static void intel_pmu_enable_host_counter(struct kvm_pmc *pmc)
+{
+	u8 host_idx;
+
+	if (!intel_pmc_is_assigned(pmc))
+		return;
+
+	host_idx = pmc->perf_event->hw.idx;
+	if (host_idx >= INTEL_PMC_IDX_FIXED)
+		intel_pmu_enable_host_fixed_counter(pmc);
+	else
+		intel_pmu_enable_host_gp_counter(pmc);
+}
+
+static void intel_pmu_disable_host_counter(struct kvm_pmc *pmc)
+{
+	u8 host_idx;
+
+	if (!intel_pmc_is_assigned(pmc))
+		return;
+
+	host_idx = pmc->perf_event->hw.idx;
+	if (host_idx >= INTEL_PMC_IDX_FIXED)
+		intel_pmu_disable_host_fixed_counter(pmc);
+	else
+		intel_pmu_disable_host_gp_counter(pmc);
+}
+
 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 {
 	int i;
@@ -262,6 +290,57 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 	return ret;
 }
 
+static void intel_pmu_save_guest_pmc(struct kvm_pmu *pmu, u32 idx)
+{
+	struct kvm_pmc *pmc = intel_pmc_idx_to_pmc(pmu, idx);
+
+	if (!intel_pmc_is_assigned(pmc))
+		return;
+
+	rdmsrl(pmc->perf_event->hw.event_base, pmc->counter);
+	wrmsrl(pmc->perf_event->hw.event_base, 0);
+}
+
+static void intel_pmu_restore_guest_pmc(struct kvm_pmu *pmu, u32 idx)
+{
+	struct kvm_pmc *pmc = intel_pmc_idx_to_pmc(pmu, idx);
+	u8 ctrl;
+
+	if (!intel_pmc_is_assigned(pmc))
+		return;
+
+	if (pmc->idx >= INTEL_PMC_IDX_FIXED) {
+		ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
+			pmc->idx - INTEL_PMC_IDX_FIXED);
+		if (ctrl)
+			intel_pmu_enable_host_counter(pmc);
+		else
+			intel_pmu_disable_host_counter(pmc);
+	} else {
+		if (!(pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE))
+			intel_pmu_disable_host_counter(pmc);
+		else
+			intel_pmu_enable_host_counter(pmc);
+	}
+
+	wrmsrl(pmc->perf_event->hw.event_base, pmc->counter);
+}
+
+static void intel_pmc_stop_counter(struct kvm_pmc *pmc)
+{
+	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+
+	if (!pmc->perf_event)
+		return;
+
+	intel_pmu_disable_host_counter(pmc);
+	intel_pmu_save_guest_pmc(pmu, pmc->idx);
+	pmc_read_counter(pmc);
+	perf_event_release_kernel(pmc->perf_event);
+	pmc->perf_event = NULL;
+	pmc->hw_life_count = 0;
+}
+
 static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -424,17 +503,20 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
 static void intel_pmu_reset(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	struct kvm_pmc *pmc;
 	int i;
 
 	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
-		struct kvm_pmc *pmc = &pmu->gp_counters[i];
-
-		pmc_stop_counter(pmc);
+		pmc = &pmu->gp_counters[i];
+		intel_pmc_stop_counter(pmc);
 		pmc->counter = pmc->eventsel = 0;
 	}
 
-	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
-		pmc_stop_counter(&pmu->fixed_counters[i]);
+	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
+		pmc = &pmu->fixed_counters[i];
+		intel_pmc_stop_counter(pmc);
+		pmc->counter = 0;
+	}
 
 	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
 		pmu->global_ovf_ctrl = 0;
-- 
1.8.3.1

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ