Message-Id: <1553350688-39627-6-git-send-email-like.xu@linux.intel.com>
Date:   Sat, 23 Mar 2019 22:18:08 +0800
From:   Like Xu <like.xu@...ux.intel.com>
To:     linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc:     like.xu@...el.com, wei.w.wang@...el.com,
        Andi Kleen <ak@...ux.intel.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Kan Liang <kan.liang@...ux.intel.com>,
        Ingo Molnar <mingo@...hat.com>,
        Paolo Bonzini <pbonzini@...hat.com>
Subject: [RFC] [PATCH v2 5/5] KVM/x86/vPMU: do not call reprogram_counter for Intel hw-assigned vPMCs

To sidestep the cross-mapping issue, this patch passes the
intel_pmu_set_msr request value directly to the hw-assigned vPMC.
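
For illustration only (not part of the patch): the guest-visible
counter index and the host-assigned hardware counter need not match,
so a guest write is forwarded to the host counter's own MSR through
the perf_event's hw.event_base rather than being tracked as a
software delta. A minimal sketch, reusing only fields and helpers
that appear in this series; the function name is made up:

	static void vpmc_passthrough_write(struct kvm_pmc *pmc, u64 data)
	{
		pmc->counter = data;
		/* write straight to the assigned hardware counter */
		if (pmc_is_assigned(pmc))
			wrmsrl(pmc->perf_event->hw.event_base, data);
	}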

With this patch, a counter is reprogrammed through the host perf
scheduler only once, when it is first requested, and is then reused
until it is lazily released; the release is governed by
HW_LIFE_COUNT_MAX and the scheduling time slice.
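
A hedged sketch of how that lazy-release bookkeeping might look.
HW_LIFE_COUNT_MAX, hw_life_count and pmc_is_assigned() come from
earlier patches in this series; kvm_pmu_lazy_release_tick() and
pmc_release_hw_counter() are assumed names for illustration, not
code from this patch:

	static void kvm_pmu_lazy_release_tick(struct kvm_pmc *pmc)
	{
		if (!pmc_is_assigned(pmc))
			return;
		/*
		 * One "life" is consumed per scheduling time slice;
		 * any guest reprogramming resets the count to
		 * HW_LIFE_COUNT_MAX (see pmc_reprogram_counter below).
		 */
		if (--pmc->hw_life_count > 0)
			return;
		/*
		 * Idle for HW_LIFE_COUNT_MAX slices: release the host
		 * perf event so the hardware counter can be reused.
		 */
		pmc_release_hw_counter(pmc);
	}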

Signed-off-by: Like Xu <like.xu@...ux.intel.com>
---
 arch/x86/kvm/pmu.c           | 19 +++++++++++++++
 arch/x86/kvm/vmx/pmu_intel.c | 58 +++++++++++++++++++++++++++++++++++---------
 2 files changed, 65 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 672e268..d7e7fb6 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -137,6 +137,11 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 	}
 
 	pmc->perf_event = event;
+	if (pmc_is_assigned(pmc)) {
+		pmc->hw_life_count = HW_LIFE_COUNT_MAX;
+		wrmsrl(pmc->perf_event->hw.event_base, pmc->counter);
+	}
+
 	clear_bit(pmc->idx, (unsigned long*)&pmc_to_pmu(pmc)->reprogram_pmi);
 }
 
@@ -155,6 +160,13 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
 		return;
 
+	if (pmc_is_assigned(pmc)) {
+		pmc->hw_life_count = HW_LIFE_COUNT_MAX;
+		clear_bit(pmc->idx,
+			(unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
+		return;
+	}
+
 	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
 	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
 
@@ -192,6 +204,13 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
 	if (!en_field || !pmc_is_enabled(pmc))
 		return;
 
+	if (pmc_is_assigned(pmc)) {
+		pmc->hw_life_count = HW_LIFE_COUNT_MAX;
+		clear_bit(pmc->idx,
+			(unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
+		return;
+	}
+
 	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
 			      kvm_x86_ops->pmu_ops->find_fixed_event(idx),
 			      !(en_field & 0x2), /* exclude user */
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 63e00ea..2dfdf54 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -163,12 +163,13 @@ static void intel_pmu_disable_host_counter(struct kvm_pmc *pmc)
 
 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 {
+	struct hw_perf_event *hwc;
+	struct kvm_pmc *pmc;
 	int i;
 
 	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
 		u8 new_ctrl = fixed_ctrl_field(data, i);
 		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
-		struct kvm_pmc *pmc;
 
 		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
 
@@ -176,6 +177,19 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 			continue;
 
 		reprogram_fixed_counter(pmc, new_ctrl, i);
+
+		if (!intel_pmc_is_assigned(pmc))
+			continue;
+
+		hwc = &pmc->perf_event->hw;
+		if (hwc->idx < INTEL_PMC_IDX_FIXED) {
+			u64 config = (new_ctrl == 0) ? 0 :
+				(hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
+			wrmsrl(hwc->config_base, config);
+		} else {
+			intel_pmu_update_host_fixed_ctrl(new_ctrl,
+				hwc->idx - INTEL_PMC_IDX_FIXED);
+		}
 	}
 
 	pmu->fixed_ctr_ctrl = data;
@@ -345,6 +359,7 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct kvm_pmc *pmc;
+	struct hw_perf_event *hwc;
 
 	switch (msr) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
@@ -362,7 +377,13 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_fixed_pmc(pmu, msr))) {
-			*data = pmc_read_counter(pmc);
+			if (intel_pmc_is_assigned(pmc)) {
+				hwc = &pmc->perf_event->hw;
+				rdmsrl_safe(hwc->event_base, data);
+				pmc->counter = *data;
+			} else {
+				*data = pmc->counter;
+			}
 			return 0;
 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
 			*data = pmc->eventsel;
@@ -377,6 +398,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct kvm_pmc *pmc;
+	struct hw_perf_event *hwc;
 	u32 msr = msr_info->index;
 	u64 data = msr_info->data;
 
@@ -414,18 +436,30 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_fixed_pmc(pmu, msr))) {
-			if (!msr_info->host_initiated)
-				data = (s64)(s32)data;
-			pmc->counter += data - pmc_read_counter(pmc);
-			return 0;
-		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
-			if (data == pmc->eventsel)
-				return 0;
-			if (!(data & pmu->reserved_bits)) {
-				reprogram_gp_counter(pmc, data);
-				return 0;
+			pmc->counter = data;
+			if (intel_pmc_is_assigned(pmc)) {
+				hwc = &pmc->perf_event->hw;
+				wrmsrl(hwc->event_base, pmc->counter);
 			}
+			return 0;
 		}
+
+		pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
+		if (!pmc)
+			return 1;
+
+		if (data == pmc->eventsel
+				|| (data & pmu->reserved_bits))
+			return 0;
+
+		reprogram_gp_counter(pmc, data);
+
+		if (pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE)
+			intel_pmu_enable_host_counter(pmc);
+		else
+			intel_pmu_disable_host_counter(pmc);
+
+		return 0;
 	}
 
 	return 1;
-- 
1.8.3.1
