Message-Id: <20201109021254.79755-7-like.xu@linux.intel.com>
Date: Mon, 9 Nov 2020 10:12:43 +0800
From: Like Xu <like.xu@...ux.intel.com>
To: Peter Zijlstra <peterz@...radead.org>,
Paolo Bonzini <pbonzini@...hat.com>, kvm@...r.kernel.org
Cc: Sean Christopherson <sean.j.christopherson@...el.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>,
Kan Liang <kan.liang@...ux.intel.com>, luwei.kang@...el.com,
Thomas Gleixner <tglx@...utronix.de>, wei.w.wang@...el.com,
Tony Luck <tony.luck@...el.com>,
Stephane Eranian <eranian@...gle.com>,
Mark Gross <mgross@...ux.intel.com>,
Srinivas Pandruvada <srinivas.pandruvada@...ux.intel.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH v2 06/17] KVM: x86/pmu: Add IA32_PEBS_ENABLE MSR emulation for extended PEBS
If IA32_PERF_CAPABILITIES.PEBS_BASELINE [bit 14] is set, the
IA32_PEBS_ENABLE MSR exists and all architecturally enumerated fixed
and general-purpose counters have corresponding bits in IA32_PEBS_ENABLE
that enable generation of PEBS records. The general-purpose counter bits
start at bit IA32_PEBS_ENABLE[0], and the fixed counter bits start at
bit IA32_PEBS_ENABLE[32].
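For illustration (not part of this patch), a minimal user-space sketch of
that bit layout; the counter counts are assumptions for an Ice Lake-like PMU:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Assumed counts, for illustration only */
        unsigned int nr_gp = 8, nr_fixed = 4;
        uint64_t gp_bits = (1ULL << nr_gp) - 1;               /* bits 0..7   */
        uint64_t fixed_bits = ((1ULL << nr_fixed) - 1) << 32; /* bits 32..35 */

        /* Prints 0xf000000ff: the architecturally valid PEBS_ENABLE bits */
        printf("valid IA32_PEBS_ENABLE bits: %#llx\n",
               (unsigned long long)(gp_bits | fixed_bits));
        return 0;
}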
When guest PEBS is enabled, the IA32_PEBS_ENABLE MSR is added to the
perf_guest_switch_msr() list and is switched during VMX transitions,
just like the CORE_PERF_GLOBAL_CTRL MSR.
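For context, KVM consumes that switch list on the VM-entry path roughly as
follows (a simplified paraphrase of atomic_switch_perf_msrs() in
arch/x86/kvm/vmx/vmx.c, shown for reference and not changed by this patch):

static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
{
        int i, nr_msrs;
        struct perf_guest_switch_msr *msrs;

        msrs = perf_guest_get_msrs(&nr_msrs);
        if (!msrs)
                return;

        /* MSRs whose host and guest values differ are switched atomically */
        for (i = 0; i < nr_msrs; i++)
                if (msrs[i].host == msrs[i].guest)
                        clear_atomic_switch_msr(vmx, msrs[i].msr);
                else
                        add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
                                              msrs[i].host, false);
}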
Originally-by: Andi Kleen <ak@...ux.intel.com>
Co-developed-by: Kan Liang <kan.liang@...ux.intel.com>
Signed-off-by: Kan Liang <kan.liang@...ux.intel.com>
Co-developed-by: Luwei Kang <luwei.kang@...el.com>
Signed-off-by: Luwei Kang <luwei.kang@...el.com>
Signed-off-by: Like Xu <like.xu@...ux.intel.com>
---
arch/x86/events/intel/core.c | 21 +++++++++++++++++++++
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/include/asm/msr-index.h | 6 ++++++
arch/x86/kvm/vmx/pmu_intel.c | 28 ++++++++++++++++++++++++++++
4 files changed, 56 insertions(+)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 31e6887d24f1..d824c7156d34 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3419,6 +3419,27 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
                 *nr = 2;
         }
 
+        if (cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask) {
+                arr[1].msr = MSR_IA32_PEBS_ENABLE;
+                arr[1].host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask;
+                arr[1].guest = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask;
+                /*
+                 * Without x86_pmu.counter_freezing support on the host,
+                 * guest PEBS is disabled once host PEBS is enabled, since
+                 * enabling both may deliver an unknown PMI that confuses the
+                 * host and causes the guest PEBS overflow PMI to be missed.
+                 */
+                if (!x86_pmu.counter_freezing && arr[1].host)
+                        arr[1].guest = 0;
+                arr[0].guest |= arr[1].guest;
+                *nr = 2;
+        } else if (*nr == 1) {
+                /* Remove MSR_IA32_PEBS_ENABLE from MSR switch list in KVM */
+                arr[1].msr = MSR_IA32_PEBS_ENABLE;
+                arr[1].host = arr[1].guest = 0;
+                *nr = 2;
+        }
+
         return arr;
 }
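A worked example of the masking above, with assumed values (illustrative
only): counter 0 runs a host-only (exclude_guest) PEBS event and counter 1
is guest-owned:

/*
 * Assumed state, for illustration:
 *   cpuc->pebs_enabled          = 0x3   (PEBS on GP counters 0 and 1)
 *   cpuc->intel_ctrl_host_mask  = 0x1   (counter 0 is host-only)
 *   cpuc->intel_ctrl_guest_mask = 0x2   (counter 1 is guest-owned)
 *
 * Then:
 *   arr[1].host  = 0x3 & ~0x2 = 0x1    (host keeps PEBS on counter 0)
 *   arr[1].guest = 0x3 & ~0x1 = 0x2    (guest gets PEBS on counter 1)
 *
 * Without x86_pmu.counter_freezing, arr[1].host != 0 then forces
 * arr[1].guest = 0, i.e. guest PEBS is dropped while host PEBS is live.
 */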
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index eb0d73a095a3..6d7e895ae535 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -450,6 +450,7 @@ struct kvm_pmu {
         DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);
 
         u64 pebs_enable;
+        u64 pebs_enable_mask;
 
         /*
          * The gate to release perf_events not marked in
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 2859ee4f39a8..8bc6269f577d 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -184,6 +184,12 @@
 #define MSR_PEBS_DATA_CFG               0x000003f2
 #define MSR_IA32_DS_AREA                0x00000600
 #define MSR_IA32_PERF_CAPABILITIES      0x00000345
+#define PERF_CAP_PEBS_TRAP              BIT_ULL(6)
+#define PERF_CAP_ARCH_REG               BIT_ULL(7)
+#define PERF_CAP_PEBS_FORMAT            0xf00
+#define PERF_CAP_PEBS_BASELINE          BIT_ULL(14)
+#define PERF_CAP_PEBS_MASK      (PERF_CAP_PEBS_TRAP | PERF_CAP_ARCH_REG | \
+                                 PERF_CAP_PEBS_FORMAT | PERF_CAP_PEBS_BASELINE)
 #define MSR_PEBS_LD_LAT_THRESHOLD       0x000003f6
 
 #define MSR_IA32_RTIT_CTL               0x00000570
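A sketch (hypothetical helpers, not added by this patch) of how the new
macros decode the PEBS fields of IA32_PERF_CAPABILITIES; the PEBS record
format lives in bits 11:8:

/* Hypothetical helpers; kernel context assumed (u64 from <linux/types.h>),
 * with 'caps' read via rdmsrl(MSR_IA32_PERF_CAPABILITIES, caps). */
static inline bool pebs_is_baseline(u64 caps)
{
        return caps & PERF_CAP_PEBS_BASELINE;
}

static inline u8 pebs_record_format(u64 caps)
{
        /* 4-bit PEBS record format field at bits 11:8 */
        return (caps & PERF_CAP_PEBS_FORMAT) >> 8;
}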
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 50047114c298..2f10587bda19 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -180,6 +180,9 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
         case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                 ret = pmu->version > 1;
                 break;
+        case MSR_IA32_PEBS_ENABLE:
+                ret = vcpu->arch.perf_capabilities & PERF_CAP_PEBS_FORMAT;
+                break;
         default:
                 ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
                         get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
@@ -221,6 +224,9 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
         case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                 msr_info->data = pmu->global_ovf_ctrl;
                 return 0;
+        case MSR_IA32_PEBS_ENABLE:
+                msr_info->data = pmu->pebs_enable;
+                return 0;
         default:
                 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
                     (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
@@ -280,6 +286,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                         return 0;
                 }
                 break;
+        case MSR_IA32_PEBS_ENABLE:
+                if (pmu->pebs_enable == data)
+                        return 0;
+                if (!(data & pmu->pebs_enable_mask)) {
+                        pmu->pebs_enable = data;
+                        return 0;
+                }
+                break;
         default:
                 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
                     (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
@@ -329,6 +343,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
         pmu->version = 0;
         pmu->reserved_bits = 0xffffffff00200000ull;
         pmu->fixed_ctr_ctrl_mask = ~0ull;
+        pmu->pebs_enable_mask = ~0ull;
 
         entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
         if (!entry)
@@ -384,6 +399,19 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
         bitmap_set(pmu->all_valid_pmc_idx,
                 INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
 
+        if (vcpu->arch.perf_capabilities & PERF_CAP_PEBS_FORMAT) {
+                if (vcpu->arch.perf_capabilities & PERF_CAP_PEBS_BASELINE) {
+                        pmu->pebs_enable_mask = ~pmu->global_ctrl;
+                        pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
+                        for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
+                                pmu->fixed_ctr_ctrl_mask &=
+                                        ~(1ULL << (INTEL_PMC_IDX_FIXED + i * 4));
+                } else
+                        pmu->pebs_enable_mask = ~((1ull << pmu->nr_arch_gp_counters) - 1);
+        } else {
+                vcpu->arch.perf_capabilities &= ~PERF_CAP_PEBS_MASK;
+        }
+
         nested_vmx_pmu_entry_exit_ctls_update(vcpu);
 }
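A worked example of the resulting pebs_enable_mask, with assumed counter
counts (8 GP + 4 fixed, illustrative only). Note the mask holds the
*reserved* bits, so a guest write that sets any of them is rejected by
intel_pmu_set_msr():

/*
 * Assumed: 8 GP counters, 4 fixed counters.
 *
 * With PEBS baseline (PERF_CAP_PEBS_BASELINE set):
 *   pmu->global_ctrl      = 0x0000000f000000ff
 *   pmu->pebs_enable_mask = ~0x0000000f000000ff = 0xfffffff0ffffff00
 *
 * Without baseline (only GP counter bits 0..7 are writable):
 *   pmu->pebs_enable_mask = ~0xff = 0xffffffffffffff00
 */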
--
2.21.3