Message-ID: <c9b3d50e-ec3d-3fa3-2706-5672100ffe09@gmail.com>
Date: Tue, 6 Sep 2022 20:45:00 +0800
From: Like Xu <like.xu.linux@...il.com>
To: Jim Mattson <jmattson@...gle.com>
Cc: Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Sandipan Das <sandipan.das@....com>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 3/4] KVM: x86/svm/pmu: Add AMD PerfMonV2 support
On 6/9/2022 2:00 am, Jim Mattson wrote:
> On Mon, Sep 5, 2022 at 5:44 AM Like Xu <like.xu.linux@...il.com> wrote:
>>
>> From: Like Xu <likexu@...cent.com>
>>
>> If the guest detects AMD Performance Monitoring Version 2
>> (PerfMonV2), it can use a new scheme to manage the Core PMCs via
>> the new global control and status registers.
>>
>> In addition to benefiting from the PerfMonV2 functionality in the same
>> way as the host (higher precision), the guest can also reduce the number
>> of VM-exits by lowering the total number of MSR accesses.
>>
>> In terms of implementation details, amd_is_valid_msr() is resurrected
>> since the three newly added global MSRs do not map to any single vPMC.
>> Emulating PerfMonV2 on hosts without PerfMonV2 support has also been
>> ruled out, for reasons of precision.
>>
>> Co-developed-by: Sandipan Das <sandipan.das@....com>
>> Signed-off-by: Sandipan Das <sandipan.das@....com>
>> Signed-off-by: Like Xu <likexu@...cent.com>
>> ---
>>   arch/x86/kvm/pmu.c     |  6 +++++
>>   arch/x86/kvm/svm/pmu.c | 50 +++++++++++++++++++++++++++++++++---------
>>   arch/x86/kvm/x86.c     | 11 ++++++++++
>>   3 files changed, 57 insertions(+), 10 deletions(-)
>>
>> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
>> index 7002e1b74108..56b4f898a246 100644
>> --- a/arch/x86/kvm/pmu.c
>> +++ b/arch/x86/kvm/pmu.c
>> @@ -455,12 +455,15 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>>
>>          switch (msr) {
>>          case MSR_CORE_PERF_GLOBAL_STATUS:
>> +        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
>>                  msr_info->data = pmu->global_status;
>>                  return 0;
>>          case MSR_CORE_PERF_GLOBAL_CTRL:
>> +        case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
>>                  msr_info->data = pmu->global_ctrl;
>>                  return 0;
>>          case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
>> +        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
>>                  msr_info->data = 0;
>>                  return 0;
>>          default:
>> @@ -479,12 +482,14 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>>
>>          switch (msr) {
>>          case MSR_CORE_PERF_GLOBAL_STATUS:
>> +        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
>>                  if (msr_info->host_initiated) {
>>                          pmu->global_status = data;
>>                          return 0;
>>                  }
>>                  break; /* RO MSR */
>>          case MSR_CORE_PERF_GLOBAL_CTRL:
>> +        case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
>>                  if (pmu->global_ctrl == data)
>>                          return 0;
>>                  if (kvm_valid_perf_global_ctrl(pmu, data)) {
>> @@ -495,6 +500,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>>                  }
>>                  break;
>>          case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
>> +        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
>>                  if (!(data & pmu->global_ovf_ctrl_mask)) {
>>                          if (!msr_info->host_initiated)
>>                                  pmu->global_status &= ~data;
>> diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
>> index 3a20972e9f1a..4c7d408e3caa 100644
>> --- a/arch/x86/kvm/svm/pmu.c
>> +++ b/arch/x86/kvm/svm/pmu.c
>> @@ -92,12 +92,6 @@ static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
>>          return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx & ~(3u << 30));
>>  }
>>
>> -static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
>> -{
>> -        /* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
>> -        return false;
>> -}
>> -
>>  static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
>>  {
>>          struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
>> @@ -109,6 +103,29 @@ static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
>>          return pmc;
>>  }
>>
>> +static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
>> +{
>> +        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
>> +
>> +        switch (msr) {
>> +        case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
>> +                return pmu->version > 0;
>> +        case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
>> +                return guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE);
>> +        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
>> +        case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
>> +        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
>> +                return pmu->version > 1;
>> +        default:
>> +                if (msr > MSR_F15H_PERF_CTR5 &&
>> +                    msr < MSR_F15H_PERF_CTL0 + 2 * KVM_AMD_PMC_MAX_GENERIC)
>> +                        return pmu->version > 1;
>
> Should this be bounded by guest CPUID.80000022H:EBX[NumCorePmc]
> (unless host-initiated)?
Indeed, how about:

        default:
                if (msr > MSR_F15H_PERF_CTR5 &&
                    msr < MSR_F15H_PERF_CTL0 + 2 * pmu->nr_arch_gp_counters)
                        return pmu->version > 1;
and for host-initiated:

        #define MSR_F15H_PERF_MSR_MAX \
                (MSR_F15H_PERF_CTR0 + 2 * (KVM_AMD_PMC_MAX_GENERIC - 1))

kvm_{set|get}_msr_common():

        case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_MSR_MAX:
                if (kvm_pmu_is_valid_msr(vcpu, msr))
                        return kvm_pmu_set_msr(vcpu, msr_info);

?
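
For completeness, the whole amd_is_valid_msr() would then look something
like this (an untested sketch; it assumes pmu->nr_arch_gp_counters has
already been capped by amd_pmu_refresh()):

        static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
        {
                struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

                switch (msr) {
                case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
                        return pmu->version > 0;
                case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
                        return guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE);
                case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
                case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
                case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
                        return pmu->version > 1;
                default:
                        /* Bound the v2 CTL/CTR pairs by the guest's PMC count. */
                        if (msr > MSR_F15H_PERF_CTR5 &&
                            msr < MSR_F15H_PERF_CTL0 + 2 * pmu->nr_arch_gp_counters)
                                return pmu->version > 1;
                        break;
                }

                return amd_msr_idx_to_pmc(vcpu, msr);
        }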
>
>> +                break;
>> +        }
>> +
>> +        return amd_msr_idx_to_pmc(vcpu, msr);
>> +}
>> +
>>  static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>>  {
>>          struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
>> @@ -162,20 +179,31 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>>  static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
>>  {
>>          struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
>> +        struct kvm_cpuid_entry2 *entry;
>> +        union cpuid_0x80000022_ebx ebx;
>>
>> -        if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
>> +        pmu->version = 1;
>> +        entry = kvm_find_cpuid_entry_index(vcpu, 0x80000022, 0);
>> +        if (kvm_pmu_cap.version > 1 && entry && (entry->eax & BIT(0))) {
>> +                pmu->version = 2;
>> +                ebx.full = entry->ebx;
>> +                pmu->nr_arch_gp_counters = min3((unsigned int)ebx.split.num_core_pmc,
>> +                                                (unsigned int)kvm_pmu_cap.num_counters_gp,
>> +                                                (unsigned int)KVM_AMD_PMC_MAX_GENERIC);
>> +                pmu->global_ctrl_mask = ~((1ull << pmu->nr_arch_gp_counters) - 1);
>> +                pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask;
>> +        } else if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
>>                  pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
>
> The logic above doesn't seem quite right, since guest_cpuid_has(vcpu,
> X86_FEATURE_PERFCTR_CORE) promises 6 PMCs, regardless of what
> CPUID.80000022 says.
I had expected the presence of CPUID.80000022 to override PERFCTR_CORE, but
I now agree with you that it isn't a good idea, so how about:
amd_pmu_refresh():

        struct kvm_cpuid_entry2 *entry = NULL;
        union cpuid_0x80000022_ebx ebx;
        bool perfctr_core = guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE);

        pmu->version = 1;
        if (kvm_pmu_cap.version > 1)
                entry = kvm_find_cpuid_entry_index(vcpu, 0x80000022, 0);

        if (!perfctr_core)
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

        if (entry && (entry->eax & BIT(0))) {
                pmu->version = 2;
                ebx.full = entry->ebx;
                pmu->nr_arch_gp_counters = min3((unsigned int)ebx.split.num_core_pmc,
                                                (unsigned int)kvm_pmu_cap.num_counters_gp,
                                                (unsigned int)KVM_AMD_PMC_MAX_GENERIC);
        }

        /* PERFCTR_CORE promises 6 PMCs, regardless of CPUID.80000022 */
        if (perfctr_core)
                pmu->nr_arch_gp_counters = max(pmu->nr_arch_gp_counters,
                                               (unsigned int)AMD64_NUM_COUNTERS_CORE);

        if (pmu->version > 1) {
                pmu->global_ctrl_mask = ~((1ull << pmu->nr_arch_gp_counters) - 1);
                pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask;
        }

?
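
As a sanity check on the interplay (assuming a PerfMonV2 host exposing 6 GP
counters): a guest with only PERFCTR_CORE keeps AMD64_NUM_COUNTERS_CORE (6);
a guest with PERFCTR_CORE plus NumCorePmc = 4 first gets min3(4, 6, 6) = 4
and is then raised to 6 by the max(); and NumCorePmc = 8 is clamped to
min3(8, 6, 6) = 6.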