Message-ID: <20200501213033.GA1176754@weiserver.amd.com>
Date: Fri, 1 May 2020 16:30:33 -0500
From: Wei Huang <wei.huang2@....com>
To: Li RongQing <lirongqing@...du.com>
Cc: linux-kernel@...r.kernel.org, kvm@...r.kernel.org, x86@...nel.org,
hpa@...or.com, bp@...en8.de, mingo@...hat.com, tglx@...utronix.de,
jmattson@...gle.com, wanpengli@...cent.com, vkuznets@...hat.com,
sean.j.christopherson@...el.com, pbonzini@...hat.com,
xiaoyao.li@...el.com
Subject: Re: [PATCH] [v3] kvm: x86: support APERF/MPERF registers
On 04/30 06:45, Li RongQing wrote:
> The guest kernel reports a fixed cpu frequency in /proc/cpuinfo,
> which is confusing to users when turbo is enabled. Since commit
> 7d5905dc14a ("x86 / CPU: Always show current CPU frequency in
> /proc/cpuinfo"), aperf/mperf are used to show the current cpu
> frequency, so the guest should support the aperf/mperf capability.
>
> this patch implements aperf/mperf in three modes: none, software
^^^^
This
> emulation, and pass-through
>
> none: the default mode; the guest does not support aperf/mperf
>
> software emulation: the aperf/mperf deltas accumulated while in
> guest mode are reported as the emulated values
>
> pass-through: only suitable with KVM_HINTS_REALTIME, because that
> hint guarantees a 1:1 vCPU:CPU binding with no over-commit.
If we save/restore the values of aperf/mperf properly during vcpu migration
among different cores, is pinning still required?
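As a strawman of what "save/restore" could mean here (untested, and
note it would clobber the host's own aperf/mperf readings on that CPU,
which is probably why pinning is attractive): since IA32_MPERF and
IA32_APERF are read/write MSRs, sched-out could snapshot the
guest-visible values and sched-in could write them back on the new
physical CPU:

	/* sched-out: capture the guest-visible counters (== the host
	 * MSRs while in pass-through mode).
	 */
	static void aperfmperf_save(struct kvm_vcpu *vcpu)
	{
		rdmsrl(MSR_IA32_MPERF, vcpu->arch.v_mperf);
		rdmsrl(MSR_IA32_APERF, vcpu->arch.v_aperf);
	}

	/* sched-in: replay them onto the (possibly different) pCPU. */
	static void aperfmperf_restore(struct kvm_vcpu *vcpu)
	{
		wrmsrl(MSR_IA32_MPERF, vcpu->arch.v_mperf);
		wrmsrl(MSR_IA32_APERF, vcpu->arch.v_aperf);
	}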
>
> A per-VM capability is added to configure the aperfmperf mode.
>
> Signed-off-by: Li RongQing <lirongqing@...du.com>
> Signed-off-by: Chai Wen <chaiwen@...du.com>
> Signed-off-by: Jia Lina <jialina01@...du.com>
> ---
> diff v2:
> support aperfmperf pass-through
> move common code to kvm_get_msr_common
>
> diff v1:
> 1. support AMD, but not tested
pt-mode doesn't work on AMD. See below.
> 2. support per-vm capability to enable
> Documentation/virt/kvm/api.rst | 10 ++++++++++
> arch/x86/include/asm/kvm_host.h | 11 +++++++++++
> arch/x86/kvm/cpuid.c | 13 ++++++++++++-
> arch/x86/kvm/svm.c | 8 ++++++++
> arch/x86/kvm/vmx/vmx.c | 6 ++++++
> arch/x86/kvm/x86.c | 42 +++++++++++++++++++++++++++++++++++++++++
> arch/x86/kvm/x86.h | 15 +++++++++++++++
> include/uapi/linux/kvm.h | 1 +
> 8 files changed, 105 insertions(+), 1 deletion(-)
>
> diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
> index efbbe570aa9b..c3be3b6a1717 100644
> --- a/Documentation/virt/kvm/api.rst
> +++ b/Documentation/virt/kvm/api.rst
> @@ -6109,3 +6109,13 @@ KVM can therefore start protected VMs.
> This capability governs the KVM_S390_PV_COMMAND ioctl and the
> KVM_MP_STATE_LOAD MP_STATE. KVM_SET_MP_STATE can fail for protected
> guests when the state change is invalid.
> +
> +8.23 KVM_CAP_APERFMPERF
> +----------------------------
> +
> +:Architectures: x86
> +:Parameters: args[0] is the aperfmperf mode;
> +             0 for not supported, 1 for software emulation, 2 for pass-through
> +:Returns: 0 on success; -1 on error
> +
> +This capability indicates that KVM supports the APERF and MPERF MSRs.
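For completeness, enabling this from userspace would go through the
standard KVM_ENABLE_CAP vm ioctl; a minimal sketch, with the usual
/dev/kvm and KVM_CREATE_VM fd setup elided:

	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_APERFMPERF,
		.args[0] = 1,	/* 1 = software emulation, 2 = pass-through */
	};

	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
		err(1, "KVM_ENABLE_CAP(KVM_CAP_APERFMPERF)");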
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 42a2d0d3984a..81477f676f60 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -820,6 +820,9 @@ struct kvm_vcpu_arch {
>
> /* AMD MSRC001_0015 Hardware Configuration */
> u64 msr_hwcr;
> +
> + u64 v_mperf;
> + u64 v_aperf;
> };
>
> struct kvm_lpage_info {
> @@ -885,6 +888,12 @@ enum kvm_irqchip_mode {
> KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */
> };
>
> +enum kvm_aperfmperf_mode {
> + KVM_APERFMPERF_NONE,
> + KVM_APERFMPERF_SOFT, /* software emulate aperfmperf */
> + KVM_APERFMPERF_PT, /* pass-through aperfmperf to guest */
> +};
> +
> #define APICV_INHIBIT_REASON_DISABLE 0
> #define APICV_INHIBIT_REASON_HYPERV 1
> #define APICV_INHIBIT_REASON_NESTED 2
> @@ -982,6 +991,8 @@ struct kvm_arch {
>
> struct kvm_pmu_event_filter *pmu_event_filter;
> struct task_struct *nx_lpage_recovery_thread;
> +
> + enum kvm_aperfmperf_mode aperfmperf_mode;
> };
>
> struct kvm_vm_stat {
> diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
> index 901cd1fdecd9..7a64ea2c3eef 100644
> --- a/arch/x86/kvm/cpuid.c
> +++ b/arch/x86/kvm/cpuid.c
> @@ -124,6 +124,14 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
> MSR_IA32_MISC_ENABLE_MWAIT);
> }
>
> + best = kvm_find_cpuid_entry(vcpu, 6, 0);
> + if (best) {
> + if (guest_has_aperfmperf(vcpu->kvm) &&
> + boot_cpu_has(X86_FEATURE_APERFMPERF))
> + best->ecx |= 1;
> + else
> + best->ecx &= ~1;
> + }
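(Side note for other readers: CPUID.06H:ECX bit 0 is the hardware
coordination feedback capability bit; the guest kernel maps it to
X86_FEATURE_APERFMPERF, which is what gates the aperf/mperf-based
/proc/cpuinfo frequency reporting from 7d5905dc14a.)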
> /* Update physical-address width */
> vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
> kvm_mmu_reset_context(vcpu);
> @@ -558,7 +566,10 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
> case 6: /* Thermal management */
> entry->eax = 0x4; /* allow ARAT */
> entry->ebx = 0;
> - entry->ecx = 0;
> + if (boot_cpu_has(X86_FEATURE_APERFMPERF))
> + entry->ecx = 0x1;
> + else
> + entry->ecx = 0x0;
> entry->edx = 0;
> break;
> /* function 7 has additional index. */
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 851e9cc79930..5646b6475049 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
^^^^^^^^^
The latest kernel moved the svm-related files into the
arch/x86/kvm/svm/ directory. You need to update your patch.
> @@ -2292,6 +2292,14 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
> svm->msrpm = page_address(msrpm_pages);
> svm_vcpu_init_msrpm(svm->msrpm);
>
> + if (guest_aperfmperf_soft(vcpu->kvm)) {
> + set_msr_interception(svm->msrpm, MSR_IA32_MPERF, 1, 0);
> + set_msr_interception(svm->msrpm, MSR_IA32_APERF, 1, 0);
> + } else if (guest_aperfmperf_pt(vcpu->kvm)) {
> + set_msr_interception(svm->msrpm, MSR_IA32_MPERF, 0, 0);
> + set_msr_interception(svm->msrpm, MSR_IA32_APERF, 0, 0);
> + }
The bit settings for KVM_APERFMPERF_SOFT and KVM_APERFMPERF_PT are
inverted. set_msr_interception() takes read/write flags as parameters;
when a flag is 1, SVM does not intercept that MSR. So your code
should look like:
	if (guest_aperfmperf_soft(vcpu->kvm)) {
		set_msr_interception(svm->msrpm, MSR_IA32_MPERF, 0, 0);
		set_msr_interception(svm->msrpm, MSR_IA32_APERF, 0, 0);
	} else if (guest_aperfmperf_pt(vcpu->kvm)) {
		set_msr_interception(svm->msrpm, MSR_IA32_MPERF, 1, 0);
		set_msr_interception(svm->msrpm, MSR_IA32_APERF, 1, 0);
	}
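Note that even in pass-through mode the write side stays intercepted
(read = 1, write = 0), which looks right: APERF/MPERF only carry
meaning as a ratio of deltas, and letting the guest write the physical
counters would corrupt the host's own readings.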
> +
> svm->nested.msrpm = page_address(nested_msrpm_pages);
> svm_vcpu_init_msrpm(svm->nested.msrpm);
>
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 91749f1254e8..023c411ce5ad 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -6759,6 +6759,12 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
> vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
> vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
> }
> +
> + if (guest_aperfmperf_pt(vcpu->kvm)) {
> + vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_MPERF, MSR_TYPE_R);
> + vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_APERF, MSR_TYPE_R);
> + }
> +
> vmx->msr_bitmap_mode = 0;
>
> vmx->loaded_vmcs = &vmx->vmcs01;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index b8124b562dea..a57f69a0eb6e 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -3227,6 +3227,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
> case MSR_K7_HWCR:
> msr_info->data = vcpu->arch.msr_hwcr;
> break;
> + case MSR_IA32_MPERF:
> + msr_info->data = vcpu->arch.v_mperf;
> + break;
> + case MSR_IA32_APERF:
> + msr_info->data = vcpu->arch.v_aperf;
> + break;
> default:
> if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
> return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
> @@ -3435,6 +3441,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
> case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
> r = kvm_x86_ops.nested_enable_evmcs != NULL;
> break;
> + case KVM_CAP_APERFMPERF:
> + r = boot_cpu_has(X86_FEATURE_APERFMPERF) ? 1 : 0;
> + break;
> default:
> break;
> }
> @@ -4883,6 +4892,11 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
> kvm->arch.exception_payload_enabled = cap->args[0];
> r = 0;
> break;
> + case KVM_CAP_APERFMPERF:
> + kvm->arch.aperfmperf_mode =
> + boot_cpu_has(X86_FEATURE_APERFMPERF) ? cap->args[0] : 0;
> + r = 0;
> + break;
> default:
> r = -EINVAL;
> break;
> @@ -8163,6 +8177,25 @@ void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
> }
> EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
>
> +
> +static void guest_enter_aperfmperf(u64 *mperf, u64 *aperf)
> +{
> + rdmsrl(MSR_IA32_MPERF, *mperf);
> + rdmsrl(MSR_IA32_APERF, *aperf);
> +}
> +
> +static void guest_exit_aperfmperf(struct kvm_vcpu *vcpu,
> + u64 mperf, u64 aperf)
> +{
> + u64 perf;
> +
> + rdmsrl(MSR_IA32_MPERF, perf);
> + vcpu->arch.v_mperf += perf - mperf;
> +
> + rdmsrl(MSR_IA32_APERF, perf);
> + vcpu->arch.v_aperf += perf - aperf;
> +}
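To make the intent concrete: with these accumulated deltas, the
guest's /proc/cpuinfo path can derive the effective frequency roughly
as below (a sketch of the guest-side math, not code in this patch;
the variable names are made up):

	/* the ratio of accumulated deltas scales the base frequency */
	freq_khz = base_khz * delta_aperf / delta_mperf;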
> +
> /*
> * Returns 1 to let vcpu_run() continue the guest execution loop without
> * exiting to the userspace. Otherwise, the value will be returned to the
> @@ -8176,7 +8209,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
> kvm_cpu_accept_dm_intr(vcpu);
> enum exit_fastpath_completion exit_fastpath = EXIT_FASTPATH_NONE;
>
> + bool enable_aperfmperf = guest_aperfmperf_soft(vcpu->kvm);
> bool req_immediate_exit = false;
> + u64 mperf, aperf;
>
> if (kvm_request_pending(vcpu)) {
> if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu)) {
> @@ -8326,6 +8361,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>
> preempt_disable();
>
> + if (unlikely(enable_aperfmperf))
> + guest_enter_aperfmperf(&mperf, &aperf);
> +
> kvm_x86_ops.prepare_guest_switch(vcpu);
>
> /*
> @@ -8449,6 +8487,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
> }
>
> local_irq_enable();
> +
> + if (unlikely(enable_aperfmperf))
> + guest_exit_aperfmperf(vcpu, mperf, aperf);
> +
> preempt_enable();
>
> vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
> diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> index b968acc0516f..d58dc4e4f96d 100644
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -355,6 +355,21 @@ static inline bool kvm_dr7_valid(u64 data)
> return !(data >> 32);
> }
>
> +static inline bool guest_has_aperfmperf(struct kvm *kvm)
> +{
> + return kvm->arch.aperfmperf_mode != KVM_APERFMPERF_NONE;
> +}
> +
> +static inline bool guest_aperfmperf_soft(struct kvm *kvm)
> +{
> + return kvm->arch.aperfmperf_mode == KVM_APERFMPERF_SOFT;
> +}
> +
> +static inline bool guest_aperfmperf_pt(struct kvm *kvm)
> +{
> + return kvm->arch.aperfmperf_mode == KVM_APERFMPERF_PT;
> +}
> +
> void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
> void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
> u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
> diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
> index 428c7dde6b4b..c67109a02a4d 100644
> --- a/include/uapi/linux/kvm.h
> +++ b/include/uapi/linux/kvm.h
> @@ -1017,6 +1017,7 @@ struct kvm_ppc_resize_hpt {
> #define KVM_CAP_S390_VCPU_RESETS 179
> #define KVM_CAP_S390_PROTECTED 180
> #define KVM_CAP_PPC_SECURE_GUEST 181
> +#define KVM_CAP_APERFMPERF 182
>
> #ifdef KVM_CAP_IRQ_ROUTING
>
>