Message-ID: <10bdaf6d-1c5c-6502-c340-db3f84bf74a1@intel.com>
Date: Wed, 6 Sep 2023 14:40:59 +0800
From: Xiaoyao Li <xiaoyao.li@...el.com>
To: Hao Peng <flyingpenghao@...il.com>, pbonzini@...hat.com
Cc: kvm@...r.kernel.org, Sean Christopherson <seanjc@...gle.com>,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH] KVM: X86: Reduce calls to vcpu_load
On 9/6/2023 2:24 PM, Hao Peng wrote:
> From: Peng Hao <flyingpeng@...cent.com>
>
> A call to vcpu_load()/vcpu_put() takes about 1-2 us.
> kvm_arch_vcpu_create() calls vcpu_load()/vcpu_put() only to
> initialize some VMCS fields; that work can be deferred until the
> first vcpu ioctl, which already performs vcpu_load(), reducing the
> number of vcpu_load() calls.
What if no vcpu ioctl is called after vcpu creation?

And does the first vcpu_load() (it was the second before this patch)
become longer? Have you measured it? (rough measurement sketch below)

I don't think this optimization is worth it unless there is a strong
reason for it.
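
Just to illustrate what I mean by measuring: a rough userspace sketch
(my own, not part of the patch) that times KVM_CREATE_VCPU with
clock_gettime(). Running it before and after the patch would show
whether the claimed 1-2 us actually shows up at creation time. Error
handling is trimmed and the numbers will of course vary by host.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <time.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	struct timespec t0, t1;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);	/* time just this call */
	clock_gettime(CLOCK_MONOTONIC, &t1);

	long ns = (t1.tv_sec - t0.tv_sec) * 1000000000L +
		  (t1.tv_nsec - t0.tv_nsec);
	printf("KVM_CREATE_VCPU (fd %d): %ld ns\n", vcpu, ns);
	return 0;
}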
> Signed-off-by: Peng Hao <flyingpeng@...cent.com>
> ---
> arch/x86/include/asm/kvm_host.h | 1 +
> arch/x86/kvm/x86.c | 21 ++++++++++++++++-----
> 2 files changed, 17 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 9320019708f9..2f2dcd283788 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -984,6 +984,7 @@ struct kvm_vcpu_arch {
> /* Flush the L1 Data cache for L1TF mitigation on VMENTER */
> bool l1tf_flush_l1d;
>
> + bool initialized;
> /* Host CPU on which VM-entry was most recently attempted */
> int last_vmentry_cpu;
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 4fd08a5e0e98..a3671a54e850 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -317,7 +317,20 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
> u64 __read_mostly host_xcr0;
>
> static struct kmem_cache *x86_emulator_cache;
> +static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz);
>
> +static inline bool kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
> +{
> + return vcpu->arch.initialized;
> +}
> +
> +static void kvm_vcpu_initial_reset(struct kvm_vcpu *vcpu)
> +{
> + kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz);
> + kvm_vcpu_reset(vcpu, false);
> + kvm_init_mmu(vcpu);
> + vcpu->arch.initialized = true;
> +}
> /*
> * When called, it means the previous get/set msr reached an invalid msr.
> * Return true if we want to ignore/silent this failed msr access.
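
One low-effort way to answer my question about the first ioctl getting
longer: temporarily instrument the new helper, e.g. (throwaway hack on
top of this patch, not something to merge):

static void kvm_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	ktime_t t0 = ktime_get();	/* throwaway instrumentation */

	kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz);
	kvm_vcpu_reset(vcpu, false);
	kvm_init_mmu(vcpu);
	vcpu->arch.initialized = true;

	pr_info("deferred vcpu init: %lld us\n",
		ktime_to_us(ktime_sub(ktime_get(), t0)));
}

That would tell us how much latency is being shifted from vcpu
creation onto whichever ioctl happens to come first.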
> @@ -5647,6 +5660,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
>
> vcpu_load(vcpu);
>
> + if (!kvm_vcpu_initialized(vcpu))
> + kvm_vcpu_initial_reset(vcpu);
> +
> u.buffer = NULL;
> switch (ioctl) {
> case KVM_GET_LAPIC: {
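
Also note that this guard only covers ioctls that reach
kvm_arch_vcpu_ioctl(); if I'm reading the generic dispatch right,
KVM_RUN and several other vCPU ioctls are handled in kvm_vcpu_ioctl()
in virt/kvm/kvm_main.c and go straight to kvm_arch_vcpu_ioctl_run()
and friends. Every entry point that depends on the deferred reset
would need the same pattern, roughly:

	/*
	 * hypothetical: needed in every path that relies on the
	 * deferred state, e.g. at the top of kvm_arch_vcpu_ioctl_run()
	 */
	if (!kvm_vcpu_initialized(vcpu))
		kvm_vcpu_initial_reset(vcpu);

which starts to erode whatever is saved by dropping the one
vcpu_load().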
> @@ -11930,11 +11946,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
> vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
> kvm_xen_init_vcpu(vcpu);
> kvm_vcpu_mtrr_init(vcpu);
> - vcpu_load(vcpu);
> - kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz);
> - kvm_vcpu_reset(vcpu, false);
> - kvm_init_mmu(vcpu);
> - vcpu_put(vcpu);
> return 0;
>
> free_guest_fpu:
> --
> 2.31.1