Message-ID: <9bab1e3528990ad6122d48bcc17648806cc1dd8b.camel@redhat.com>
Date: Wed, 23 Feb 2022 18:23:28 +0200
From: Maxim Levitsky <mlevitsk@...hat.com>
To: Paolo Bonzini <pbonzini@...hat.com>, linux-kernel@...r.kernel.org,
kvm@...r.kernel.org
Cc: seanjc@...gle.com
Subject: Re: [PATCH v2 11/18] KVM: x86/mmu: Always use current mmu's role
when loading new PGD
On Thu, 2022-02-17 at 16:03 -0500, Paolo Bonzini wrote:
> Since the guest PGD is now loaded after the MMU has been set up
> completely, the desired role for a cache hit is simply the current
> mmu_role. There is no need to compute it again, so __kvm_mmu_new_pgd
> can be folded into kvm_mmu_new_pgd.
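
Just to spell out the invariant this relies on (my own sketch, not part
of the patch): once kvm_init_mmu() has run, mmu->mmu_role.base already
holds exactly the value that kvm_mmu_calc_root_page_role() used to
recompute, so before this patch one could have asserted something like

	/* hypothetical debug check, not in the patch */
	WARN_ON_ONCE(vcpu->arch.mmu->mmu_role.base.word !=
		     kvm_mmu_calc_root_page_role(vcpu).word);

which is why reading the cached role is now sufficient.
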
>
> Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
> ---
> arch/x86/kvm/mmu/mmu.c | 29 ++++-------------------------
> 1 file changed, 4 insertions(+), 25 deletions(-)
https://www.monkeyuser.com/2020/levels-of-satisfaction/ ;-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 906a9244ad28..b01160716c6a 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -190,8 +190,6 @@ struct kmem_cache *mmu_page_header_cache;
> static struct percpu_counter kvm_total_used_mmu_pages;
>
> static void mmu_spte_set(u64 *sptep, u64 spte);
> -static union kvm_mmu_page_role
> -kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
>
> struct kvm_mmu_role_regs {
> const unsigned long cr0;
> @@ -4191,10 +4189,10 @@ static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
> return cached_root_find_without_current(kvm, mmu, new_pgd, new_role);
> }
>
> -static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
> - union kvm_mmu_page_role new_role)
> +void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
> {
> struct kvm_mmu *mmu = vcpu->arch.mmu;
> + union kvm_mmu_page_role new_role = mmu->mmu_role.base;
>
> if (!fast_pgd_switch(vcpu->kvm, mmu, new_pgd, new_role)) {
> /* kvm_mmu_ensure_valid_pgd will set up a new root. */
> @@ -4230,11 +4228,6 @@ static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
> __clear_sp_write_flooding_count(
> to_shadow_page(vcpu->arch.mmu->root.hpa));
> }
> -
> -void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
> -{
> - __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu));
> -}
> EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
>
> static unsigned long get_cr3(struct kvm_vcpu *vcpu)
> @@ -4904,7 +4897,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
> new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
>
> shadow_mmu_init_context(vcpu, context, &regs, new_role);
> - __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
> + kvm_mmu_new_pgd(vcpu, nested_cr3);
> }
> EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
>
> @@ -4960,7 +4953,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
> reset_ept_shadow_zero_bits_mask(context, execonly);
> }
>
> - __kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base);
> + kvm_mmu_new_pgd(vcpu, new_eptp);
> }
> EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
>
> @@ -5045,20 +5038,6 @@ void kvm_init_mmu(struct kvm_vcpu *vcpu)
> }
> EXPORT_SYMBOL_GPL(kvm_init_mmu);
>
> -static union kvm_mmu_page_role
> -kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
> -{
> - struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
> - union kvm_mmu_role role;
> -
> - if (tdp_enabled)
> - role = kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, true);
> - else
> - role = kvm_calc_shadow_mmu_root_page_role(vcpu, &regs, true);
> -
> - return role.base;
> -}
> -
> void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
> {
> /*
Reviewed-by: Maxim Levitsky <mlevitsk@...hat.com>
Best regards,
Maxim Levitsky