[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20211110100018.367426-4-mlevitsk@redhat.com>
Date: Wed, 10 Nov 2021 12:00:18 +0200
From: Maxim Levitsky <mlevitsk@...hat.com>
To: kvm@...r.kernel.org
Cc: Wanpeng Li <wanpengli@...cent.com>, Borislav Petkov <bp@...en8.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>, linux-kernel@...r.kernel.org,
Sean Christopherson <seanjc@...gle.com>,
Joerg Roedel <joro@...tes.org>,
x86@...nel.org (maintainer:X86 ARCHITECTURE (32-BIT AND 64-BIT)),
Vitaly Kuznetsov <vkuznets@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
Paolo Bonzini <pbonzini@...hat.com>,
Jim Mattson <jmattson@...gle.com>,
Maxim Levitsky <mlevitsk@...hat.com>
Subject: [PATCH 3/3] KVM: x86/mmu: don't skip mmu initialization when mmu root level changes
When running a mix of 32 and 64 bit guests, it is possible to have an mmu
reset with the same mmu role but a different root level (32 bit vs 64 bit paging).
Signed-off-by: Maxim Levitsky <mlevitsk@...hat.com>
---
arch/x86/kvm/mmu/mmu.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 354d2ca92df4d..763867475860f 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4745,7 +4745,10 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
union kvm_mmu_role new_role =
kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
- if (new_role.as_u64 == context->mmu_role.as_u64)
+ u8 new_root_level = role_regs_to_root_level(&regs);
+
+ if (new_role.as_u64 == context->mmu_role.as_u64 &&
+ context->root_level == new_root_level)
return;
context->mmu_role.as_u64 = new_role.as_u64;
@@ -4757,7 +4760,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
context->get_guest_pgd = get_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;
- context->root_level = role_regs_to_root_level(&regs);
+ context->root_level = new_root_level;
if (!is_cr0_pg(context))
context->gva_to_gpa = nonpaging_gva_to_gpa;
@@ -4806,7 +4809,10 @@ static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *conte
struct kvm_mmu_role_regs *regs,
union kvm_mmu_role new_role)
{
- if (new_role.as_u64 == context->mmu_role.as_u64)
+ u8 new_root_level = role_regs_to_root_level(regs);
+
+ if (new_role.as_u64 == context->mmu_role.as_u64 &&
+ context->root_level == new_root_level)
return;
context->mmu_role.as_u64 = new_role.as_u64;
@@ -4817,8 +4823,8 @@ static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *conte
paging64_init_context(context);
else
paging32_init_context(context);
- context->root_level = role_regs_to_root_level(regs);
+ context->root_level = new_root_level;
reset_guest_paging_metadata(vcpu, context);
context->shadow_root_level = new_role.base.level;
--
2.26.3
Powered by blists - more mailing lists