Message-Id: <20200320212833.3507-31-sean.j.christopherson@intel.com>
Date: Fri, 20 Mar 2020 14:28:26 -0700
From: Sean Christopherson <sean.j.christopherson@...el.com>
To: Paolo Bonzini <pbonzini@...hat.com>
Cc: Sean Christopherson <sean.j.christopherson@...el.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org, Ben Gardon <bgardon@...gle.com>,
Junaid Shahid <junaids@...gle.com>,
Liran Alon <liran.alon@...cle.com>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
John Haxby <john.haxby@...cle.com>,
Miaohe Lin <linmiaohe@...wei.com>,
Tom Lendacky <thomas.lendacky@....com>
Subject: [PATCH v3 30/37] KVM: x86/mmu: Move fast_cr3_switch() side effects to __kvm_mmu_new_cr3()

Handle the side effects of a fast CR3 (PGD) switch up a level in
__kvm_mmu_new_cr3(), which is the only caller of fast_cr3_switch().

This consolidates the handling of all side effects in __kvm_mmu_new_cr3()
(which already handles freeing the current root when KVM can't do a fast
switch), and eases the addition of a second boolean in a future patch to
provide a separate "skip" override for the MMU sync.
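
To illustrate the refactoring pattern in isolation, here is a minimal,
self-contained sketch (not the actual KVM code; try_fast_switch() and
switch_pgd() are hypothetical stand-ins for fast_cr3_switch() and
__kvm_mmu_new_cr3(), and the puts() calls stand in for the KVM requests):

    #include <stdbool.h>
    #include <stdio.h>

    /* The callee is now purely a predicate: no requests, no cache clearing. */
    static bool try_fast_switch(unsigned long new_pgd)
    {
            return (new_pgd & 0xfffUL) == 0;  /* stand-in validity check */
    }

    static void switch_pgd(unsigned long new_pgd, bool skip_tlb_flush)
    {
            if (!try_fast_switch(new_pgd)) {
                    puts("slow path: free the current root");
                    return;
            }

            /* All side effects live in the lone caller. */
            puts("request: load new PGD");
            if (!skip_tlb_flush) {
                    puts("request: sync MMU");
                    puts("request: flush TLB");
            }
            puts("clear cached MMIO info");
    }

    int main(void)
    {
            switch_pgd(0x1000, false);  /* fast path, with sync + flush */
            switch_pgd(0x1001, true);   /* slow path */
            return 0;
    }
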
Cc: Vitaly Kuznetsov <vkuznets@...hat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@...el.com>
---
arch/x86/kvm/mmu/mmu.c | 69 +++++++++++++++++++-----------------------
 1 file changed, 31 insertions(+), 38 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 97d906a42e81..b95933198f4c 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4288,8 +4288,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 }

static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
- union kvm_mmu_page_role new_role,
- bool skip_tlb_flush)
+ union kvm_mmu_page_role new_role)
{
struct kvm_mmu *mmu = vcpu->arch.mmu;
@@ -4299,39 +4298,9 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
* later if necessary.
*/
if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
- mmu->root_level >= PT64_ROOT_4LEVEL) {
- if (mmu_check_root(vcpu, new_cr3 >> PAGE_SHIFT))
- return false;
-
- if (cached_root_available(vcpu, new_cr3, new_role)) {
- /*
- * It is possible that the cached previous root page is
- * obsolete because of a change in the MMU generation
- * number. However, changing the generation number is
- * accompanied by KVM_REQ_MMU_RELOAD, which will free
- * the root set here and allocate a new one.
- */
- kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
- if (!skip_tlb_flush) {
- kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
- kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
- }
-
- /*
- * The last MMIO access's GVA and GPA are cached in the
- * VCPU. When switching to a new CR3, that GVA->GPA
- * mapping may no longer be valid. So clear any cached
- * MMIO info even when we don't need to sync the shadow
- * page tables.
- */
- vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
-
- __clear_sp_write_flooding_count(
- page_header(mmu->root_hpa));
-
- return true;
- }
- }
+ mmu->root_level >= PT64_ROOT_4LEVEL)
+ return !mmu_check_root(vcpu, new_cr3 >> PAGE_SHIFT) &&
+ cached_root_available(vcpu, new_cr3, new_role);
return false;
}
@@ -4340,9 +4309,33 @@ static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
union kvm_mmu_page_role new_role,
bool skip_tlb_flush)
{
- if (!fast_cr3_switch(vcpu, new_cr3, new_role, skip_tlb_flush))
- kvm_mmu_free_roots(vcpu, vcpu->arch.mmu,
- KVM_MMU_ROOT_CURRENT);
+ if (!fast_cr3_switch(vcpu, new_cr3, new_role)) {
+ kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
+ return;
+ }
+
+ /*
+ * It's possible that the cached previous root page is obsolete because
+ * of a change in the MMU generation number. However, changing the
+ * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will
+ * free the root set here and allocate a new one.
+ */
+ kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
+
+ if (!skip_tlb_flush) {
+ kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+ kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+ }
+
+ /*
+ * The last MMIO access's GVA and GPA are cached in the VCPU. When
+ * switching to a new CR3, that GVA->GPA mapping may no longer be
+ * valid. So clear any cached MMIO info even when we don't need to sync
+ * the shadow page tables.
+ */
+ vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
+
+ __clear_sp_write_flooding_count(page_header(vcpu->arch.mmu->root_hpa));
 }

void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)

-- 
2.24.1