Message-Id: <20210824075524.3354-3-jiangshanlai@gmail.com>
Date: Tue, 24 Aug 2021 15:55:18 +0800
From: Lai Jiangshan <jiangshanlai@...il.com>
To: linux-kernel@...r.kernel.org
Cc: Paolo Bonzini <pbonzini@...hat.com>,
Lai Jiangshan <laijs@...ux.alibaba.com>,
Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
x86@...nel.org, "H. Peter Anvin" <hpa@...or.com>,
Marcelo Tosatti <mtosatti@...hat.com>,
Avi Kivity <avi@...hat.com>, kvm@...r.kernel.org
Subject: [PATCH 2/7] KVM: X86: Synchronize the shadow pagetable before linking it
From: Lai Jiangshan <laijs@...ux.alibaba.com>
If a gpte is changed from non-present to present, the guest doesn't need
to flush the TLB per the SDM. So the host must synchronize the sp before
linking it. Otherwise the guest might use a stale mapping.

For example: the guest first changes a level-1 pagetable, and then
links its parent into a new location where the original gpte is
non-present. The guest can then access the remapped area without
flushing the TLB. This guest behavior is allowed per the SDM, but the
host KVM MMU gets it wrong.
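
To make the ordering problem concrete, below is a purely illustrative
user-space sketch (not kernel code; the toy table layout and the
link_shadow() helper are made up for this example). It models a guest
level-1 pagetable, KVM's unsynchronized shadow copy of it, and the
non-present -> present re-link described above:

	#include <stdio.h>
	#include <string.h>

	/* Toy model: one guest level-1 table and KVM's shadow copy of it. */
	static int guest_l1[4]  = { 0x1000, 0x2000, 0x3000, 0x4000 };
	static int shadow_l1[4];	/* shadow page, possibly out of sync */
	static int unsync;		/* is shadow_l1 marked unsync?       */

	/* Parent slots: the guest's view and the shadow view of the links. */
	static int *guest_parent[2]  = { guest_l1,  NULL };
	static int *shadow_parent[2] = { shadow_l1, NULL };

	static void link_shadow(int slot, int sync_first)
	{
		if (sync_first && unsync) {	/* the fix: sync before linking */
			memcpy(shadow_l1, guest_l1, sizeof(shadow_l1));
			unsync = 0;
		}
		shadow_parent[slot] = shadow_l1;
	}

	int main(void)
	{
		memcpy(shadow_l1, guest_l1, sizeof(shadow_l1));

		/* 1. Guest rewrites its L1 table; KVM only marks the shadow unsync. */
		guest_l1[0] = 0x9000;
		unsync = 1;

		/* 2. Guest links the L1 table at a parent slot whose old gpte was
		 *    non-present; per the SDM no TLB flush is required for that.   */
		guest_parent[1] = guest_l1;
		printf("guest's own tables map slot 1 entry 0 to %#x\n",
		       guest_parent[1][0]);

		/* 3. If KVM links the shadow page without synchronizing it first,
		 *    the guest reads a stale translation through the new link.     */
		link_shadow(1, /*sync_first=*/0);
		printf("shadow without sync: %#x (stale)\n", shadow_parent[1][0]);

		link_shadow(1, /*sync_first=*/1);
		printf("shadow with sync:    %#x\n", shadow_parent[1][0]);
		return 0;
	}

Without the sync the stale 0x1000 leaks through the new link even though
the guest wrote 0x9000, which is exactly what linking an unsync sp allows.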
Fixes: 4731d4c7a077 ("KVM: MMU: out of sync shadow core")
Signed-off-by: Lai Jiangshan <laijs@...ux.alibaba.com>
---
arch/x86/kvm/mmu/mmu.c | 21 ++++++++++++++-------
arch/x86/kvm/mmu/paging_tmpl.h | 28 +++++++++++++++++++++++++---
2 files changed, 39 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 313918df1a10..987953a901d2 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2032,8 +2032,9 @@ static void mmu_pages_clear_parents(struct mmu_page_path *parents)
} while (!sp->unsync_children);
}
-static void mmu_sync_children(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *parent)
+static bool mmu_sync_children(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_page *parent,
+ bool root)
{
int i;
struct kvm_mmu_page *sp;
@@ -2061,11 +2062,20 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
+ /*
+ * If @parent is not a root page, the caller doesn't
+ * hold any reference to it, so we can't safely access
+ * @parent to continue synchronizing after the
+ * mmu_lock has been released.
+ */
+ if (!root)
+ return false;
flush = false;
}
}
kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
+ return true;
}
static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
@@ -2151,9 +2161,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}
- if (sp->unsync_children)
- kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
-
__clear_sp_write_flooding_count(sp);
trace_get_page:
@@ -3650,7 +3657,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
write_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
- mmu_sync_children(vcpu, sp);
+ mmu_sync_children(vcpu, sp, true);
kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
write_unlock(&vcpu->kvm->mmu_lock);
@@ -3666,7 +3673,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
if (IS_VALID_PAE_ROOT(root)) {
root &= PT64_BASE_ADDR_MASK;
sp = to_shadow_page(root);
- mmu_sync_children(vcpu, sp);
+ mmu_sync_children(vcpu, sp, true);
}
}
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 50ade6450ace..48c7fe1b2d50 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -664,7 +664,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
* emulate this operation, return 1 to indicate this case.
*/
static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
- struct guest_walker *gw)
+ struct guest_walker *gw, unsigned long mmu_seq)
{
struct kvm_mmu_page *sp = NULL;
struct kvm_shadow_walk_iterator it;
@@ -678,6 +678,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
top_level = vcpu->arch.mmu->root_level;
if (top_level == PT32E_ROOT_LEVEL)
top_level = PT32_ROOT_LEVEL;
+
+again:
/*
* Verify that the top-level gpte is still there. Since the page
* is a root page, it is either write protected (and cannot be
@@ -713,8 +715,28 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
goto out_gpte_changed;
- if (sp)
+ if (sp) {
+ /*
+ * We must synchronize the pagetable before linking it
+ * because the guest doesn't need to flush the tlb when
+ * a gpte is changed from non-present to present.
+ * Otherwise, the guest may use the wrong mapping.
+ *
+ * For PG_LEVEL_4K, kvm_mmu_get_page() has already
+ * synchronized it transiently via kvm_sync_page().
+ *
+ * For a higher-level pagetable, we synchronize it
+ * via the slower mmu_sync_children(). If it has
+ * released the mmu_lock, we need to restart from
+ * the root since we don't hold a reference to @sp.
+ */
+ if (sp->unsync_children && !mmu_sync_children(vcpu, sp, false)) {
+ if (mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
+ goto out_gpte_changed;
+ goto again;
+ }
link_shadow_page(vcpu, it.sptep, sp);
+ }
}
kvm_mmu_hugepage_adjust(vcpu, fault);
@@ -905,7 +927,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
r = make_mmu_pages_available(vcpu);
if (r)
goto out_unlock;
- r = FNAME(fetch)(vcpu, fault, &walker);
+ r = FNAME(fetch)(vcpu, fault, &walker, mmu_seq);
kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
out_unlock:
--
2.19.1.6.gb485710b