Message-Id: <20210305011101.3597423-6-seanjc@google.com>
Date: Thu, 4 Mar 2021 17:10:49 -0800
From: Sean Christopherson <seanjc@...gle.com>
To: Paolo Bonzini <pbonzini@...hat.com>
Cc: Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org, Ben Gardon <bgardon@...gle.com>,
Brijesh Singh <brijesh.singh@....com>,
Tom Lendacky <thomas.lendacky@....com>
Subject: [PATCH v2 05/17] KVM: x86/mmu: Allocate pae_root and lm_root pages in dedicated helper

Move the on-demand allocation of the pae_root and lm_root pages, used by
nested NPT for 32-bit L1s, into a separate helper. This will allow a
future patch to hold mmu_lock while allocating the non-special roots so
that make_mmu_pages_available() can be checked once at the start of root
allocation, and thus avoid having to deal with failure in the middle of
root allocation.

Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
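
A condensed sketch of the resulting allocation order in kvm_mmu_load(),
pieced together from the hunks below (a reader's aid, not part of the
patch; unrelated details elided):

	int kvm_mmu_load(struct kvm_vcpu *vcpu)
	{
		int r;

		r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
		if (r)
			goto out;

		/* Allocate pae_root/lm_root before any shadow roots. */
		r = mmu_alloc_special_roots(vcpu);
		if (r)
			goto out;

		r = mmu_alloc_roots(vcpu);
		...
	}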
arch/x86/kvm/mmu/mmu.c | 84 +++++++++++++++++++++++++++---------------
1 file changed, 54 insertions(+), 30 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 7cb5fb5d2d4d..dd9d5cc13a46 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3308,38 +3308,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * the shadow page table may be a PAE or a long mode page table.
 	 */
 	pm_mask = PT_PRESENT_MASK;
-	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
+	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
 
-	/*
-	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
-	 * tables are allocated and initialized at root creation as there is no
-	 * equivalent level in the guest's NPT to shadow. Allocate the tables
-	 * on demand, as running a 32-bit L1 VMM is very rare. Unlike 32-bit
-	 * NPT, the PDP table doesn't need to be in low mem. Preallocate the
-	 * pages so that the PAE roots aren't leaked on failure.
-	 */
-	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL &&
-	    (!mmu->pae_root || !mmu->lm_root)) {
-		u64 *lm_root, *pae_root;
-
-		if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->lm_root))
-			return -EIO;
-
-		pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
-		if (!pae_root)
-			return -ENOMEM;
-
-		lm_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
-		if (!lm_root) {
-			free_page((unsigned long)pae_root);
-			return -ENOMEM;
-		}
-
-		mmu->pae_root = pae_root;
-		mmu->lm_root = lm_root;
-
-		lm_root[0] = __pa(mmu->pae_root) | pm_mask;
+		mmu->lm_root[0] = __pa(mmu->pae_root) | pm_mask;
 	}
 
 	for (i = 0; i < 4; ++i) {
@@ -3373,6 +3345,55 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mmu *mmu = vcpu->arch.mmu;
+	u64 *lm_root, *pae_root;
+
+	/*
+	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
+	 * tables are allocated and initialized at root creation as there is no
+	 * equivalent level in the guest's NPT to shadow. Allocate the tables
+	 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
+	 */
+	if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
+	    mmu->shadow_root_level < PT64_ROOT_4LEVEL)
+		return 0;
+
+	/*
+	 * This mess only works with 4-level paging and needs to be updated to
+	 * work with 5-level paging.
+	 */
+	if (WARN_ON_ONCE(mmu->shadow_root_level != PT64_ROOT_4LEVEL))
+		return -EIO;
+
+	if (mmu->pae_root && mmu->lm_root)
+		return 0;
+
+	/*
+	 * The special roots should always be allocated in concert. Yell and
+	 * bail if KVM ends up in a state where only one of the roots is valid.
+	 */
+	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->lm_root))
+		return -EIO;
+
+	/* Unlike 32-bit NPT, the PDP table doesn't need to be in low mem. */
+	pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
+	if (!pae_root)
+		return -ENOMEM;
+
+	lm_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
+	if (!lm_root) {
+		free_page((unsigned long)pae_root);
+		return -ENOMEM;
+	}
+
+	mmu->pae_root = pae_root;
+	mmu->lm_root = lm_root;
+
+	return 0;
+}
+
 static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->arch.mmu->direct_map)
@@ -4820,6 +4841,9 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 	int r;
 
 	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
+	if (r)
+		goto out;
+	r = mmu_alloc_special_roots(vcpu);
 	if (r)
 		goto out;
 	r = mmu_alloc_roots(vcpu);
--
2.30.1.766.gb4fecdf3b7-goog
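
For readers new to the special roots, the structure they form once
mmu_alloc_shadow_roots() wires them up (a schematic reading of the
lm_root[0] assignment above, not a literal excerpt from the patch):

	/*
	 * Shadowing a 32-bit/PAE L1's NPT with 64-bit NPT:
	 *
	 *   mmu->root_hpa -> lm_root    PML4 page, only entry 0 is used
	 *   lm_root[0]    -> pae_root   PDP page holding the four PAE roots
	 *   pae_root[i]   -> per-PDPTE shadow page tables, filled in by
	 *                    the for (i = 0; i < 4; ++i) loop above
	 */
	mmu->lm_root[0] = __pa(mmu->pae_root) | pm_mask;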