Message-Id: <20220622192710.2547152-8-pbonzini@redhat.com>
Date: Wed, 22 Jun 2022 15:26:54 -0400
From: Paolo Bonzini <pbonzini@...hat.com>
To: linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc: maz@...nel.org, anup@...infault.org, seanjc@...gle.com,
bgardon@...gle.com, peterx@...hat.com, maciej.szmigiero@...cle.com,
kvmarm@...ts.cs.columbia.edu, linux-mips@...r.kernel.org,
kvm-riscv@...ts.infradead.org, pfeiner@...gle.com,
jiangshanlai@...il.com, dmatlack@...gle.com
Subject: [PATCH v7 07/23] KVM: x86/mmu: Consolidate shadow page allocation and initialization
From: David Matlack <dmatlack@...gle.com>

Consolidate kvm_mmu_alloc_page() and kvm_mmu_alloc_shadow_page() under
the latter so that all shadow page allocation and initialization happens
in one place.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@...gle.com>
Message-Id: <20220516232138.1783324-8-dmatlack@...gle.com>
Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
---
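For reference, here is roughly how the consolidated function reads once
this patch is applied, pieced together from the hunks below. The gfn
parameter is inferred from the "sp->gfn = gfn" assignment (its exact
position in the declaration is an assumption), and everything past
"sp->role = role;" is untouched by this patch and elided:

static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm_vcpu *vcpu,
						       gfn_t gfn,
						       struct hlist_head *sp_list,
						       union kvm_mmu_page_role role)
{
	struct kvm_mmu_page *sp;

	/* All allocations now happen here, from the per-vCPU caches. */
	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	if (!role.direct)
		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);

	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	/*
	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
	 * depends on valid pages being added to the head of the list.  See
	 * comments in kvm_zap_obsolete_pages().
	 */
	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	kvm_mod_used_mmu_pages(vcpu->kvm, +1);

	sp->gfn = gfn;
	sp->role = role;
	/* rest of the function is outside this hunk and elided */
}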
arch/x86/kvm/mmu/mmu.c | 39 +++++++++++++++++----------------------
1 file changed, 17 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a59fe860da29..8b84cdd8c6cd 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1664,27 +1664,6 @@ static void drop_parent_pte(struct kvm_mmu_page *sp,
mmu_spte_clear_no_track(parent_pte);
}

-static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, bool direct)
-{
- struct kvm_mmu_page *sp;
-
- sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
- sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
- if (!direct)
- sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
- set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
-
- /*
- * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
- * depends on valid pages being added to the head of the list. See
- * comments in kvm_zap_obsolete_pages().
- */
- sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
- list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
- kvm_mod_used_mmu_pages(vcpu->kvm, +1);
- return sp;
-}
-
static void mark_unsync(u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
@@ -2072,7 +2051,23 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm_vcpu *vcpu,
struct hlist_head *sp_list,
union kvm_mmu_page_role role)
{
- struct kvm_mmu_page *sp = kvm_mmu_alloc_page(vcpu, role.direct);
+ struct kvm_mmu_page *sp;
+
+ sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
+ sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
+ if (!role.direct)
+ sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
+
+ set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
+
+ /*
+ * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
+ * depends on valid pages being added to the head of the list. See
+ * comments in kvm_zap_obsolete_pages().
+ */
+ sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
+ list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
+ kvm_mod_used_mmu_pages(vcpu->kvm, +1);

sp->gfn = gfn;
sp->role = role;
--
2.31.1