Message-Id: <20220622192710.2547152-7-pbonzini@redhat.com>
Date: Wed, 22 Jun 2022 15:26:53 -0400
From: Paolo Bonzini <pbonzini@...hat.com>
To: linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc: maz@...nel.org, anup@...infault.org, seanjc@...gle.com,
bgardon@...gle.com, peterx@...hat.com, maciej.szmigiero@...cle.com,
kvmarm@...ts.cs.columbia.edu, linux-mips@...r.kernel.org,
kvm-riscv@...ts.infradead.org, pfeiner@...gle.com,
jiangshanlai@...il.com, dmatlack@...gle.com
Subject: [PATCH v7 06/23] KVM: x86/mmu: Decompose kvm_mmu_get_page() into separate functions
From: David Matlack <dmatlack@...gle.com>

Decompose kvm_mmu_get_page() into separate helper functions to increase
readability and prepare for allocating shadow pages without a vcpu
pointer.

Specifically, pull the guts of kvm_mmu_get_page() into 2 helper
functions:

kvm_mmu_find_shadow_page() -
  Walks the page hash, checking for any existing mmu pages that match
  the given gfn and role.

kvm_mmu_alloc_shadow_page() -
  Allocates and initializes an entirely new kvm_mmu_page. This currently
  requires a vcpu pointer for allocation and for looking up the memslot,
  but that will be removed in a future commit.

No functional change intended.

Reviewed-by: Sean Christopherson <seanjc@...gle.com>
Signed-off-by: David Matlack <dmatlack@...gle.com>
Message-Id: <20220516232138.1783324-7-dmatlack@...gle.com>
Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
---
arch/x86/kvm/mmu/mmu.c | 52 +++++++++++++++++++++++++++++++-----------
1 file changed, 39 insertions(+), 13 deletions(-)
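
[Archive note, not part of the commit message: condensed, the patch
turns kvm_mmu_get_page() into a find-or-allocate pattern over the two
helpers described above.  The sketch below trims the unsync/zap
handling and the collision-stat bookkeeping; the diff that follows is
the authoritative version.]

	/* kvm_mmu_find_shadow_page(), trimmed: walk one hash bucket. */
	for_each_valid_sp(vcpu->kvm, sp, sp_list) {
		if (sp->gfn != gfn)
			continue;	/* different gfn in same bucket */
		if (sp->role.word != role.word)
			continue;	/* same gfn, different role */
		return sp;		/* cache hit: reuse this page */
	}
	return NULL;			/* cache miss: caller allocates */

	/* kvm_mmu_get_page(), as in the diff: find, else allocate. */
	sp = kvm_mmu_find_shadow_page(vcpu, gfn, sp_list, role);
	if (!sp) {
		created = true;
		sp = kvm_mmu_alloc_shadow_page(vcpu, gfn, sp_list, role);
	}
	trace_kvm_mmu_get_page(sp, created);
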
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f4e7978a6c6a..a59fe860da29 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1993,16 +1993,16 @@ static void clear_sp_write_flooding_count(u64 *spte)
__clear_sp_write_flooding_count(sptep_to_sp(spte));
}
-static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn,
- union kvm_mmu_page_role role)
+static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm_vcpu *vcpu,
+ gfn_t gfn,
+ struct hlist_head *sp_list,
+ union kvm_mmu_page_role role)
{
- struct hlist_head *sp_list;
struct kvm_mmu_page *sp;
int ret;
int collisions = 0;
LIST_HEAD(invalid_list);
- sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
for_each_valid_sp(vcpu->kvm, sp, sp_list) {
if (sp->gfn != gfn) {
collisions++;
@@ -2027,7 +2027,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn,
/* unsync and write-flooding only apply to indirect SPs. */
if (sp->role.direct)
- goto trace_get_page;
+ goto out;
if (sp->unsync) {
/*
@@ -2053,14 +2053,26 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn,
__clear_sp_write_flooding_count(sp);
-trace_get_page:
- trace_kvm_mmu_get_page(sp, false);
goto out;
}
+ sp = NULL;
++vcpu->kvm->stat.mmu_cache_miss;
- sp = kvm_mmu_alloc_page(vcpu, role.direct);
+out:
+ kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+
+ if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
+ vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
+ return sp;
+}
+
+static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm_vcpu *vcpu,
+ gfn_t gfn,
+ struct hlist_head *sp_list,
+ union kvm_mmu_page_role role)
+{
+ struct kvm_mmu_page *sp = kvm_mmu_alloc_page(vcpu, role.direct);
sp->gfn = gfn;
sp->role = role;
@@ -2070,12 +2082,26 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn,
if (role.level == PG_LEVEL_4K && kvm_vcpu_write_protect_gfn(vcpu, gfn))
kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
}
- trace_kvm_mmu_get_page(sp, true);
-out:
- kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
- if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
- vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
+ return sp;
+}
+
+static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+ union kvm_mmu_page_role role)
+{
+ struct hlist_head *sp_list;
+ struct kvm_mmu_page *sp;
+ bool created = false;
+
+ sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
+
+ sp = kvm_mmu_find_shadow_page(vcpu, gfn, sp_list, role);
+ if (!sp) {
+ created = true;
+ sp = kvm_mmu_alloc_shadow_page(vcpu, gfn, sp_list, role);
+ }
+
+ trace_kvm_mmu_get_page(sp, created);
return sp;
}
--
2.31.1