Message-Id: <bf08a06675ed97d2456f7dcd9b0b2ef92f4602be.1715137643.git.houwenlong.hwl@antgroup.com>
Date: Wed, 08 May 2024 11:18:42 +0800
From: "Hou Wenlong" <houwenlong.hwl@...group.com>
To: kvm@...r.kernel.org
Cc: "Lai Jiangshan" <jiangshan.ljs@...group.com>,
"Sean Christopherson" <seanjc@...gle.com>,
"Paolo Bonzini" <pbonzini@...hat.com>,
"Thomas Gleixner" <tglx@...utronix.de>,
"Ingo Molnar" <mingo@...hat.com>,
"Borislav Petkov" <bp@...en8.de>,
"Dave Hansen" <dave.hansen@...ux.intel.com>,
<x86@...nel.org>,
"H. Peter Anvin" <hpa@...or.com>,
<linux-kernel@...r.kernel.org>
Subject: [PATCH] KVM: x86/mmu: Only allocate shadowed translation cache for sp->role.level <= KVM_MAX_HUGEPAGE_LEVEL

Only indirect SPs with sp->role.level <= KVM_MAX_HUGEPAGE_LEVEL can have
leaf gptes, so the shadowed translation cache needs to be allocated only
for those SPs. Additionally, check sp->shadowed_translation itself,
rather than sp->role.direct or sp_has_gptes(), to decide whether the
shadowed translation cache is present.

Suggested-by: Lai Jiangshan <jiangshan.ljs@...group.com>
Signed-off-by: Hou Wenlong <houwenlong.hwl@...group.com>
---
arch/x86/kvm/mmu/mmu.c | 27 +++++++++++++++++++--------
1 file changed, 19 insertions(+), 8 deletions(-)
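
(Note for reviewers, not part of the patch: a minimal user-space sketch of
the new allocation predicate. KVM_MAX_HUGEPAGE_LEVEL is assumed to be
PG_LEVEL_1G (3) as on current x86, and struct sp_role below is a simplified
stand-in for union kvm_mmu_page_role.)

#include <stdbool.h>
#include <stdio.h>

#define KVM_MAX_HUGEPAGE_LEVEL 3	/* assumed: PG_LEVEL_1G on x86 */

struct sp_role {
	bool direct;	/* SP maps guest memory directly, no gptes */
	int level;	/* page table level of this SP, 1 = 4K PT */
};

static bool sp_might_have_leaf_gptes(struct sp_role role)
{
	/* Direct SPs have no gptes at all. */
	if (role.direct)
		return false;

	/* Above the max hugepage level, every gpte is non-leaf. */
	if (role.level > KVM_MAX_HUGEPAGE_LEVEL)
		return false;

	return true;
}

int main(void)
{
	/* indirect 4K page table -> cache needed (prints 1) */
	printf("%d\n", sp_might_have_leaf_gptes((struct sp_role){ false, 1 }));
	/* indirect PML4 -> no leaf gptes possible (prints 0) */
	printf("%d\n", sp_might_have_leaf_gptes((struct sp_role){ false, 4 }));
	/* direct SP -> no gptes (prints 0) */
	printf("%d\n", sp_might_have_leaf_gptes((struct sp_role){ true, 1 }));
	return 0;
}
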
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index fc3b59b59ee1..8be987d0f05e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -716,12 +716,12 @@ static bool sp_has_gptes(struct kvm_mmu_page *sp);
 
 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
 {
+	if (sp->shadowed_translation)
+		return sp->shadowed_translation[index] >> PAGE_SHIFT;
+
 	if (sp->role.passthrough)
 		return sp->gfn;
 
-	if (!sp->role.direct)
-		return sp->shadowed_translation[index] >> PAGE_SHIFT;
-
 	return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
 }
 
@@ -733,7 +733,7 @@ static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
  */
 static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
 {
-	if (sp_has_gptes(sp))
+	if (sp->shadowed_translation)
 		return sp->shadowed_translation[index] & ACC_ALL;
 
 	/*
@@ -754,7 +754,7 @@ static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
 static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
 					 gfn_t gfn, unsigned int access)
 {
-	if (sp_has_gptes(sp)) {
+	if (sp->shadowed_translation) {
 		sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
 		return;
 	}
@@ -1697,7 +1697,7 @@ static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
 	hlist_del(&sp->hash_link);
 	list_del(&sp->link);
 	free_page((unsigned long)sp->spt);
-	if (!sp->role.direct)
+	if (sp->shadowed_translation)
 		free_page((unsigned long)sp->shadowed_translation);
 	kmem_cache_free(mmu_page_header_cache, sp);
 }
@@ -1850,6 +1850,17 @@ static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list);
 
+static bool sp_might_have_leaf_gptes(struct kvm_mmu_page *sp)
+{
+	if (sp->role.direct)
+		return false;
+
+	if (sp->role.level > KVM_MAX_HUGEPAGE_LEVEL)
+		return false;
+
+	return true;
+}
+
 static bool sp_has_gptes(struct kvm_mmu_page *sp)
 {
 	if (sp->role.direct)
@@ -2199,7 +2210,8 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
 
 	sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
 	sp->spt = kvm_mmu_memory_cache_alloc(caches->shadow_page_cache);
-	if (!role.direct)
+	sp->role = role;
+	if (sp_might_have_leaf_gptes(sp))
 		sp->shadowed_translation = kvm_mmu_memory_cache_alloc(caches->shadowed_info_cache);
 
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
@@ -2216,7 +2228,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
 	kvm_account_mmu_page(kvm, sp);
 
 	sp->gfn = gfn;
-	sp->role = role;
 	hlist_add_head(&sp->hash_link, sp_list);
 	if (sp_has_gptes(sp))
 		account_shadowed(kvm, sp);
--
2.31.1