Message-Id: <20260123090304.32286-1-jiangshanlai@gmail.com>
Date: Fri, 23 Jan 2026 17:03:02 +0800
From: Lai Jiangshan <jiangshanlai@...il.com>
To: linux-kernel@...r.kernel.org
Cc: Lai Jiangshan <jiangshan.ljs@...group.com>,
Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Thomas Gleixner <tglx@...nel.org>,
Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>,
kvm@...r.kernel.org
Subject: [PATCH 1/2] KVM: x86/mmu: Don't check old SPTE permissions when trying to unsync
From: Lai Jiangshan <jiangshan.ljs@...group.com>

Commit ecc5589f19a5 ("KVM: MMU: optimize set_spte for page sync") added
a writable permission check on the old SPTE to avoid unnecessary calls
to mmu_try_to_unsync_pages() when syncing SPTEs.
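
For reference, this is the check in make_spte() that is being removed
(taken verbatim from the diff below):

	if ((!is_last_spte(old_spte, level) || !is_writable_pte(old_spte)) &&
	    mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, synchronizing, prefetch))
		wrprot = true;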

Later, commit e6722d9211b2 ("KVM: x86/mmu: Reduce the update to the spte
in FNAME(sync_spte)") achieved much the same effect indirectly, by
avoiding some SPTE updates altogether, which makes the writable
permission check in make_spte() much less useful.

Remove the old-SPTE writable permission check from make_spte(), and
drop the now-unused old_spte parameter, to simplify the code.
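
With the check gone, make_spte() no longer reads the old SPTE at all,
so its prototype loses the old_spte parameter (see the spte.h hunk
below):

	bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
		       const struct kvm_memory_slot *slot,
		       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
		       bool prefetch, bool synchronizing,
		       bool host_writable, u64 *new_spte);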

This may cause mmu_try_to_unsync_pages() to be called in a few
additional cases not covered by commit e6722d9211b2, e.g. when the
guest toggles the execute bit on a PTE whose old SPTE is a writable
leaf: the unsync hash lookup is now performed even though any relevant
shadow pages must already be unsync.  Such cases are expected to be
rare.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@...group.com>
---
arch/x86/kvm/mmu/mmu.c | 2 +-
arch/x86/kvm/mmu/paging_tmpl.h | 2 +-
arch/x86/kvm/mmu/spte.c | 12 ++----------
arch/x86/kvm/mmu/spte.h | 2 +-
arch/x86/kvm/mmu/tdp_mmu.c | 2 +-
5 files changed, 6 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 02c450686b4a..4535d2836004 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3073,7 +3073,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
was_rmapped = 1;
}
- wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
+ wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, prefetch,
false, host_writable, &spte);
if (*sptep == spte) {
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 901cd2bd40b8..95fccee63563 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -954,7 +954,7 @@ static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int
host_writable = spte & shadow_host_writable_mask;
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
make_spte(vcpu, sp, slot, pte_access, gfn,
- spte_to_pfn(spte), spte, true, true,
+ spte_to_pfn(spte), true, true,
host_writable, &spte);
/*
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 85a0473809b0..a8e2606ccd22 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -186,7 +186,7 @@ bool spte_needs_atomic_update(u64 spte)
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
const struct kvm_memory_slot *slot,
unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
- u64 old_spte, bool prefetch, bool synchronizing,
+ bool prefetch, bool synchronizing,
bool host_writable, u64 *new_spte)
{
int level = sp->role.level;
@@ -258,16 +258,8 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
* SPTE. Write-protect the SPTE if the page can't be unsync'd,
* e.g. it's write-tracked (upper-level SPs) or has one or more
* shadow pages and unsync'ing pages is not allowed.
- *
- * When overwriting an existing leaf SPTE, and the old SPTE was
- * writable, skip trying to unsync shadow pages as any relevant
- * shadow pages must already be unsync, i.e. the hash lookup is
- * unnecessary (and expensive). Note, this relies on KVM not
- * changing PFNs without first zapping the old SPTE, which is
- * guaranteed by both the shadow MMU and the TDP MMU.
*/
- if ((!is_last_spte(old_spte, level) || !is_writable_pte(old_spte)) &&
- mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, synchronizing, prefetch))
+ if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, synchronizing, prefetch))
wrprot = true;
else
spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask |
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 91ce29fd6f1b..cf9cd27bcd4f 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -543,7 +543,7 @@ bool spte_needs_atomic_update(u64 spte);
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
const struct kvm_memory_slot *slot,
unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
- u64 old_spte, bool prefetch, bool synchronizing,
+ bool prefetch, bool synchronizing,
bool host_writable, u64 *new_spte);
u64 make_small_spte(struct kvm *kvm, u64 huge_spte,
union kvm_mmu_page_role role, int index);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 9c26038f6b77..8dfaab2a4fd9 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1188,7 +1188,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
else
wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
- fault->pfn, iter->old_spte, fault->prefetch,
+ fault->pfn, fault->prefetch,
false, fault->map_writable, &new_spte);
if (new_spte == iter->old_spte)
--
2.19.1.6.gb485710b