Message-ID: <20241011021051.1557902-8-seanjc@google.com>
Date: Thu, 10 Oct 2024 19:10:39 -0700
From: Sean Christopherson <seanjc@...gle.com>
To: Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
Yan Zhao <yan.y.zhao@...el.com>, Sagi Shahar <sagis@...gle.com>,
"Alex Bennée" <alex.bennee@...aro.org>, David Matlack <dmatlack@...gle.com>,
James Houghton <jthoughton@...gle.com>
Subject: [PATCH 07/18] KVM: x86/mmu: Fold mmu_spte_update_no_track() into mmu_spte_update()

Fold the guts of mmu_spte_update_no_track() into mmu_spte_update() now
that the latter doesn't flush when clearing A/D bits, i.e. now that there
is no need to explicitly avoid TLB flushes when aging SPTEs.

Opportunistically WARN if mmu_spte_update() requests a TLB flush when
aging SPTEs, as aging should never modify a SPTE in such a way that KVM
thinks a TLB flush is needed.
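
Note, for reference, mmu_spte_update() reports that a TLB flush is needed
only when a SPTE transitions from MMU-writable to !MMU-writable.  A minimal
sketch of that predicate is below; spte_update_needs_flush() is an
illustrative name for this changelog, not a helper KVM defines, whereas
is_mmu_writable_spte() is the real helper:

	/*
	 * Aging never modifies a SPTE in a way that clears the
	 * MMU-writable bit, so for aging updates this predicate, the
	 * sole reason mmu_spte_update() returns true, can never fire.
	 */
	static bool spte_update_needs_flush(u64 old_spte, u64 new_spte)
	{
		return is_mmu_writable_spte(old_spte) &&
		       !is_mmu_writable_spte(new_spte);
	}
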
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
 arch/x86/kvm/mmu/mmu.c | 50 ++++++++++++++++++------------------------
 1 file changed, 21 insertions(+), 29 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index faa524d5a0e8..a72ecac63e07 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -485,32 +485,6 @@ static void mmu_spte_set(u64 *sptep, u64 new_spte)
 	__set_spte(sptep, new_spte);
 }
 
-/*
- * Update the SPTE (excluding the PFN), but do not track changes in its
- * accessed/dirty status.
- */
-static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
-{
-	u64 old_spte = *sptep;
-
-	WARN_ON_ONCE(!is_shadow_present_pte(new_spte));
-	check_spte_writable_invariants(new_spte);
-
-	if (!is_shadow_present_pte(old_spte)) {
-		mmu_spte_set(sptep, new_spte);
-		return old_spte;
-	}
-
-	if (!spte_has_volatile_bits(old_spte))
-		__update_clear_spte_fast(sptep, new_spte);
-	else
-		old_spte = __update_clear_spte_slow(sptep, new_spte);
-
-	WARN_ON_ONCE(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
-
-	return old_spte;
-}
-
 /* Rules for using mmu_spte_update:
  * Update the state bits, it means the mapped pfn is not changed.
  *
@@ -535,10 +509,23 @@ static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
  */
 static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 {
-	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
+	u64 old_spte = *sptep;
 
-	if (!is_shadow_present_pte(old_spte))
+	WARN_ON_ONCE(!is_shadow_present_pte(new_spte));
+	check_spte_writable_invariants(new_spte);
+
+	if (!is_shadow_present_pte(old_spte)) {
+		mmu_spte_set(sptep, new_spte);
 		return false;
+	}
+
+	if (!spte_has_volatile_bits(old_spte))
+		__update_clear_spte_fast(sptep, new_spte);
+	else
+		old_spte = __update_clear_spte_slow(sptep, new_spte);
+
+	WARN_ON_ONCE(!is_shadow_present_pte(old_spte) ||
+		     spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
 
 	return is_mmu_writable_spte(old_spte) && !is_mmu_writable_spte(new_spte);
 }
@@ -1587,8 +1574,13 @@ static bool kvm_rmap_age_gfn_range(struct kvm *kvm,
 			clear_bit((ffs(shadow_accessed_mask) - 1),
 				  (unsigned long *)sptep);
 		} else {
+			/*
+			 * WARN if mmu_spte_update() signals the need
+			 * for a TLB flush, as Access tracking a SPTE
+			 * should never trigger an _immediate_ flush.
+			 */
 			spte = mark_spte_for_access_track(spte);
-			mmu_spte_update_no_track(sptep, spte);
+			WARN_ON_ONCE(mmu_spte_update(sptep, spte));
 		}
 		young = true;
 	}
--
2.47.0.rc1.288.g06298d1525-goog