From 4e34801acab175913b6fd5b6c7c4aa1350d5e571 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Tue, 15 Oct 2024 16:35:15 -0700
Subject: [PATCH 1/5] KVM: pfncache: Snapshot mmu_invalidate_seq immediately
 before hva_to_pfn()

Grab the snapshot of the mmu_notifier sequence counter immediately
before calling hva_to_pfn() when refreshing a gpc, as there's no
requirement that the snapshot be taken while holding gpc->lock; the
sequence counter is completely independent of locking.

This will allow waiting on in-progress invalidations to complete,
instead of actively trying to resolve a pfn that KVM is guaranteed to
discard (because either the invalidation will still be in-progress, or
it will have completed and bumped the sequence counter).

Signed-off-by: Sean Christopherson
---
 virt/kvm/pfncache.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index f0039efb9e1e..4afbc1262e3f 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -172,9 +172,6 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 	gpc->valid = false;
 
 	do {
-		mmu_seq = gpc->kvm->mmu_invalidate_seq;
-		smp_rmb();
-
 		write_unlock_irq(&gpc->lock);
 
 		/*
@@ -197,6 +194,9 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 			cond_resched();
 		}
 
+		mmu_seq = gpc->kvm->mmu_invalidate_seq;
+		smp_rmb();
+
 		/* We always request a writeable mapping */
 		new_pfn = hva_to_pfn(gpc->uhva, false, false, NULL, true, NULL);
 		if (is_error_noslot_pfn(new_pfn))

base-commit: 8cf0b93919e13d1e8d4466eb4080a4c4d9d66d7b
-- 
2.47.0.rc1.288.g06298d1525-goog
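
For readers unfamiliar with the sequence-counter retry protocol the commit
message relies on, below is a minimal sketch of the shape of
hva_to_pfn_retry() with this change applied.  It is illustrative only, not
the verbatim kernel code: the unmap/cleanup and kmap steps are elided, the
function name is made up (hva_to_pfn_retry_sketch), and the retry check is
assumed to be the pfncache-local mmu_notifier_retry_cache() helper, which
compares the snapshot against the current sequence counter.

/*
 * Illustrative sketch (not the real function body): snapshot the
 * mmu_notifier sequence counter right before resolving the pfn, then
 * redo the whole attempt if an invalidation raced with it.
 */
static kvm_pfn_t hva_to_pfn_retry_sketch(struct gfn_to_pfn_cache *gpc)
{
	kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
	unsigned long mmu_seq;

	/* Entered with gpc->lock held for write; invalidate the cache. */
	gpc->valid = false;

	do {
		write_unlock_irq(&gpc->lock);

		/* ... release/unmap the pfn from the previous attempt ... */

		/*
		 * Take the snapshot outside gpc->lock, immediately before
		 * hva_to_pfn().  The smp_rmb() pairs with the invalidation
		 * side so that a racing invalidation is observed either as
		 * an in-progress invalidation or as a bumped counter.
		 */
		mmu_seq = gpc->kvm->mmu_invalidate_seq;
		smp_rmb();

		new_pfn = hva_to_pfn(gpc->uhva, false, false, NULL, true, NULL);
		if (is_error_noslot_pfn(new_pfn)) {
			/* Error path: re-take the lock for the caller. */
			write_lock_irq(&gpc->lock);
			return new_pfn;
		}

		/* ... map the new pfn into the kernel ... */

		write_lock_irq(&gpc->lock);
	} while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));

	return new_pfn;
}

Because the snapshot is independent of gpc->lock, moving it next to the
hva_to_pfn() call is purely a preparatory step: it lets a later patch wait
for in-progress invalidations to finish before resolving the pfn, rather
than resolving a pfn that the retry check is guaranteed to throw away.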