Message-Id: <20210412084019.078608558@linuxfoundation.org>
Date: Mon, 12 Apr 2021 10:39:57 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
	stable@...r.kernel.org, Ben Gardon <bgardon@...gle.com>,
	Paolo Bonzini <pbonzini@...hat.com>,
	Sasha Levin <sashal@...nel.org>
Subject: [PATCH 5.11 092/210] KVM: x86/mmu: Merge flush and non-flush tdp_mmu_iter_cond_resched

From: Ben Gardon <bgardon@...gle.com>

[ Upstream commit e139a34ef9d5627a41e1c02210229082140d1f92 ]

The flushing and non-flushing variants of tdp_mmu_iter_cond_resched have
almost identical implementations. Merge the two functions and add a
flush parameter.

Signed-off-by: Ben Gardon <bgardon@...gle.com>
Message-Id: <20210202185734.1680553-12-bgardon@...gle.com>
Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
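[ For illustration only: a stand-alone sketch of the pattern this patch
  applies, merging two near-duplicate "maybe yield" helpers into a
  single helper that takes a flush flag. Every name below
  (yield_requested, flush_remote_tlbs, iter_cond_resched) is invented
  for the example and is not a kernel API. ]

#include <stdbool.h>
#include <stdio.h>

/* Stands in for need_resched() || spin_needbreak(&kvm->mmu_lock). */
static bool yield_requested = true;

/* Stands in for kvm_flush_remote_tlbs(); here it only logs. */
static void flush_remote_tlbs(void)
{
	printf("flushing remote TLBs before yielding\n");
}

/*
 * Merged helper: the former flushing variant becomes a flush == true
 * call and the non-flushing variant a flush == false call, so the
 * duplicated yield logic lives in one place.
 */
static bool iter_cond_resched(bool flush)
{
	if (yield_requested) {
		if (flush)
			flush_remote_tlbs();
		printf("yielding; caller restarts its walk from the root\n");
		return true;
	}
	return false;
}

int main(void)
{
	/* Call sites that used the flushing variant now pass true ... */
	iter_cond_resched(true);

	/* ... and the remaining call sites pass false. */
	yield_requested = false;
	iter_cond_resched(false);
	return 0;
}

[ As in the patch, a single bool parameter keeps both call patterns in
  one body at the cost of a flag argument at each call site. ]
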
 arch/x86/kvm/mmu/tdp_mmu.c | 42 ++++++++++++--------------------------
 1 file changed, 13 insertions(+), 29 deletions(-)

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index abdd89771b9b..0dd27767c770 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -412,33 +412,13 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
 	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
 			 _mmu->shadow_root_level, _start, _end)
 
-/*
- * Flush the TLB and yield if the MMU lock is contended or this thread needs to
- * return control to the scheduler.
- *
- * If this function yields, it will also reset the tdp_iter's walk over the
- * paging structure and the calling function should allow the iterator to
- * continue its traversal from the paging structure root.
- *
- * Return true if this function yielded, the TLBs were flushed, and the
- * iterator's traversal was reset. Return false if a yield was not needed.
- */
-static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
-{
-	if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-		kvm_flush_remote_tlbs(kvm);
-		cond_resched_lock(&kvm->mmu_lock);
-		tdp_iter_refresh_walk(iter);
-		return true;
-	}
-
-	return false;
-}
-
 /*
  * Yield if the MMU lock is contended or this thread needs to return control
  * to the scheduler.
  *
+ * If this function should yield and flush is set, it will perform a remote
+ * TLB flush before yielding.
+ *
  * If this function yields, it will also reset the tdp_iter's walk over the
  * paging structure and the calling function should allow the iterator to
  * continue its traversal from the paging structure root.
@@ -446,9 +426,13 @@ static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *it
  * Return true if this function yielded and the iterator's traversal was reset.
  * Return false if a yield was not needed.
  */
-static bool tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
+static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
+					     struct tdp_iter *iter, bool flush)
 {
 	if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+		if (flush)
+			kvm_flush_remote_tlbs(kvm);
+
 		cond_resched_lock(&kvm->mmu_lock);
 		tdp_iter_refresh_walk(iter);
 		return true;
@@ -491,7 +475,7 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 		tdp_mmu_set_spte(kvm, &iter, 0);
 
 		flush_needed = !can_yield ||
-			       !tdp_mmu_iter_flush_cond_resched(kvm, &iter);
+			       !tdp_mmu_iter_cond_resched(kvm, &iter, true);
 	}
 	return flush_needed;
 }
@@ -864,7 +848,7 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
 		spte_set = true;
 
-		tdp_mmu_iter_cond_resched(kvm, &iter);
+		tdp_mmu_iter_cond_resched(kvm, &iter, false);
 	}
 	return spte_set;
 }
@@ -923,7 +907,7 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
 		spte_set = true;
 
-		tdp_mmu_iter_cond_resched(kvm, &iter);
+		tdp_mmu_iter_cond_resched(kvm, &iter, false);
 	}
 	return spte_set;
 }
@@ -1039,7 +1023,7 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 		tdp_mmu_set_spte(kvm, &iter, new_spte);
 		spte_set = true;
 
-		tdp_mmu_iter_cond_resched(kvm, &iter);
+		tdp_mmu_iter_cond_resched(kvm, &iter, false);
 	}
 
 	return spte_set;
@@ -1092,7 +1076,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 
 		tdp_mmu_set_spte(kvm, &iter, 0);
 
-		spte_set = !tdp_mmu_iter_flush_cond_resched(kvm, &iter);
+		spte_set = !tdp_mmu_iter_cond_resched(kvm, &iter, true);
 	}
 
 	if (spte_set)
--
2.30.2