Message-Id: <20210112181041.356734-16-bgardon@google.com>
Date: Tue, 12 Jan 2021 10:10:32 -0800
From: Ben Gardon <bgardon@...gle.com>
To: linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc: Paolo Bonzini <pbonzini@...hat.com>, Peter Xu <peterx@...hat.com>,
Sean Christopherson <seanjc@...gle.com>,
Peter Shier <pshier@...gle.com>,
Peter Feiner <pfeiner@...gle.com>,
Junaid Shahid <junaids@...gle.com>,
Jim Mattson <jmattson@...gle.com>,
Yulei Zhang <yulei.kernel@...il.com>,
Wanpeng Li <kernellwp@...il.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Xiao Guangrong <xiaoguangrong.eric@...il.com>,
Ben Gardon <bgardon@...gle.com>
Subject: [PATCH 15/24] kvm: mmu: Wrap mmu_lock cond_resched and needbreak
Wrap the MMU lock cond_resched and needbreak operations in functions.
This will support a refactoring to move the lock into the arch-specific
struct kvm_arch so that x86 can change the spinlock to a rwlock without
affecting the performance of other architectures.
No functional change intended.
Reviewed-by: Peter Feiner <pfeiner@...gle.com>
Signed-off-by: Ben Gardon <bgardon@...gle.com>
---
arch/arm64/kvm/mmu.c | 2 +-
arch/x86/kvm/mmu/mmu.c | 16 ++++++++--------
arch/x86/kvm/mmu/tdp_mmu.c | 8 ++++----
include/linux/kvm_host.h | 2 ++
virt/kvm/kvm_main.c | 10 ++++++++++
5 files changed, 25 insertions(+), 13 deletions(-)
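Not part of the patch, just for context: a rough sketch of where these
wrappers could lead once the lock moves into struct kvm_arch. The
KVM_HAVE_MMU_RWLOCK symbol and the cond_resched_rwlock_write() helper
are hypothetical here and would have to be introduced separately; the
fallback path matches what this patch adds to virt/kvm/kvm_main.c.

/*
 * Hypothetical wrappers once kvm->arch owns the MMU lock; the config
 * symbol and rwlock helper names below are illustrative only.
 */
#ifdef KVM_HAVE_MMU_RWLOCK
int kvm_mmu_lock_needbreak(struct kvm *kvm)
{
	return rwlock_needbreak(&kvm->arch.mmu_lock);
}

int kvm_mmu_lock_cond_resched(struct kvm *kvm)
{
	/* Assumes a cond_resched_rwlock_write() helper is available. */
	return cond_resched_rwlock_write(&kvm->arch.mmu_lock);
}
#else
int kvm_mmu_lock_needbreak(struct kvm *kvm)
{
	return spin_needbreak(&kvm->mmu_lock);
}

int kvm_mmu_lock_cond_resched(struct kvm *kvm)
{
	return cond_resched_lock(&kvm->mmu_lock);
}
#endif

With the callers converted to these wrappers, such a switch would be
confined to the wrapper definitions and would not touch the MMU code
paths changed in this patch.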
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 402b1642c944..57ef1ec23b56 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -58,7 +58,7 @@ static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
break;
if (resched && next != end)
- cond_resched_lock(&kvm->mmu_lock);
+ kvm_mmu_lock_cond_resched(kvm);
} while (addr = next, addr != end);
return ret;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 5a4577830606..659ed0a2875f 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2016,9 +2016,9 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
flush |= kvm_sync_page(vcpu, sp, &invalid_list);
mmu_pages_clear_parents(&parents);
}
- if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) {
+ if (need_resched() || kvm_mmu_lock_needbreak(vcpu->kvm)) {
kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
- cond_resched_lock(&vcpu->kvm->mmu_lock);
+ kvm_mmu_lock_cond_resched(vcpu->kvm);
flush = false;
}
}
@@ -5233,14 +5233,14 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
if (iterator.rmap)
flush |= fn(kvm, iterator.rmap);
- if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+ if (need_resched() || kvm_mmu_lock_needbreak(kvm)) {
if (flush && lock_flush_tlb) {
kvm_flush_remote_tlbs_with_address(kvm,
start_gfn,
iterator.gfn - start_gfn + 1);
flush = false;
}
- cond_resched_lock(&kvm->mmu_lock);
+ kvm_mmu_lock_cond_resched(kvm);
}
}
@@ -5390,7 +5390,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
* be in active use by the guest.
*/
if (batch >= BATCH_ZAP_PAGES &&
- cond_resched_lock(&kvm->mmu_lock)) {
+ kvm_mmu_lock_cond_resched(kvm)) {
batch = 0;
goto restart;
}
@@ -5688,7 +5688,7 @@ void kvm_mmu_zap_all(struct kvm *kvm)
continue;
if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
goto restart;
- if (cond_resched_lock(&kvm->mmu_lock))
+ if (kvm_mmu_lock_cond_resched(kvm))
goto restart;
}
@@ -6013,9 +6013,9 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
WARN_ON_ONCE(sp->lpage_disallowed);
}
- if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+ if (need_resched() || kvm_mmu_lock_needbreak(kvm)) {
kvm_mmu_commit_zap_page(kvm, &invalid_list);
- cond_resched_lock(&kvm->mmu_lock);
+ kvm_mmu_lock_cond_resched(kvm);
}
}
kvm_mmu_commit_zap_page(kvm, &invalid_list);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 90807f2d928f..fb911ca428b2 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -488,10 +488,10 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm,
struct tdp_iter *iter)
{
- if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+ if (need_resched() || kvm_mmu_lock_needbreak(kvm)) {
kvm_flush_remote_tlbs(kvm);
rcu_read_unlock();
- cond_resched_lock(&kvm->mmu_lock);
+ kvm_mmu_lock_cond_resched(kvm);
rcu_read_lock();
tdp_iter_refresh_walk(iter);
return true;
@@ -512,9 +512,9 @@ static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm,
*/
static bool tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
{
- if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+ if (need_resched() || kvm_mmu_lock_needbreak(kvm)) {
rcu_read_unlock();
- cond_resched_lock(&kvm->mmu_lock);
+ kvm_mmu_lock_cond_resched(kvm);
rcu_read_lock();
tdp_iter_refresh_walk(iter);
return true;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 433d14fdae30..6e2773fc406c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1497,5 +1497,7 @@ static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
void kvm_mmu_lock(struct kvm *kvm);
void kvm_mmu_unlock(struct kvm *kvm);
+int kvm_mmu_lock_needbreak(struct kvm *kvm);
+int kvm_mmu_lock_cond_resched(struct kvm *kvm);
#endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 32f97ed1188d..b4c49a7e0556 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -442,6 +442,16 @@ void kvm_mmu_unlock(struct kvm *kvm)
spin_unlock(&kvm->mmu_lock);
}
+int kvm_mmu_lock_needbreak(struct kvm *kvm)
+{
+ return spin_needbreak(&kvm->mmu_lock);
+}
+
+int kvm_mmu_lock_cond_resched(struct kvm *kvm)
+{
+ return cond_resched_lock(&kvm->mmu_lock);
+}
+
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
--
2.30.0.284.gd98b1dd5eaa7-goog