Message-Id: <20220926100800.523753916@linuxfoundation.org>
Date: Mon, 26 Sep 2022 12:12:31 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, David Matlack <dmatlack@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Sasha Levin <sashal@...nel.org>
Subject: [PATCH 5.15 117/148] KVM: x86/mmu: Fold rmap_recycle into rmap_add
From: David Matlack <dmatlack@...gle.com>
[ Upstream commit 68be1306caea8948738cab04014ca4506b590d38 ]
Consolidate rmap_recycle and rmap_add into a single function since they
are only ever called together (and only from one place). This has the
nice side effect of eliminating an extra call to
kvm_vcpu_gfn_to_memslot(). In addition, it makes mmu_set_spte(), which
is a very long function, a little shorter.
No functional change intended.
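To make the shape of the change easier to follow: the
RMAP_RECYCLE_THRESHOLD check moves out of mmu_set_spte() and into
rmap_add() itself. Below is a minimal standalone sketch of that control
flow, not the kernel code; struct rmap_head, pte_list_add() and
recycle() here are simplified stand-ins for the real structures and
helpers.

        /*
         * Illustrative sketch only (not the kernel code): models how the
         * RMAP_RECYCLE_THRESHOLD check moves out of the caller and into
         * rmap_add() itself.  struct rmap_head, pte_list_add() and
         * recycle() are simplified stand-ins.
         */
        #include <stdio.h>

        #define RMAP_RECYCLE_THRESHOLD 1000

        /* Stand-in for the per-gfn rmap list; only its length matters here. */
        struct rmap_head {
                int count;
        };

        /* Stand-in for pte_list_add(): append and return the new list length. */
        static int pte_list_add(struct rmap_head *head)
        {
                return ++head->count;
        }

        /* Stand-in for the zap-and-flush work rmap_recycle used to do. */
        static void recycle(struct rmap_head *head)
        {
                printf("recycling rmap with %d entries\n", head->count);
                head->count = 0;
        }

        /*
         * After the patch: rmap_add() checks the threshold itself, so its
         * caller (mmu_set_spte() in the real code) no longer needs the
         * rmap_count return value, and the separate rmap_recycle() is gone.
         */
        static void rmap_add(struct rmap_head *head)
        {
                int rmap_count = pte_list_add(head);

                if (rmap_count > RMAP_RECYCLE_THRESHOLD)
                        recycle(head);
        }

        int main(void)
        {
                struct rmap_head head = { 0 };
                int i;

                for (i = 0; i <= RMAP_RECYCLE_THRESHOLD; i++)
                        rmap_add(&head); /* recycles once, at entry 1001 */
                return 0;
        }

Compiled with e.g. "gcc -Wall sketch.c" (the filename is arbitrary),
the loop recycles exactly once, when the 1001st entry pushes the count
past the threshold.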
Signed-off-by: David Matlack <dmatlack@...gle.com>
Message-Id: <20210813203504.2742757-3-dmatlack@...gle.com>
Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
Stable-dep-of: 604f533262ae ("KVM: x86/mmu: add missing update to max_mmu_rmap_size")
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
arch/x86/kvm/mmu/mmu.c | 40 ++++++++++++++--------------------------
1 file changed, 14 insertions(+), 26 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f267cca9fe09..ba1749a770eb 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1071,20 +1071,6 @@ static bool rmap_can_add(struct kvm_vcpu *vcpu)
return kvm_mmu_memory_cache_nr_free_objects(mc);
}
-static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
-{
- struct kvm_memory_slot *slot;
- struct kvm_mmu_page *sp;
- struct kvm_rmap_head *rmap_head;
-
- sp = sptep_to_sp(spte);
- kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
- slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
- return pte_list_add(vcpu, spte, rmap_head);
-}
-
-
static void rmap_remove(struct kvm *kvm, u64 *spte)
{
struct kvm_memslots *slots;
@@ -1097,9 +1083,9 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
/*
- * Unlike rmap_add and rmap_recycle, rmap_remove does not run in the
- * context of a vCPU so have to determine which memslots to use based
- * on context information in sp->role.
+ * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
+ * so we have to determine which memslots to use based on context
+ * information in sp->role.
*/
slots = kvm_memslots_for_spte_role(kvm, sp->role);
@@ -1639,19 +1625,24 @@ static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
#define RMAP_RECYCLE_THRESHOLD 1000
-static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
+static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
struct kvm_memory_slot *slot;
- struct kvm_rmap_head *rmap_head;
struct kvm_mmu_page *sp;
+ struct kvm_rmap_head *rmap_head;
+ int rmap_count;
sp = sptep_to_sp(spte);
+ kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
+ rmap_count = pte_list_add(vcpu, spte, rmap_head);
- kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
- kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
- KVM_PAGES_PER_HPAGE(sp->role.level));
+ if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
+ kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
+ kvm_flush_remote_tlbs_with_address(
+ vcpu->kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
+ }
}
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -2718,7 +2709,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
bool host_writable)
{
int was_rmapped = 0;
- int rmap_count;
int set_spte_ret;
int ret = RET_PF_FIXED;
bool flush = false;
@@ -2778,9 +2768,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (!was_rmapped) {
kvm_update_page_stats(vcpu->kvm, level, 1);
- rmap_count = rmap_add(vcpu, sptep, gfn);
- if (rmap_count > RMAP_RECYCLE_THRESHOLD)
- rmap_recycle(vcpu, sptep, gfn);
+ rmap_add(vcpu, sptep, gfn);
}
return ret;
--
2.35.1
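One more note on the flush in the new rmap_add():
kvm_flush_remote_tlbs_with_address() is handed
KVM_PAGES_PER_HPAGE(sp->role.level), so the flush covers every 4K page
backed by the shadow page's possibly-huge mapping. The standalone
sketch below illustrates that arithmetic; the macros are redefined
locally (the kernel derives KVM_PAGES_PER_HPAGE from KVM_HPAGE_SIZE /
PAGE_SIZE, which reduces to the same shift), and PG_LEVEL_4K matches
x86's level numbering.

        #include <stdio.h>

        /* Mirrors x86 KVM's level numbering: 1 = 4K, 2 = 2M, 3 = 1G. */
        #define PG_LEVEL_4K             1
        /* Each paging level covers 9 more gfn bits (512 entries per table). */
        #define KVM_HPAGE_GFN_SHIFT(x)  (((x) - PG_LEVEL_4K) * 9)
        #define KVM_PAGES_PER_HPAGE(x)  (1UL << KVM_HPAGE_GFN_SHIFT(x))

        int main(void)
        {
                int level;

                for (level = 1; level <= 3; level++)
                        printf("level %d: flush covers %lu 4K page(s)\n",
                               level, KVM_PAGES_PER_HPAGE(level));
                return 0;
        }

Running it prints 1, 512 and 262144 pages for the 4K, 2M and 1G levels
respectively.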