Message-Id: <61d53c8121b5b906bbed363e493ac75aebbda693.1621191552.git.maciej.szmigiero@oracle.com>
Date: Sun, 16 May 2021 23:44:34 +0200
From: "Maciej S. Szmigiero" <mail@...iej.szmigiero.name>
To: Paolo Bonzini <pbonzini@...hat.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>
Cc: Sean Christopherson <seanjc@...gle.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Igor Mammedov <imammedo@...hat.com>,
Marc Zyngier <maz@...nel.org>,
James Morse <james.morse@....com>,
Julien Thierry <julien.thierry.kdev@...il.com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Huacai Chen <chenhuacai@...nel.org>,
Aleksandar Markovic <aleksandar.qemu.devel@...il.com>,
Paul Mackerras <paulus@...abs.org>,
Christian Borntraeger <borntraeger@...ibm.com>,
Janosch Frank <frankja@...ux.ibm.com>,
David Hildenbrand <david@...hat.com>,
Cornelia Huck <cohuck@...hat.com>,
Claudio Imbrenda <imbrenda@...ux.ibm.com>,
Joerg Roedel <joro@...tes.org>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v3 8/8] KVM: Optimize overlapping memslots check
From: "Maciej S. Szmigiero" <maciej.szmigiero@...cle.com>
Do a quick lookup for possibly overlapping gfns when creating or moving
a memslot instead of performing a linear scan of the whole memslot set.
Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@...cle.com>
---
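A note on the interval test (editorial sketch, not part of the commit):
each memslot covers the half-open gfn range [base_gfn, base_gfn +
npages), and two such ranges intersect exactly when each one starts
before the other ends, which is what the final pair of checks in the
new helper tests. A minimal stand-alone sketch of that predicate, with
hypothetical names (the local gfn_t typedef, ranges_overlap):

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t gfn_t;

/*
 * Half-open ranges [a, a + a_len) and [b, b + b_len) overlap
 * iff each range starts strictly before the other one ends.
 * Assumes both lengths are non-zero, as KVM guarantees for slots.
 */
static bool ranges_overlap(gfn_t a, gfn_t a_len, gfn_t b, gfn_t b_len)
{
	return a < b + b_len && b < a + a_len;
}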
virt/kvm/kvm_main.c | 65 ++++++++++++++++++++++++++++++++++++++-------
1 file changed, 56 insertions(+), 9 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 189504b27ca6..11814730cbcb 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1474,6 +1474,59 @@ static int kvm_delete_memslot(struct kvm *kvm,
return kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
}
+static bool kvm_check_memslot_overlap(struct kvm_memslots *slots,
+ struct kvm_memory_slot *nslot)
+{
+ int idxactive = kvm_memslots_idx(slots);
+ struct rb_node *node;
+
+ /*
+ * Find the slot with the lowest gfn that can possibly intersect with
+ * the new slot, so that ideally slot start <= nslot start.
+ */
+ node = kvm_memslots_gfn_upper_bound(slots, nslot->base_gfn);
+ if (node) {
+ struct rb_node *pnode;
+
+ /*
+ * A NULL previous node means that the very first slot
+ * already has a higher start gfn.
+ * In this case slot start > nslot start.
+ */
+ pnode = rb_prev(node);
+ if (pnode)
+ node = pnode;
+ } else {
+ /* a NULL node here means there are no existing slots */
+ node = rb_last(&slots->gfn_tree);
+ }
+
+ for ( ; node; node = rb_next(node)) {
+ struct kvm_memory_slot *cslot;
+
+ cslot = container_of(node, struct kvm_memory_slot,
+ gfn_node[idxactive]);
+
+ /*
+ * If this slot starts at or beyond the end of the new slot,
+ * so does every following one.
+ */
+ if (cslot->base_gfn >= nslot->base_gfn + nslot->npages)
+ break;
+
+ if (cslot->id == nslot->id)
+ continue;
+
+ if (cslot->base_gfn >= nslot->base_gfn)
+ return true;
+
+ if (cslot->base_gfn + cslot->npages > nslot->base_gfn)
+ return true;
+ }
+
+ return false;
+}
+
/*
* Allocate some memory and give it an address in the guest physical address
* space.
@@ -1559,16 +1612,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
}
if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
- int ctr;
-
/* Check for overlaps */
- kvm_for_each_memslot(tmp, ctr, __kvm_memslots(kvm, as_id)) {
- if (tmp->id == id)
- continue;
- if (!((new.base_gfn + new.npages <= tmp->base_gfn) ||
- (new.base_gfn >= tmp->base_gfn + tmp->npages)))
- return -EEXIST;
- }
+ if (kvm_check_memslot_overlap(__kvm_memslots(kvm, as_id),
+ &new))
+ return -EEXIST;
}
/* Allocate/free page dirty bitmap as needed */
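For illustration only (not from the patch): the lookup strategy above
can be modelled in plain C with a sorted array standing in for the
kernel's gfn rb-tree. The linear upper-bound search below plays the
role of kvm_memslots_gfn_upper_bound(), and the bounded forward walk
replaces the old whole-set scan. All names here (struct slot,
check_overlap) are hypothetical, and the model relies on the same
invariants as KVM: existing slots are disjoint and have npages > 0.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t gfn_t;

struct slot {
	gfn_t base;
	gfn_t npages;
	int id;
};

/* slots[] is sorted by base, mirroring the gfn tree's ordering */
static bool check_overlap(const struct slot *slots, size_t n,
			  const struct slot *nslot)
{
	size_t i = 0;

	/* upper bound: index of the first slot with base > nslot->base */
	while (i < n && slots[i].base <= nslot->base)
		i++;
	/* step back once so the predecessor is tested too */
	if (i > 0)
		i--;

	for (; i < n; i++) {
		/* slots starting at or past the new end cannot overlap */
		if (slots[i].base >= nslot->base + nslot->npages)
			break;
		/* skip the slot being moved/replaced */
		if (slots[i].id == nslot->id)
			continue;
		/* starts before the new end; overlaps iff it ends past the new start */
		if (slots[i].base + slots[i].npages > nslot->base)
			return true;
	}
	return false;
}

Because existing slots are pairwise disjoint, at most one slot can
start at or before the new base and still reach past it, so stepping
back a single position suffices; every later slot the walk visits
starts inside the new range and therefore overlaps trivially.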