Message-Id: <20220829171021.701198-2-pgonda@google.com>
Date: Mon, 29 Aug 2022 10:10:14 -0700
From: Peter Gonda <pgonda@...gle.com>
To: kvm@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, marcorr@...gle.com,
seanjc@...gle.com, michael.roth@....com, thomas.lendacky@....com,
joro@...tes.org, mizhang@...gle.com, pbonzini@...hat.com,
andrew.jones@...ux.dev, Peter Gonda <pgonda@...gle.com>
Subject: [V4 1/8] KVM: selftests: move vm_phy_pages_alloc() earlier in file
From: Michael Roth <michael.roth@....com>

Subsequent patches will break some of this code out into file-local
helper functions, which will be used by functions like vm_vaddr_alloc()
that are currently defined earlier in the file, so a forward
declaration would be needed.

Instead, move vm_phy_pages_alloc() earlier in the file, just above
vm_vaddr_alloc() and friends, which are its main users.
Reviewed-by: Mingwei Zhang <mizhang@...gle.com>
Reviewed-by: Andrew Jones <andrew.jones@...ux.dev>
Signed-off-by: Michael Roth <michael.roth@....com>
Signed-off-by: Peter Gonda <pgonda@...gle.com>
---
tools/testing/selftests/kvm/lib/kvm_util.c | 145 ++++++++++-----------
1 file changed, 72 insertions(+), 73 deletions(-)
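
For reviewers who want a quick reminder of the API being relocated, below is a
minimal, illustrative-only sketch of how a selftest consumes these allocators.
The example_phy_alloc() helper and the 0x100000 minimum address are
hypothetical and not part of this patch; VM creation and the default memslot 0
are assumed to be set up elsewhere.

#include <stdio.h>

#include "kvm_util.h"

/* Illustrative only: exercise the allocators this patch moves. */
static void example_phy_alloc(struct kvm_vm *vm)
{
	/* Four contiguous guest-physical pages at or above 0x100000 in memslot 0. */
	vm_paddr_t base = vm_phy_pages_alloc(vm, 4, 0x100000, 0);

	/* Single-page wrapper around vm_phy_pages_alloc(vm, 1, ...). */
	vm_paddr_t page = vm_phy_page_alloc(vm, 0x100000, 0);

	/* Page-table page at or above KVM_GUEST_PAGE_TABLE_MIN_PADDR. */
	vm_paddr_t pt = vm_alloc_page_table(vm);

	printf("base=0x%lx page=0x%lx pt=0x%lx\n", base, page, pt);
}

vm_vaddr_alloc() and friends build on exactly these calls, which is why the
move places the allocator just above its main users.
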
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 846f9f6c5a17..06559994711e 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1100,6 +1100,78 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
return vcpu;
}
+/*
+ * Physical Contiguous Page Allocator
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * num - number of pages
+ * paddr_min - Physical address minimum
+ * memslot - Memory region to allocate page from
+ *
+ * Output Args: None
+ *
+ * Return:
+ * Starting physical address
+ *
+ * Within the VM specified by vm, locates a range of available physical
+ * pages at or above paddr_min. If found, the pages are marked as in use
+ * and their base address is returned. A TEST_ASSERT failure occurs if
+ * not enough pages are available at or above paddr_min.
+ */
+vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+ vm_paddr_t paddr_min, uint32_t memslot)
+{
+ struct userspace_mem_region *region;
+ sparsebit_idx_t pg, base;
+
+ TEST_ASSERT(num > 0, "Must allocate at least one page");
+
+ TEST_ASSERT((paddr_min % vm->page_size) == 0,
+ "Min physical address not divisible by page size.\n paddr_min: 0x%lx page_size: 0x%x",
+ paddr_min, vm->page_size);
+
+ region = memslot2region(vm, memslot);
+ base = pg = paddr_min >> vm->page_shift;
+
+ do {
+ for (; pg < base + num; ++pg) {
+ if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
+ base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
+ break;
+ }
+ }
+ } while (pg && pg != base + num);
+
+ if (pg == 0) {
+ fprintf(stderr,
+ "No guest physical page available, paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
+ paddr_min, vm->page_size, memslot);
+ fputs("---- vm dump ----\n", stderr);
+ vm_dump(stderr, vm, 2);
+ abort();
+ }
+
+ for (pg = base; pg < base + num; ++pg)
+ sparsebit_clear(region->unused_phy_pages, pg);
+
+ return base * vm->page_size;
+}
+
+vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
+ uint32_t memslot)
+{
+ return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
+}
+
+/* Arbitrary minimum physical address used for virtual translation tables. */
+#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
+
+vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
+{
+ return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
+}
+
/*
* VM Virtual Address Unused Gap
*
@@ -1746,79 +1818,6 @@ const char *exit_reason_str(unsigned int exit_reason)
return "Unknown";
}
-/*
- * Physical Contiguous Page Allocator
- *
- * Input Args:
- * vm - Virtual Machine
- * num - number of pages
- * paddr_min - Physical address minimum
- * memslot - Memory region to allocate page from
- *
- * Output Args: None
- *
- * Return:
- * Starting physical address
- *
- * Within the VM specified by vm, locates a range of available physical
- * pages at or above paddr_min. If found, the pages are marked as in use
- * and their base address is returned. A TEST_ASSERT failure occurs if
- * not enough pages are available at or above paddr_min.
- */
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
- vm_paddr_t paddr_min, uint32_t memslot)
-{
- struct userspace_mem_region *region;
- sparsebit_idx_t pg, base;
-
- TEST_ASSERT(num > 0, "Must allocate at least one page");
-
- TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
- "not divisible by page size.\n"
- " paddr_min: 0x%lx page_size: 0x%x",
- paddr_min, vm->page_size);
-
- region = memslot2region(vm, memslot);
- base = pg = paddr_min >> vm->page_shift;
-
- do {
- for (; pg < base + num; ++pg) {
- if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
- base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
- break;
- }
- }
- } while (pg && pg != base + num);
-
- if (pg == 0) {
- fprintf(stderr, "No guest physical page available, "
- "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
- paddr_min, vm->page_size, memslot);
- fputs("---- vm dump ----\n", stderr);
- vm_dump(stderr, vm, 2);
- abort();
- }
-
- for (pg = base; pg < base + num; ++pg)
- sparsebit_clear(region->unused_phy_pages, pg);
-
- return base * vm->page_size;
-}
-
-vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
- uint32_t memslot)
-{
- return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
-}
-
-/* Arbitrary minimum physical address used for virtual translation tables. */
-#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
-
-vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
-{
- return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
-}
-
/*
* Address Guest Virtual to Host Virtual
*
--
2.37.2.672.g94769d06f0-goog