Message-Id: <20221024113445.1022147-2-wei.w.wang@intel.com>
Date: Mon, 24 Oct 2022 19:34:28 +0800
From: Wei Wang <wei.w.wang@...el.com>
To: seanjc@...gle.com, pbonzini@...hat.com
Cc: dmatlack@...gle.com, vipinsh@...gle.com, ajones@...tanamicro.com,
eric.auger@...hat.com, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org, Wei Wang <wei.w.wang@...el.com>
Subject: [PATCH v1 01/18] KVM: selftests/kvm_util: use array of pointers to maintain vcpus in kvm_vm
Each vCPU has an id associated with it, so referencing a vCPU by indexing
into an array with "vcpu->id" is faster and simpler than walking the list
of vCPUs used in the current implementation. Change the vCPU list to an
array of vCPU pointers. Users then don't need to allocate such a vCPU
array on their own; instead, they can reuse the one maintained in kvm_vm.
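
With the array in place, a test can walk a VM's vCPUs via the new
vm_iterate_over_vcpus() macro roughly as below (a minimal sketch for
illustration only; dump_vcpu_fds() is a hypothetical helper and not part
of this patch):

	#include "kvm_util.h"
	#include "test_util.h"

	/* Hypothetical helper: print the fd of every vCPU added to 'vm'. */
	static void dump_vcpu_fds(struct kvm_vm *vm)
	{
		struct kvm_vcpu *vcpu;
		int i;

		/* Walks vm->vcpus[] until the first empty slot. */
		vm_iterate_over_vcpus(vm, vcpu, i)
			pr_info("vCPU%u: fd %d\n", vcpu->id, vcpu->fd);
	}

Note the macro stops at the first empty slot, i.e. it assumes vCPU ids are
allocated contiguously from zero, which is how most existing selftests add
vCPUs.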
Signed-off-by: Wei Wang <wei.w.wang@...el.com>
---
.../testing/selftests/kvm/include/kvm_util.h | 4 +++
.../selftests/kvm/include/kvm_util_base.h | 3 +-
tools/testing/selftests/kvm/lib/kvm_util.c | 34 ++++++-------------
tools/testing/selftests/kvm/lib/x86_64/vmx.c | 2 +-
4 files changed, 17 insertions(+), 26 deletions(-)
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index c9286811a4cb..5d5c8968fb06 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -10,4 +10,8 @@
#include "kvm_util_base.h"
#include "ucall_common.h"
+#define vm_iterate_over_vcpus(vm, vcpu, i)		\
+	for (i = 0; i < KVM_MAX_VCPUS &&		\
+		    (vcpu = vm->vcpus[i]); i++)
+
#endif /* SELFTEST_KVM_UTIL_H */
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index e42a09cd24a0..c90a9609b853 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -45,7 +45,6 @@ struct userspace_mem_region {
};
struct kvm_vcpu {
- struct list_head list;
uint32_t id;
int fd;
struct kvm_vm *vm;
@@ -75,7 +74,6 @@ struct kvm_vm {
unsigned int pa_bits;
unsigned int va_bits;
uint64_t max_gfn;
- struct list_head vcpus;
struct userspace_mem_regions regions;
struct sparsebit *vpages_valid;
struct sparsebit *vpages_mapped;
@@ -92,6 +90,7 @@ struct kvm_vm {
int stats_fd;
struct kvm_stats_header stats_header;
struct kvm_stats_desc *stats_desc;
+ struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
};
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index f1cb1627161f..941f6c3ea9dc 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -195,7 +195,6 @@ struct kvm_vm *____vm_create(enum vm_guest_mode mode, uint64_t nr_pages)
vm = calloc(1, sizeof(*vm));
TEST_ASSERT(vm != NULL, "Insufficient Memory");
- INIT_LIST_HEAD(&vm->vcpus);
vm->regions.gpa_tree = RB_ROOT;
vm->regions.hva_tree = RB_ROOT;
hash_init(vm->regions.slot_hash);
@@ -534,6 +533,10 @@ __weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
int ret;
+ uint32_t vcpu_id = vcpu->id;
+
+ TEST_ASSERT(!!vm->vcpus[vcpu_id], "vCPU%d wasn't added\n", vcpu_id);
+ vm->vcpus[vcpu_id] = NULL;
if (vcpu->dirty_gfns) {
ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
@@ -547,18 +550,16 @@ static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
ret = close(vcpu->fd);
TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
- list_del(&vcpu->list);
-
vcpu_arch_free(vcpu);
free(vcpu);
}
void kvm_vm_release(struct kvm_vm *vmp)
{
- struct kvm_vcpu *vcpu, *tmp;
- int ret;
+ struct kvm_vcpu *vcpu;
+ int i, ret;
- list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
+ vm_iterate_over_vcpus(vmp, vcpu, i)
vm_vcpu_rm(vmp, vcpu);
ret = close(vmp->fd);
@@ -1085,18 +1086,6 @@ static int vcpu_mmap_sz(void)
return ret;
}
-static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
-{
- struct kvm_vcpu *vcpu;
-
- list_for_each_entry(vcpu, &vm->vcpus, list) {
- if (vcpu->id == vcpu_id)
- return true;
- }
-
- return false;
-}
-
/*
* Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
* No additional vCPU setup is done. Returns the vCPU.
@@ -1106,7 +1095,7 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
struct kvm_vcpu *vcpu;
/* Confirm a vcpu with the specified id doesn't already exist. */
- TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists\n", vcpu_id);
+ TEST_ASSERT(!vm->vcpus[vcpu_id], "vCPU%d already exists\n", vcpu_id);
/* Allocate and initialize new vcpu structure. */
vcpu = calloc(1, sizeof(*vcpu));
@@ -1125,8 +1114,7 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
TEST_ASSERT(vcpu->run != MAP_FAILED,
__KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
- /* Add to linked-list of VCPUs. */
- list_add(&vcpu->list, &vm->vcpus);
+ vm->vcpus[vcpu_id] = vcpu;
return vcpu;
}
@@ -1684,7 +1672,7 @@ void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
*/
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
- int ctr;
+ int i, ctr;
struct userspace_mem_region *region;
struct kvm_vcpu *vcpu;
@@ -1712,7 +1700,7 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
}
fprintf(stream, "%*sVCPUs:\n", indent, "");
- list_for_each_entry(vcpu, &vm->vcpus, list)
+ vm_iterate_over_vcpus(vm, vcpu, i)
vcpu_dump(stream, vcpu, indent + 2);
}
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index d21049c38fc5..77812dd03647 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -549,7 +549,7 @@ bool kvm_vm_has_ept(struct kvm_vm *vm)
struct kvm_vcpu *vcpu;
uint64_t ctrl;
- vcpu = list_first_entry(&vm->vcpus, struct kvm_vcpu, list);
+ vcpu = vm->vcpus[0];
TEST_ASSERT(vcpu, "Cannot determine EPT support without vCPUs.\n");
ctrl = vcpu_get_msr(vcpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS) >> 32;
--
2.27.0