lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20221024113445.1022147-18-wei.w.wang@intel.com>
Date:   Mon, 24 Oct 2022 19:34:44 +0800
From:   Wei Wang <wei.w.wang@...el.com>
To:     seanjc@...gle.com, pbonzini@...hat.com
Cc:     dmatlack@...gle.com, vipinsh@...gle.com, ajones@...tanamicro.com,
        eric.auger@...hat.com, kvm@...r.kernel.org,
        linux-kernel@...r.kernel.org, Wei Wang <wei.w.wang@...el.com>
Subject: [PATCH v1 17/18] KVM: selftests: remove the *vcpu[] input from __vm_create_with_vcpus

kvm_vm already includes an array of vcpu pointers (i.e. *vcpu[]) for the
added vcpus, so there is no need for users to supply their own *vcpu[].
Remove the *vcpu[] parameter from __vm_create_with_vcpus and update the
related callers.

Signed-off-by: Wei Wang <wei.w.wang@...el.com>
---
 tools/testing/selftests/kvm/aarch64/arch_timer.c      | 2 +-
 tools/testing/selftests/kvm/aarch64/vgic_init.c       | 4 ++--
 tools/testing/selftests/kvm/hardware_disable_test.c   | 2 +-
 tools/testing/selftests/kvm/include/kvm_util_base.h   | 7 +++----
 tools/testing/selftests/kvm/kvm_page_table_test.c     | 2 +-
 tools/testing/selftests/kvm/lib/kvm_util.c            | 4 ++--
 tools/testing/selftests/kvm/lib/perf_test_util.c      | 2 +-
 tools/testing/selftests/kvm/max_guest_memory_test.c   | 2 +-
 tools/testing/selftests/kvm/steal_time.c              | 2 +-
 tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c | 2 +-
 10 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c
index 7c1057e8fca7..1373e41ef365 100644
--- a/tools/testing/selftests/kvm/aarch64/arch_timer.c
+++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c
@@ -357,7 +357,7 @@ static struct kvm_vm *test_vm_create(void)
 	struct kvm_vcpu *vcpu;
 	int i, nr_vcpus = test_args.nr_vcpus;
 
-	vm = vm_create_with_vcpus(nr_vcpus, guest_code, NULL);
+	vm = vm_create_with_vcpus(nr_vcpus, guest_code);
 
 	vm_init_descriptor_tables(vm);
 	vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler);
diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c
index e24130a49581..b5defd94dd2e 100644
--- a/tools/testing/selftests/kvm/aarch64/vgic_init.c
+++ b/tools/testing/selftests/kvm/aarch64/vgic_init.c
@@ -79,7 +79,7 @@ static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type,
 	struct vm_gic v;
 
 	v.gic_dev_type = gic_dev_type;
-	v.vm = vm_create_with_vcpus(nr_vcpus, guest_code, NULL);
+	v.vm = vm_create_with_vcpus(nr_vcpus, guest_code);
 	v.gic_fd = kvm_create_device(v.vm, gic_dev_type);
 
 	return v;
@@ -641,7 +641,7 @@ int test_kvm_device(uint32_t gic_dev_type)
 	uint32_t other;
 	int ret;
 
-	v.vm = vm_create_with_vcpus(NR_VCPUS, guest_code, NULL);
+	v.vm = vm_create_with_vcpus(NR_VCPUS, guest_code);
 
 	/* try to create a non existing KVM device */
 	ret = __kvm_test_create_device(v.vm, 0);
diff --git a/tools/testing/selftests/kvm/hardware_disable_test.c b/tools/testing/selftests/kvm/hardware_disable_test.c
index c212d34a6714..f16e07485380 100644
--- a/tools/testing/selftests/kvm/hardware_disable_test.c
+++ b/tools/testing/selftests/kvm/hardware_disable_test.c
@@ -76,7 +76,7 @@ static void run_test(uint32_t run)
 	r = pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpu_set);
 	TEST_ASSERT(!r, "%s: failed to set affinity, r = %d", __func__, r);
 
-	vm = vm_create_with_vcpus(VCPU_NUM, guest_code, NULL);
+	vm = vm_create_with_vcpus(VCPU_NUM, guest_code);
 
 	pr_debug("%s: [%d] start vcpus\n", __func__, run);
 	vm_iterate_over_vcpus(vm, vcpu, i) {
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index d0d6aaec0098..5a5b7210cf7c 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -663,14 +663,13 @@ static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
 
 struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
 				      uint64_t extra_mem_pages,
-				      void *guest_code, struct kvm_vcpu *vcpus[]);
+				      void *guest_code);
 
 static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
-						  void *guest_code,
-						  struct kvm_vcpu *vcpus[])
+						  void *guest_code)
 {
 	return __vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, 0,
-				      guest_code, vcpus);
+				      guest_code);
 }
 
 /*
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
index 4c3df48d80fc..1a9dd189c225 100644
--- a/tools/testing/selftests/kvm/kvm_page_table_test.c
+++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -253,7 +253,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
 	/* Create a VM with enough guest pages */
 	guest_num_pages = test_mem_size / guest_page_size;
 	vm = __vm_create_with_vcpus(mode, nr_vcpus, guest_num_pages,
-				    guest_code, NULL);
+				    guest_code);
 
 	/* Align down GPA of the testing memslot */
 	if (!p->phys_offset)
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index ba3e774087fb..69dad4fa9ca1 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -366,7 +366,7 @@ struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
  */
 struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
 				      uint64_t extra_mem_pages,
-				      void *guest_code, struct kvm_vcpu *vcpus[])
+				      void *guest_code)
 {
 	struct kvm_vm *vm;
 	int i;
@@ -386,7 +386,7 @@ struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
 	struct kvm_vm *vm;
 
 	vm = __vm_create_with_vcpus(VM_MODE_DEFAULT, 1, extra_mem_pages,
-				    guest_code, NULL);
+				    guest_code);
 
 	*vcpu = vm->vcpus[0];
 	return vm;
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
index 94c0f496c9c1..3103c9f40e76 100644
--- a/tools/testing/selftests/kvm/lib/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -140,7 +140,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
 	 * effect as KVM allows aliasing HVAs in meslots.
 	 */
 	vm = __vm_create_with_vcpus(mode, nr_vcpus, slot0_pages + guest_num_pages,
-				    perf_test_guest_code, NULL);
+				    perf_test_guest_code);
 
 	pta->vm = vm;
 
diff --git a/tools/testing/selftests/kvm/max_guest_memory_test.c b/tools/testing/selftests/kvm/max_guest_memory_test.c
index 2d9c83e36e65..7480730caeeb 100644
--- a/tools/testing/selftests/kvm/max_guest_memory_test.c
+++ b/tools/testing/selftests/kvm/max_guest_memory_test.c
@@ -205,7 +205,7 @@ int main(int argc, char *argv[])
 		}
 	}
 
-	vm = vm_create_with_vcpus(nr_vcpus, guest_code, NULL);
+	vm = vm_create_with_vcpus(nr_vcpus, guest_code);
 
 	max_gpa = vm->max_gfn << vm->page_shift;
 	TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR <4gb ");
diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
index 857ed2c073fc..530b08e64846 100644
--- a/tools/testing/selftests/kvm/steal_time.c
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -261,7 +261,7 @@ int main(int ac, char **av)
 	pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
 
 	/* Create a VM and an identity mapped memslot for the steal time structure */
-	vm = vm_create_with_vcpus(NR_VCPUS, guest_code, NULL);
+	vm = vm_create_with_vcpus(NR_VCPUS, guest_code);
 	vcpus = vm->vcpus;
 	gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS);
 	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
diff --git a/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c b/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c
index 34a8beef42b6..3c050ffe5edb 100644
--- a/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c
+++ b/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c
@@ -91,7 +91,7 @@ int main(int argc, char *argv[])
 {
 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_TSC_CONTROL));
 
-	vm = vm_create_with_vcpus(NR_TEST_VCPUS, guest_code, NULL);
+	vm = vm_create_with_vcpus(NR_TEST_VCPUS, guest_code);
 	vm_ioctl(vm, KVM_SET_TSC_KHZ, (void *) TEST_TSC_KHZ);
 
 	pthread_spin_init(&create_lock, PTHREAD_PROCESS_PRIVATE);
-- 
2.27.0

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ