Message-Id: <20210408114303.30310-5-eesposit@redhat.com>
Date:   Thu,  8 Apr 2021 13:43:03 +0200
From:   Emanuele Giuseppe Esposito <eesposit@...hat.com>
To:     kvm@...r.kernel.org
Cc:     Paolo Bonzini <pbonzini@...hat.com>,
        Jonathan Corbet <corbet@....net>,
        Sean Christopherson <seanjc@...gle.com>,
        Vitaly Kuznetsov <vkuznets@...hat.com>,
        Emanuele Giuseppe Esposito <eesposit@...hat.com>,
        Jim Mattson <jmattson@...gle.com>,
        Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
        "H. Peter Anvin" <hpa@...or.com>, Shuah Khan <shuah@...nel.org>,
        Alexander Graf <graf@...zon.com>,
        Andrew Jones <drjones@...hat.com>, linux-doc@...r.kernel.org,
        linux-kernel@...r.kernel.org, linux-kselftest@...r.kernel.org
Subject: [PATCH v4 4/4] selftests: KVM: extend get_cpuid_test to include KVM_GET_EMULATED_CPUID

Extend the get_cpuid_test.c selftest to also cover the KVM_GET_EMULATED_CPUID
ioctl. Since its behavior and functionality are similar to
KVM_GET_SUPPORTED_CPUID, only two additional checks are performed:

1) corner cases in the nent field of struct kvm_cpuid2 (a rough standalone
   sketch of the nent handling follows the diffstat below);
2) setting the emulated CPUID in the guest VM and reading it back.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@...hat.com>
---
 .../selftests/kvm/x86_64/get_cpuid_test.c     | 90 ++++++++++++++++++-
 1 file changed, 88 insertions(+), 2 deletions(-)
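
Not part of the patch, for illustration only: a minimal standalone sketch of
the nent contract that test_emulated_entries() exercises, going through the
raw /dev/kvm system ioctls rather than the selftest helpers (_kvm_ioctl(),
kvm_check_cap()). Error handling is mostly omitted and the 100-entry bound is
an arbitrary assumption:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	struct kvm_cpuid2 *cpuid;
	int nent = 100;		/* arbitrary upper bound for this sketch */
	int kvm_fd = open("/dev/kvm", O_RDWR);

	if (kvm_fd < 0 ||
	    !ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_EXT_EMUL_CPUID))
		return 1;

	cpuid = calloc(1, sizeof(*cpuid) + nent * sizeof(cpuid->entries[0]));
	cpuid->nent = nent;

	if (ioctl(kvm_fd, KVM_GET_EMULATED_CPUID, cpuid) == 0)
		/* on success the kernel shrinks nent to the real count */
		printf("emulated CPUID entries: %d\n", cpuid->nent);
	else if (errno == E2BIG)
		/* nent was smaller than the number of entries to return */
		printf("nent=%d is too small\n", nent);

	free(cpuid);
	close(kvm_fd);
	return 0;
}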

diff --git a/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c
index 9b78e8889638..b9f0fba1b0ea 100644
--- a/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c
@@ -13,6 +13,7 @@
 #include "processor.h"
 
 #define VCPU_ID 0
+#define MAX_NENT 1000
 
 /* CPUIDs known to differ */
 struct {
@@ -137,7 +138,8 @@ static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid, int stage)
 	}
 }
 
-struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva, struct kvm_cpuid2 *cpuid)
+static struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva,
+					struct kvm_cpuid2 *cpuid)
 {
 	int size = sizeof(*cpuid) + cpuid->nent * sizeof(cpuid->entries[0]);
 	vm_vaddr_t gva = vm_vaddr_alloc(vm, size,
@@ -150,9 +152,84 @@ struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva, struct
 	return guest_cpuids;
 }
 
+static struct kvm_cpuid2 *alloc_custom_kvm_cpuid2(int nent)
+{
+	struct kvm_cpuid2 *cpuid;
+	size_t size;
+
+	size = sizeof(*cpuid);
+	size += nent * sizeof(struct kvm_cpuid_entry2);
+	cpuid = calloc(1, size);
+	if (!cpuid) {
+		perror("calloc");
+		abort();
+	}
+
+	cpuid->nent = nent;
+
+	return cpuid;
+}
+
+static void clean_entries_kvm_cpuid2(struct kvm_cpuid2 *cpuid)
+{
+	size_t size;
+	int old_nent = cpuid->nent;
+
+	size = sizeof(*cpuid);
+	size += MAX_NENT * sizeof(struct kvm_cpuid_entry2);
+	memset(cpuid, 0, size);
+	cpuid->nent = old_nent;
+}
+
+static void test_emulated_entries(struct kvm_vm *vm)
+{
+	int res, right_nent;
+	struct kvm_cpuid2 *cpuid;
+
+	cpuid = alloc_custom_kvm_cpuid2(MAX_NENT);
+
+	/* 0 nent, return E2BIG */
+	cpuid->nent = 0;
+	res = _kvm_ioctl(vm, KVM_GET_EMULATED_CPUID, cpuid);
+	TEST_ASSERT(res == -1 && errno == E2BIG, "nent=0 should fail as E2BIG");
+	clean_entries_kvm_cpuid2(cpuid);
+
+	/* high nent: the ioctl fills the entries and adjusts nent down */
+	cpuid->nent = MAX_NENT;
+	res = _kvm_ioctl(vm, KVM_GET_EMULATED_CPUID, cpuid);
+	TEST_ASSERT(res == 0, "nent > actual nent should not fail");
+	right_nent = cpuid->nent;
+	clean_entries_kvm_cpuid2(cpuid);
+
+	/* nent one above the actual count: should still succeed with the same nent */
+	cpuid->nent++;
+	res = _kvm_ioctl(vm, KVM_GET_EMULATED_CPUID, cpuid);
+	TEST_ASSERT(res == 0, "nent > actual nent should not fail");
+	TEST_ASSERT(right_nent == cpuid->nent, "nent should always be the same");
+	clean_entries_kvm_cpuid2(cpuid);
+
+	/* low nent, return E2BIG */
+	if (right_nent > 1) {
+		cpuid->nent = 1;
+		res = _kvm_ioctl(vm, KVM_GET_EMULATED_CPUID, cpuid);
+		TEST_ASSERT(res == -1 && errno == E2BIG, "nent=1 should fail as E2BIG");
+		clean_entries_kvm_cpuid2(cpuid);
+	}
+
+	/* exact nent */
+	cpuid->nent = right_nent;
+	res = _kvm_ioctl(vm, KVM_GET_EMULATED_CPUID, cpuid);
+	TEST_ASSERT(res == 0, "nent == actual nent should not fail");
+	TEST_ASSERT(cpuid->nent == right_nent,
+		"KVM_GET_EMULATED_CPUID should leave nent unchanged when it is exact");
+	clean_entries_kvm_cpuid2(cpuid);
+
+	free(cpuid);
+}
+
 int main(void)
 {
-	struct kvm_cpuid2 *supp_cpuid, *cpuid2;
+	struct kvm_cpuid2 *supp_cpuid, *emul_cpuid, *cpuid2;
 	vm_vaddr_t cpuid_gva;
 	struct kvm_vm *vm;
 	int stage;
@@ -171,5 +248,14 @@ int main(void)
 	for (stage = 0; stage < 3; stage++)
 		run_vcpu(vm, VCPU_ID, stage);
 
+	if (kvm_check_cap(KVM_CAP_EXT_EMUL_CPUID)) {
+		emul_cpuid = kvm_get_emulated_cpuid();
+		vcpu_set_cpuid(vm, VCPU_ID, emul_cpuid);
+		cpuid2 = vcpu_get_cpuid(vm, VCPU_ID);
+
+		test_emulated_entries(vm);
+		compare_cpuids(emul_cpuid, cpuid2);
+	}
+
 	kvm_vm_free(vm);
 }
-- 
2.30.2
