Message-Id: <20221004123956.188909-46-vkuznets@redhat.com>
Date: Tue, 4 Oct 2022 14:39:55 +0200
From: Vitaly Kuznetsov <vkuznets@...hat.com>
To: kvm@...r.kernel.org, Paolo Bonzini <pbonzini@...hat.com>,
Sean Christopherson <seanjc@...gle.com>
Cc: Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Michael Kelley <mikelley@...rosoft.com>,
Siddharth Chandrasekaran <sidcha@...zon.de>,
Yuan Yao <yuan.yao@...ux.intel.com>,
Maxim Levitsky <mlevitsk@...hat.com>,
linux-hyperv@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH v11 45/46] KVM: selftests: hyperv_svm_test: Introduce L2 TLB flush test

Enable Hyper-V L2 TLB flush and check that Hyper-V TLB flush hypercalls
from L2 don't exit to L1 unless 'TlbLockCount' is set in the Partition
assist page.

Signed-off-by: Vitaly Kuznetsov <vkuznets@...hat.com>
---
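A note for reviewers: what follows is a minimal userspace model of the
L0-side behavior the test relies on. It is not part of the change; the
function and struct names are made up for illustration, and only the
HV_SVM_* values and the TlbLockCount placement (first u32 of the
Partition assist page) mirror the real definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define HV_SVM_EXITCODE_ENL 0xf0000000
    #define HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH (1)

    /* Stand-in for the relevant VMCB exit fields. */
    struct fake_exit {
        uint64_t exit_code;
        uint64_t exit_info_1;
    };

    /*
     * The flush itself is always performed by L0; a synthetic vmexit
     * is injected into L1 only when TlbLockCount is non-zero.
     */
    static int l0_handles_l2_flush(uint32_t tlb_lock_count, struct fake_exit *info)
    {
        /* ...the actual TLB flush would happen here... */
        if (!tlb_lock_count)
            return 0; /* resume L2, L1 is not notified */

        info->exit_code = HV_SVM_EXITCODE_ENL;
        info->exit_info_1 = HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH;
        return 1; /* deliver a synthetic vmexit to L1 */
    }

    int main(void)
    {
        struct fake_exit info = { 0 };

        printf("TlbLockCount == 0 -> exit to L1: %d\n", l0_handles_l2_flush(0, &info));
        printf("TlbLockCount == 1 -> exit to L1: %d\n", l0_handles_l2_flush(1, &info));
        return 0;
    }

Compiled with a plain 'cc', this prints 0 for the first call and 1 for
the second, which is exactly the split the two new run_guest() blocks in
guest_code() assert.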
.../selftests/kvm/include/x86_64/svm.h | 4 ++
.../selftests/kvm/x86_64/hyperv_svm_test.c | 61 +++++++++++++++++--
2 files changed, 60 insertions(+), 5 deletions(-)

diff --git a/tools/testing/selftests/kvm/include/x86_64/svm.h b/tools/testing/selftests/kvm/include/x86_64/svm.h
index 483e6ae12f69..4803e1056055 100644
--- a/tools/testing/selftests/kvm/include/x86_64/svm.h
+++ b/tools/testing/selftests/kvm/include/x86_64/svm.h
@@ -76,6 +76,10 @@ struct hv_vmcb_enlightenments {
*/
#define HV_VMCB_NESTED_ENLIGHTENMENTS (1U << 31)
+/* Synthetic VM-Exit */
+#define HV_SVM_EXITCODE_ENL 0xf0000000
+#define HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH (1)
+
struct __attribute__ ((__packed__)) vmcb_control_area {
u32 intercept_cr;
u32 intercept_dr;
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
index 1c3fc38b4f15..edb779615a79 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
@@ -25,6 +25,8 @@
void l2_guest_code(void)
{
+ u64 unused;
+
GUEST_SYNC(3);
/* Exit to L1 */
vmmcall();
@@ -38,11 +40,30 @@ void l2_guest_code(void)
GUEST_SYNC(5);
+ /* L2 TLB flush tests */
+ hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
+ HV_HYPERCALL_FAST_BIT, 0x0,
+ HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
+ HV_FLUSH_ALL_PROCESSORS);
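+ /* The rdmsr below is intercepted by L1, giving it an exit in between the two flush hypercalls */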
+ rdmsr(MSR_FS_BASE);
+ /*
+ * Note: hypercall status (RAX) is not preserved correctly by L1 after
+ * synthetic vmexit; use the unchecked version.
+ */
+ __hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
+ HV_HYPERCALL_FAST_BIT, 0x0,
+ HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
+ HV_FLUSH_ALL_PROCESSORS, &unused);
+ /* Make sure we're not issuing the Hyper-V TLB flush call again */
+ __asm__ __volatile__ ("mov $0xdeadbeef, %rcx");
+
/* Done, exit to L1 and never come back. */
vmmcall();
}
-static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm)
+static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm,
+ struct hyperv_test_pages *hv_pages,
+ vm_vaddr_t pgs_gpa)
{
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
struct vmcb *vmcb = svm->vmcb;
@@ -50,13 +71,23 @@ static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm)
GUEST_SYNC(1);
- wrmsr(HV_X64_MSR_GUEST_OS_ID, (u64)0x8100 << 48);
+ wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
+ wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
+ enable_vp_assist(hv_pages->vp_assist_gpa, hv_pages->vp_assist);
GUEST_ASSERT(svm->vmcb_gpa);
/* Prepare for L2 execution. */
generic_svm_setup(svm, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
+ /* L2 TLB flush setup */
+ hve->partition_assist_page = hv_pages->partition_assist_gpa;
+ hve->hv_enlightenments_control.nested_flush_hypercall = 1;
+ hve->hv_vm_id = 1;
+ hve->hv_vp_id = 1;
+ current_vp_assist->nested_control.features.directhypercall = 1;
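+ /* TlbLockCount = 0: L0 handles flushes itself, no synthetic exits to L1 */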
+ *(u32 *)(hv_pages->partition_assist) = 0;
+
GUEST_SYNC(2);
run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
@@ -91,6 +122,20 @@ static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm)
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
vmcb->save.rip += 2; /* rdmsr */
+
+ /*
+ * L2 TLB flush test. The first VMMCALL should be handled directly by
+ * L0; no VMMCALL exit to L1 is expected.
+ */
+ run_guest(vmcb, svm->vmcb_gpa);
+ GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
+ vmcb->save.rip += 2; /* rdmsr */
+ /* Enable synthetic vmexit (TlbLockCount = 1) */
+ *(u32 *)(hv_pages->partition_assist) = 1;
+ run_guest(vmcb, svm->vmcb_gpa);
+ GUEST_ASSERT(vmcb->control.exit_code == HV_SVM_EXITCODE_ENL);
+ GUEST_ASSERT(vmcb->control.exit_info_1 == HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH);
+
run_guest(vmcb, svm->vmcb_gpa);
GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
GUEST_SYNC(6);
@@ -100,8 +145,8 @@ static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm)
int main(int argc, char *argv[])
{
- vm_vaddr_t nested_gva = 0;
-
+ vm_vaddr_t nested_gva = 0, hv_pages_gva = 0;
+ vm_vaddr_t hcall_page;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
@@ -115,7 +160,13 @@ int main(int argc, char *argv[])
vcpu_set_hv_cpuid(vcpu);
run = vcpu->run;
vcpu_alloc_svm(vm, &nested_gva);
- vcpu_args_set(vcpu, 1, nested_gva);
+ vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva);
+
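+ /* Backing page for the Hyper-V hypercall (HV_X64_MSR_HYPERCALL) */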
+ hcall_page = vm_vaddr_alloc_pages(vm, 1);
+ memset(addr_gva2hva(vm, hcall_page), 0x0, getpagesize());
+
+ vcpu_args_set(vcpu, 3, nested_gva, hv_pages_gva, addr_gva2gpa(vm, hcall_page));
+ vcpu_set_msr(vcpu, HV_X64_MSR_VP_INDEX, vcpu->id);
for (stage = 1;; stage++) {
vcpu_run(vcpu);
--
2.37.3