[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <23a90397b5ece88a8297d4010d5f53acd17335ff.1770116051.git.isaku.yamahata@intel.com>
Date: Tue, 3 Feb 2026 10:17:13 -0800
From: isaku.yamahata@...el.com
To: kvm@...r.kernel.org
Cc: isaku.yamahata@...el.com,
isaku.yamahata@...il.com,
Paolo Bonzini <pbonzini@...hat.com>,
Sean Christopherson <seanjc@...gle.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH 30/32] KVM: selftests: Test cases for L1 APIC timer virtualization
From: Isaku Yamahata <isaku.yamahata@...el.com>
Test nVMX APIC timer virtualization for L1 to see how KVM in L0 works.
It exercises KVM TSC deadline conversion between L0 and L1.
Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
---
tools/testing/selftests/kvm/Makefile.kvm | 1 +
.../selftests/kvm/include/x86/processor.h | 6 +
.../kvm/x86/vmx_apic_timer_virt_test.c | 317 ++++++++++++++++++
3 files changed, 324 insertions(+)
create mode 100644 tools/testing/selftests/kvm/x86/vmx_apic_timer_virt_test.c
diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
index df126774f028..aec47a608b87 100644
--- a/tools/testing/selftests/kvm/Makefile.kvm
+++ b/tools/testing/selftests/kvm/Makefile.kvm
@@ -140,6 +140,7 @@ TEST_GEN_PROGS_x86 += x86/triple_fault_event_test
TEST_GEN_PROGS_x86 += x86/recalc_apic_map_test
TEST_GEN_PROGS_x86 += x86/aperfmperf_test
TEST_GEN_PROGS_x86 += x86/timer_latency
+TEST_GEN_PROGS_x86 += x86/vmx_apic_timer_virt_test
TEST_GEN_PROGS_x86 += x86/vmx_apic_timer_virt_vmcs_test
TEST_GEN_PROGS_x86 += access_tracking_perf_test
TEST_GEN_PROGS_x86 += coalesced_io_test
diff --git a/tools/testing/selftests/kvm/include/x86/processor.h b/tools/testing/selftests/kvm/include/x86/processor.h
index 57d62a425109..b6c33bc34ed6 100644
--- a/tools/testing/selftests/kvm/include/x86/processor.h
+++ b/tools/testing/selftests/kvm/include/x86/processor.h
@@ -1430,6 +1430,12 @@ static inline void cli(void)
asm volatile ("cli");
}
+static inline void serialize(void)
+{
+	/* SERIALIZE instruction, emitted as raw opcode bytes: the mnemonic needs binutils >= 2.35. */
+	kvm_asm_safe(".byte 0x0f, 0x01, 0xe8");
+}
+
void __vm_xsave_require_permission(uint64_t xfeature, const char *name);
#define vm_xsave_require_permission(xfeature) \
diff --git a/tools/testing/selftests/kvm/x86/vmx_apic_timer_virt_test.c b/tools/testing/selftests/kvm/x86/vmx_apic_timer_virt_test.c
new file mode 100644
index 000000000000..ea465e9825d8
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86/vmx_apic_timer_virt_test.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025, Intel Corporation.
+ *
+ * Test timer expiration conversion and exercise various LVTT modes.
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "vmx.h"
+
+#include <string.h>
+#include <sys/ioctl.h>
+#include <stdatomic.h>
+
+#include <linux/math64.h>
+
+static uint64_t host_tsc_khz;		/* host TSC frequency, derived from CPUID 0x15/0x16 in main() */
+static uint64_t max_guest_tsc_khz;	/* largest guest TSC frequency KVM's scaling multiplier allows */
+
+/* Any value [32, 255] for timer vector is okay. */
+#define TIMER_VECTOR 0xec
+
+static atomic_int timer_interrupted;	/* bumped by the guest timer interrupt handler */
+
+static void guest_timer_interrupt_handler(struct ex_regs *regs)
+{
+	atomic_fetch_add(&timer_interrupted, 1);	/* record the expiration for the test body */
+	x2apic_write_reg(APIC_EOI, 0);	/* ack so further timer interrupts can be delivered */
+}
+
+static void reap_interrupt(void)
+{
+	GUEST_ASSERT(!wrmsr_safe(MSR_IA32_TSC_DEADLINE, 0));	/* writing 0 disarms the TSC-deadline timer */
+	sti_nop_cli();	/* brief interrupt window to drain any pending timer interrupt */
+}
+
+static void deadline_write_test(bool do_interrupt, bool mask,
+				uint64_t deadlines[], size_t nr_deadlines)
+{
+	int i;
+
+	for (i = 0; i < nr_deadlines; i++) {
+		uint64_t deadline = deadlines[i];
+		uint64_t val;
+
+		reap_interrupt();
+
+		atomic_store(&timer_interrupted, 0);
+		sti();
+		GUEST_ASSERT(!wrmsr_safe(MSR_IA32_TSC_DEADLINE, deadline));
+		/* Give an already-due timer interrupt a chance to be delivered while IF=1. */
+		serialize();
+		cli();
+
+		GUEST_ASSERT(!rdmsr_safe(MSR_IA32_TSC_DEADLINE, &val));
+
+		if (do_interrupt) {	/* caller passed already-expired (or zero) deadlines */
+			GUEST_ASSERT(val == 0);	/* an expired or never-armed deadline reads back as 0 */
+			if (mask || deadline == 0)
+				GUEST_ASSERT(!atomic_load(&timer_interrupted));	/* masked LVTT or disarmed: no interrupt */
+			else
+				GUEST_ASSERT(atomic_load(&timer_interrupted) == 1);	/* exactly one interrupt per write */
+		} else {	/* caller passed far-future deadlines */
+			GUEST_ASSERT(val == deadline);	/* still armed */
+			GUEST_ASSERT(!atomic_load(&timer_interrupted));
+		}
+	}
+}
+
+static void deadline_write_hlt_test(uint64_t deadlines[], size_t nr_deadlines)
+{
+	int i;
+
+	for (i = 0; i < nr_deadlines; i++) {
+		uint64_t deadline = deadlines[i];
+		uint64_t val;
+
+		reap_interrupt();
+
+		GUEST_ASSERT(deadline);	/* a zero deadline disarms the timer; hlt below would never wake */
+
+		atomic_store(&timer_interrupted, 0);
+		GUEST_ASSERT(!wrmsr_safe(MSR_IA32_TSC_DEADLINE, deadline));
+
+		GUEST_ASSERT(!rdmsr_safe(MSR_IA32_TSC_DEADLINE, &val));
+		GUEST_ASSERT(val == deadline || val == 0);	/* may already have expired between wrmsr and rdmsr */
+		GUEST_ASSERT(!atomic_load(&timer_interrupted));	/* IF is still clear here */
+
+		asm volatile ("sti; hlt; nop; cli"
+			      /* L1 exit handler doesn't preserve GP registers. */
+			      : : : "cc", "memory",
+			      "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp",
+			      "r8", "r9", "r10", "r11", "r12", "r13", "r14",
+			      "r15");
+
+		GUEST_ASSERT(!rdmsr_safe(MSR_IA32_TSC_DEADLINE, &val));
+		GUEST_ASSERT(val == 0);	/* hlt resumed => the timer fired and cleared the deadline */
+		GUEST_ASSERT(atomic_load(&timer_interrupted) == 1);
+	}
+}
+
+static void deadline_no_int_test(void)
+{
+	uint64_t tsc = rdtsc();
+	uint64_t deadlines[] = {
+		0ull,	/* 0 disarms the timer */
+		/* far-future values, all guaranteed > tsc. */
+		max(~0ull - tsc, ~0ull / 2 + tsc / 2),
+		~0ull - 1,
+		~0ull - 2,
+		~0ull,
+	};
+
+	deadline_write_test(false, false, deadlines, ARRAY_SIZE(deadlines));	/* no interrupt expected */
+}
+
+static void __deadline_int_test(bool do_interrupt, bool mask)
+{
+	uint64_t tsc = rdtsc();
+	uint64_t deadlines[] = {
+		0ull,
+		1ull,
+		2ull,
+		/* a past deadline: ~1 msec ago or tsc/2 + 1; min() also guards against underflow wrap. */
+		min(tsc - guest_tsc_khz, tsc / 2 + 1),
+		tsc,
+	};
+
+	deadline_write_test(do_interrupt, mask, deadlines, ARRAY_SIZE(deadlines));
+}
+
+static void deadline_int_test(void)
+{
+	__deadline_int_test(true, false);	/* LVTT unmasked: each past deadline must interrupt once */
+}
+
+static void deadline_int_mask_test(void)
+{
+	__deadline_int_test(true, true);	/* LVTT masked: the timer expires but no interrupt is delivered */
+}
+
+static void deadline_hlt_test(void)
+{
+	uint64_t tsc = rdtsc();
+	/* ~1 msec in the future. */
+	uint64_t future = tsc + guest_tsc_khz;
+	uint64_t deadlines[] = {
+		1ull,
+		2ull,
+		/* a positive value in (0, tsc], i.e. already in the past. */
+		tsc > guest_tsc_khz ? tsc - guest_tsc_khz : tsc / 2 + 1,
+		tsc,
+		/* ~1 msec ahead; if that addition overflowed, fall back to another value > tsc. */
+		future > tsc ? future : ~0ull / 2 + tsc / 2,
+	};
+
+	deadline_write_hlt_test(deadlines, ARRAY_SIZE(deadlines));
+}
+
+static void guest_code(void)
+{
+	x2apic_enable();
+
+	x2apic_write_reg(APIC_LVTT, APIC_LVT_TIMER_TSCDEADLINE | TIMER_VECTOR);	/* phase 1: TSC-deadline mode, unmasked */
+	deadline_no_int_test();
+	deadline_int_test();
+	deadline_hlt_test();
+
+	x2apic_write_reg(APIC_LVTT, APIC_LVT_TIMER_TSCDEADLINE |
+			 APIC_LVT_MASKED | TIMER_VECTOR);	/* phase 2: same mode with LVTT masked */
+	deadline_no_int_test();
+	deadline_int_mask_test();
+
+	GUEST_DONE();
+}
+
+static void run_vcpu(struct kvm_vcpu *vcpu)	/* run the guest until it reports UCALL_DONE */
+{
+	bool done = false;
+
+	while (!done) {
+		struct ucall uc;
+
+		vcpu_run(vcpu);
+		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);	/* ucalls surface as port I/O exits */
+
+		switch (get_ucall(vcpu, &uc)) {
+		case UCALL_ABORT:
+			REPORT_GUEST_ASSERT(uc);
+			/* NOT REACHED */
+		case UCALL_SYNC:
+			break;
+		case UCALL_PRINTF:
+			pr_info("%s", uc.buffer);	/* forward guest printf output */
+			break;
+		case UCALL_DONE:
+			done = true;
+			break;
+		default:
+			TEST_FAIL("Unknown ucall %lu", uc.cmd);
+		}
+	}
+}
+
+static int test_tsc_deadline(bool tsc_offset, uint64_t guest_tsc_khz__)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+
+	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+
+	if (guest_tsc_khz__) {	/* 0 means: keep the default guest TSC frequency */
+		int ret;
+
+		ret = __vcpu_ioctl(vcpu, KVM_SET_TSC_KHZ, (void *)guest_tsc_khz__);
+		if (ret) {	/* frequency not scalable on this host; let the caller stop */
+			kvm_vm_free(vm);
+			return ret;
+		}
+
+		guest_tsc_khz = guest_tsc_khz__;	/* synced into the guest below */
+	}
+
+	if (tsc_offset) {
+		uint64_t offset;
+
+		__TEST_REQUIRE(!__vcpu_has_device_attr(vcpu, KVM_VCPU_TSC_CTRL,
+						       KVM_VCPU_TSC_OFFSET),
+			       "KVM_VCPU_TSC_OFFSET not supported; skipping test");
+
+		/*
+		 * Make the guest (L1) virtual => physical (L0) deadline
+		 * conversion prone to overflow/underflow.
+		 */
+		offset = -rdtsc();	/* offset ~= -host TSC, so the guest TSC starts near 0 */
+		vcpu_device_attr_set(vcpu, KVM_VCPU_TSC_CTRL,
+				     KVM_VCPU_TSC_OFFSET, &offset);
+	}
+
+	vcpu_set_cpuid_feature(vcpu, X86_FEATURE_TSC_DEADLINE_TIMER);
+	vm_install_exception_handler(vm, TIMER_VECTOR,
+				     guest_timer_interrupt_handler);
+
+	sync_global_to_guest(vm, host_tsc_khz);
+	sync_global_to_guest(vm, guest_tsc_khz);	/* presumably a selftest-library global — TODO confirm it is declared there */
+	run_vcpu(vcpu);
+
+	kvm_vm_free(vm);
+
+	return 0;
+}
+
+static void test(void)
+{
+	uint64_t guest_tsc_khz__;
+	int r;
+
+	test_tsc_deadline(false, 0);	/* default guest TSC frequency */
+	test_tsc_deadline(true, 0);
+
+	for (guest_tsc_khz__ = host_tsc_khz; guest_tsc_khz__ > 0;
+	     guest_tsc_khz__ >>= 1) {	/* halve the guest TSC frequency each step */
+		r = test_tsc_deadline(false, guest_tsc_khz__);
+		if (r)	/* KVM_SET_TSC_KHZ rejected this frequency; stop scaling down */
+			break;
+
+		test_tsc_deadline(true, guest_tsc_khz__);
+	}
+
+	for (guest_tsc_khz__ = host_tsc_khz; guest_tsc_khz__ < max_guest_tsc_khz;
+	     guest_tsc_khz__ <<= 1) {	/* double the guest TSC frequency each step */
+		r = test_tsc_deadline(false, guest_tsc_khz__);
+		if (r)	/* KVM_SET_TSC_KHZ rejected this frequency; stop scaling up */
+			break;
+
+		test_tsc_deadline(true, guest_tsc_khz__);
+	}
+
+	test_tsc_deadline(false, max_guest_tsc_khz);	/* the largest frequency KVM can scale to */
+	test_tsc_deadline(true, max_guest_tsc_khz);
+}
+
+int main(int argc, char *argv[])
+{
+	uint32_t eax_denominator, ebx_numerator, ecx_hz, edx;
+
+	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_X2APIC));
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_TSC_CONTROL));
+
+	/* CPUID 0x15: TSC/crystal ratio in EBX/EAX, crystal frequency (Hz) in ECX. */
+	cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
+	TEST_REQUIRE(ebx_numerator > 0);
+	TEST_REQUIRE(eax_denominator > 0);
+
+	if (ecx_hz > 0)
+		/* Widen before multiplying: ecx_hz * ebx_numerator can overflow 32 bits. */
+		host_tsc_khz = (uint64_t)ecx_hz * ebx_numerator / eax_denominator / 1000;
+	else {
+		uint32_t eax_base_mhz, ebx, ecx;
+
+		/* No crystal frequency enumerated; fall back to CPUID 0x16 base MHz. */
+		cpuid(0x16, &eax_base_mhz, &ebx, &ecx, &edx);
+		host_tsc_khz = (uint64_t)eax_base_mhz * 1000 * ebx_numerator /
+			eax_denominator;
+	}
+	TEST_REQUIRE(host_tsc_khz > 0);
+
+	/* See arch/x86/kvm/{x86.c, vmx/vmx.c}. There is no way for userspace to retrieve it. */
+#define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
+	max_guest_tsc_khz = min((uint64_t)0x7fffffffULL,
+	mul_u64_u32_shr(KVM_VMX_TSC_MULTIPLIER_MAX, host_tsc_khz, 48));
+
+	test();
+
+	return 0;
+}
--
2.45.2
Powered by blists - more mailing lists