Message-ID: <20260113003016.3511895-11-jmattson@google.com>
Date: Mon, 12 Jan 2026 16:30:05 -0800
From: Jim Mattson <jmattson@...gle.com>
To: Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>, Shuah Khan <shuah@...nel.org>, Joerg Roedel <joro@...tes.org>,
Avi Kivity <avi@...hat.com>, Alexander Graf <agraf@...e.de>,
"Radim Krčmář" <rkrcmar@...hat.com>, David Hildenbrand <david@...nel.org>, Cathy Avery <cavery@...hat.com>,
kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-kselftest@...r.kernel.org
Cc: Jim Mattson <jmattson@...gle.com>
Subject: [PATCH 10/10] KVM: selftests: nSVM: Add svm_nested_pat test

Verify KVM's virtualization of the PAT MSR for nested SVM guests and,
when nested NPT is enabled, of the VMCB12 g_pat field and of the PAT
value observed by L2.
Signed-off-by: Jim Mattson <jmattson@...gle.com>
---
tools/testing/selftests/kvm/Makefile.kvm | 1 +
.../selftests/kvm/x86/svm_nested_pat_test.c | 357 ++++++++++++++++++
2 files changed, 358 insertions(+)
create mode 100644 tools/testing/selftests/kvm/x86/svm_nested_pat_test.c
diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
index 33ff81606638..27f8087eafec 100644
--- a/tools/testing/selftests/kvm/Makefile.kvm
+++ b/tools/testing/selftests/kvm/Makefile.kvm
@@ -109,6 +109,7 @@ TEST_GEN_PROGS_x86 += x86/state_test
 TEST_GEN_PROGS_x86 += x86/vmx_preemption_timer_test
 TEST_GEN_PROGS_x86 += x86/svm_vmcall_test
 TEST_GEN_PROGS_x86 += x86/svm_int_ctl_test
+TEST_GEN_PROGS_x86 += x86/svm_nested_pat_test
 TEST_GEN_PROGS_x86 += x86/svm_nested_shutdown_test
 TEST_GEN_PROGS_x86 += x86/svm_nested_soft_inject_test
 TEST_GEN_PROGS_x86 += x86/tsc_scaling_sync
diff --git a/tools/testing/selftests/kvm/x86/svm_nested_pat_test.c b/tools/testing/selftests/kvm/x86/svm_nested_pat_test.c
new file mode 100644
index 000000000000..fa016e65dbf6
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86/svm_nested_pat_test.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KVM nested SVM PAT test
+ *
+ * Copyright (C) 2026, Google LLC.
+ *
+ * Test that KVM correctly virtualizes the PAT MSR and VMCB g_pat field
+ * for nested SVM guests:
+ *
+ * o With nested NPT disabled:
+ *   - L1 and L2 share the same PAT
+ *   - The vmcb12.g_pat is ignored
+ * o With nested NPT enabled:
+ *   - Invalid g_pat in vmcb12 should cause VMEXIT_INVALID
+ *   - L2 should see vmcb12.g_pat via RDMSR, not L1's PAT
+ *   - L2's writes to PAT should be saved to vmcb12 on exit
+ *   - L1's PAT should be restored after #VMEXIT from L2
+ *   - State save/restore should preserve both L1's and L2's PAT values
+ */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+
+#define L2_GUEST_STACK_SIZE 256
+
+#define PAT_DEFAULT 0x0007040600070406ULL
+#define L1_PAT_VALUE 0x0007040600070404ULL /* Change PA0 to WT */
+#define L2_VMCB12_PAT 0x0606060606060606ULL /* All WB */
+#define L2_PAT_MODIFIED 0x0606060606060604ULL /* Change PA0 to WT */
+#define INVALID_PAT_VALUE 0x0808080808080808ULL /* 8 is reserved */
+
+/*
+ * Shared state between L1 and L2 for verification.
+ */
+struct pat_test_data {
+	uint64_t l2_pat_read;
+	uint64_t l2_pat_after_write;
+	uint64_t l1_pat_after_vmexit;
+	uint64_t vmcb12_gpat_after_exit;
+	bool l2_done;
+};
+
+static struct pat_test_data *pat_data;
+
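+/*
+ * L2 (nested NPT disabled): read the PAT value L2 observes, change PA0
+ * to WT, and record both values for L1 to check after #VMEXIT.
+ */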
+static void l2_guest_code_npt_disabled(void)
+{
+	pat_data->l2_pat_read = rdmsr(MSR_IA32_CR_PAT);
+	wrmsr(MSR_IA32_CR_PAT, L2_PAT_MODIFIED);
+	pat_data->l2_pat_after_write = rdmsr(MSR_IA32_CR_PAT);
+	pat_data->l2_done = true;
+	vmmcall();
+}
+
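+/*
+ * L2 (nested NPT enabled): same sequence as the NPT-disabled variant;
+ * what differs is which PAT value L2 is expected to observe.
+ */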
+static void l2_guest_code_npt_enabled(void)
+{
+	pat_data->l2_pat_read = rdmsr(MSR_IA32_CR_PAT);
+	wrmsr(MSR_IA32_CR_PAT, L2_PAT_MODIFIED);
+	pat_data->l2_pat_after_write = rdmsr(MSR_IA32_CR_PAT);
+	pat_data->l2_done = true;
+	vmmcall();
+}
+
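+/*
+ * L2 for the save/restore test: sync to the host before and after
+ * writing PAT so that vCPU state can be saved and restored at each
+ * point, and verify the observed PAT value survives each round trip.
+ */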
+static void l2_guest_code_saverestoretest(void)
+{
+	pat_data->l2_pat_read = rdmsr(MSR_IA32_CR_PAT);
+
+	GUEST_SYNC(1);
+	GUEST_ASSERT_EQ(rdmsr(MSR_IA32_CR_PAT), pat_data->l2_pat_read);
+
+	wrmsr(MSR_IA32_CR_PAT, L2_PAT_MODIFIED);
+	pat_data->l2_pat_after_write = rdmsr(MSR_IA32_CR_PAT);
+
+	GUEST_SYNC(2);
+	GUEST_ASSERT_EQ(rdmsr(MSR_IA32_CR_PAT), L2_PAT_MODIFIED);
+
+	pat_data->l2_done = true;
+	vmmcall();
+}
+
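+/*
+ * With nested NPT disabled, L1 and L2 share the same PAT: L2 reads
+ * L1's value, L2's write is visible to L1 after #VMEXIT, and
+ * vmcb12.g_pat is ignored.
+ */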
+static void l1_svm_code_npt_disabled(struct svm_test_data *svm,
+				     struct pat_test_data *data)
+{
+	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+	struct vmcb *vmcb = svm->vmcb;
+
+	pat_data = data;
+
+	wrmsr(MSR_IA32_CR_PAT, L1_PAT_VALUE);
+	GUEST_ASSERT_EQ(rdmsr(MSR_IA32_CR_PAT), L1_PAT_VALUE);
+
+	generic_svm_setup(svm, l2_guest_code_npt_disabled,
+			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+	vmcb->save.g_pat = L2_VMCB12_PAT;
+
+	vmcb->control.intercept &= ~(1ULL << INTERCEPT_MSR_PROT);
+
+	run_guest(vmcb, svm->vmcb_gpa);
+
+	GUEST_ASSERT_EQ(vmcb->control.exit_code, SVM_EXIT_VMMCALL);
+	GUEST_ASSERT(data->l2_done);
+
+	GUEST_ASSERT_EQ(data->l2_pat_read, L1_PAT_VALUE);
+
+	GUEST_ASSERT_EQ(data->l2_pat_after_write, L2_PAT_MODIFIED);
+
+	data->l1_pat_after_vmexit = rdmsr(MSR_IA32_CR_PAT);
+	GUEST_ASSERT_EQ(data->l1_pat_after_vmexit, L2_PAT_MODIFIED);
+
+	GUEST_DONE();
+}
+
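+/*
+ * A reserved encoding in vmcb12.g_pat must fail VMRUN with
+ * VMEXIT_INVALID (SVM_EXIT_ERR), without ever entering L2.
+ */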
+static void l1_svm_code_invalid_gpat(struct svm_test_data *svm,
+				     struct pat_test_data *data)
+{
+	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+	struct vmcb *vmcb = svm->vmcb;
+
+	pat_data = data;
+
+	generic_svm_setup(svm, l2_guest_code_npt_enabled,
+			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+	vmcb->save.g_pat = INVALID_PAT_VALUE;
+
+	run_guest(vmcb, svm->vmcb_gpa);
+
+	GUEST_ASSERT_EQ(vmcb->control.exit_code, SVM_EXIT_ERR);
+
+	GUEST_ASSERT(!data->l2_done);
+
+	GUEST_DONE();
+}
+
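+/*
+ * With nested NPT enabled, L2 runs with vmcb12.g_pat rather than L1's
+ * PAT: L2's write lands in vmcb12.g_pat on #VMEXIT and L1's PAT is
+ * left untouched.
+ */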
+static void l1_svm_code_npt_enabled(struct svm_test_data *svm,
+				    struct pat_test_data *data)
+{
+	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+	struct vmcb *vmcb = svm->vmcb;
+	uint64_t l1_pat_before;
+
+	pat_data = data;
+
+	wrmsr(MSR_IA32_CR_PAT, L1_PAT_VALUE);
+	l1_pat_before = rdmsr(MSR_IA32_CR_PAT);
+	GUEST_ASSERT_EQ(l1_pat_before, L1_PAT_VALUE);
+
+	generic_svm_setup(svm, l2_guest_code_npt_enabled,
+			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+	vmcb->save.g_pat = L2_VMCB12_PAT;
+
+	vmcb->control.intercept &= ~(1ULL << INTERCEPT_MSR_PROT);
+
+	run_guest(vmcb, svm->vmcb_gpa);
+
+	GUEST_ASSERT_EQ(vmcb->control.exit_code, SVM_EXIT_VMMCALL);
+	GUEST_ASSERT(data->l2_done);
+
+	GUEST_ASSERT_EQ(data->l2_pat_read, L2_VMCB12_PAT);
+
+	GUEST_ASSERT_EQ(data->l2_pat_after_write, L2_PAT_MODIFIED);
+
+	data->vmcb12_gpat_after_exit = vmcb->save.g_pat;
+	GUEST_ASSERT_EQ(data->vmcb12_gpat_after_exit, L2_PAT_MODIFIED);
+
+	data->l1_pat_after_vmexit = rdmsr(MSR_IA32_CR_PAT);
+	GUEST_ASSERT_EQ(data->l1_pat_after_vmexit, L1_PAT_VALUE);
+
+	GUEST_DONE();
+}
+
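+/*
+ * Save/restore test: L2 syncs to the host twice, and the harness saves
+ * and restores vCPU state at each sync point; the final values must
+ * match the normal nested-NPT expectations.
+ */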
+static void l1_svm_code_saverestore(struct svm_test_data *svm,
+				    struct pat_test_data *data)
+{
+	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+	struct vmcb *vmcb = svm->vmcb;
+
+	pat_data = data;
+
+	wrmsr(MSR_IA32_CR_PAT, L1_PAT_VALUE);
+
+	generic_svm_setup(svm, l2_guest_code_saverestoretest,
+			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+	vmcb->save.g_pat = L2_VMCB12_PAT;
+	vmcb->control.intercept &= ~(1ULL << INTERCEPT_MSR_PROT);
+
+	run_guest(vmcb, svm->vmcb_gpa);
+
+	GUEST_ASSERT_EQ(vmcb->control.exit_code, SVM_EXIT_VMMCALL);
+	GUEST_ASSERT(data->l2_done);
+
+	GUEST_ASSERT_EQ(rdmsr(MSR_IA32_CR_PAT), L1_PAT_VALUE);
+
+	GUEST_ASSERT_EQ(vmcb->save.g_pat, L2_PAT_MODIFIED);
+
+	GUEST_DONE();
+}
+
+/*
+ * L2 guest code for the multiple VM-entry test.
+ * On the first VM-entry, read and modify PAT, then exit via VMMCALL.
+ * On the second VM-entry, read PAT again so that L1 can verify the
+ * value written on the first entry was preserved.
+ */
+static void l2_guest_code_multi_vmentry(void)
+{
+	pat_data->l2_pat_read = rdmsr(MSR_IA32_CR_PAT);
+	wrmsr(MSR_IA32_CR_PAT, L2_PAT_MODIFIED);
+	pat_data->l2_pat_after_write = rdmsr(MSR_IA32_CR_PAT);
+	vmmcall();
+
+	pat_data->l2_pat_read = rdmsr(MSR_IA32_CR_PAT);
+	pat_data->l2_done = true;
+	vmmcall();
+}
+
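+/*
+ * Run L2 twice from the same vmcb12: the PAT value L2 wrote on the
+ * first entry must be what it reads back on the second entry, while
+ * L1's PAT is unchanged throughout.
+ */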
+static void l1_svm_code_multi_vmentry(struct svm_test_data *svm,
+				      struct pat_test_data *data)
+{
+	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+	struct vmcb *vmcb = svm->vmcb;
+
+	pat_data = data;
+
+	wrmsr(MSR_IA32_CR_PAT, L1_PAT_VALUE);
+
+	generic_svm_setup(svm, l2_guest_code_multi_vmentry,
+			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+	vmcb->save.g_pat = L2_VMCB12_PAT;
+	vmcb->control.intercept &= ~(1ULL << INTERCEPT_MSR_PROT);
+
+	run_guest(vmcb, svm->vmcb_gpa);
+	GUEST_ASSERT_EQ(vmcb->control.exit_code, SVM_EXIT_VMMCALL);
+
+	GUEST_ASSERT_EQ(data->l2_pat_after_write, L2_PAT_MODIFIED);
+
+	GUEST_ASSERT_EQ(vmcb->save.g_pat, L2_PAT_MODIFIED);
+
+	GUEST_ASSERT_EQ(rdmsr(MSR_IA32_CR_PAT), L1_PAT_VALUE);
+
+	vmcb->save.rip += 3; /* vmmcall */
+	run_guest(vmcb, svm->vmcb_gpa);
+
+	GUEST_ASSERT_EQ(vmcb->control.exit_code, SVM_EXIT_VMMCALL);
+	GUEST_ASSERT(data->l2_done);
+
+	GUEST_ASSERT_EQ(data->l2_pat_read, L2_PAT_MODIFIED);
+
+	GUEST_ASSERT_EQ(rdmsr(MSR_IA32_CR_PAT), L1_PAT_VALUE);
+
+	GUEST_DONE();
+}
+
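+/* Dispatch to the L1 scenario selected by the host via vcpu_args_set(). */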
+static void l1_guest_code(struct svm_test_data *svm, struct pat_test_data *data,
+			  int test_num)
+{
+	switch (test_num) {
+	case 0:
+		l1_svm_code_npt_disabled(svm, data);
+		break;
+	case 1:
+		l1_svm_code_invalid_gpat(svm, data);
+		break;
+	case 2:
+		l1_svm_code_npt_enabled(svm, data);
+		break;
+	case 3:
+		l1_svm_code_saverestore(svm, data);
+		break;
+	case 4:
+		l1_svm_code_multi_vmentry(svm, data);
+		break;
+	}
+}
+
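+/*
+ * Create a fresh VM for each scenario.  When do_save_restore is set,
+ * save and restore the full vCPU state (including nested state) at
+ * every UCALL_SYNC.
+ */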
+static void run_test(int test_number, const char *test_name, bool npt_enabled,
+		     bool do_save_restore)
+{
+	struct pat_test_data *data_hva;
+	vm_vaddr_t svm_gva, data_gva;
+	struct kvm_x86_state *state;
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+	struct ucall uc;
+
+	pr_info("Testing: %d: %s\n", test_number, test_name);
+
+	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
+	if (npt_enabled)
+		vm_enable_npt(vm);
+
+	vcpu_alloc_svm(vm, &svm_gva);
+
+	data_gva = vm_vaddr_alloc_page(vm);
+	data_hva = addr_gva2hva(vm, data_gva);
+	memset(data_hva, 0, sizeof(*data_hva));
+
+	if (npt_enabled)
+		tdp_identity_map_default_memslots(vm);
+
+	vcpu_args_set(vcpu, 3, svm_gva, data_gva, test_number);
+
+	for (;;) {
+		vcpu_run(vcpu);
+		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
+
+		switch (get_ucall(vcpu, &uc)) {
+		case UCALL_ABORT:
+			REPORT_GUEST_ASSERT(uc);
+			/* NOT REACHED */
+		case UCALL_SYNC:
+			if (do_save_restore) {
+				pr_info(" Save/restore at sync point %ld\n",
+					uc.args[1]);
+				state = vcpu_save_state(vcpu);
+				kvm_vm_release(vm);
+				vcpu = vm_recreate_with_one_vcpu(vm);
+				vcpu_load_state(vcpu, state);
+				kvm_x86_state_cleanup(state);
+			}
+			break;
+		case UCALL_DONE:
+			pr_info(" PASSED\n");
+			kvm_vm_free(vm);
+			return;
+		default:
+			TEST_FAIL("Unknown ucall %lu", uc.cmd);
+		}
+	}
+}
+
+int main(int argc, char *argv[])
+{
+	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
+	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_NPT));
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
+
+	run_test(0, "nested NPT disabled", false, false);
+
+	run_test(1, "invalid g_pat", true, false);
+
+	run_test(2, "nested NPT enabled", true, false);
+
+	run_test(3, "save/restore", true, true);
+
+	run_test(4, "multiple entries", true, false);
+
+	return 0;
+}
--
2.52.0.457.g6b5491de43-goog