Message-ID: <20251118171113.363528-4-griffoul@gmail.org>
Date: Tue, 18 Nov 2025 17:11:06 +0000
From: griffoul@...il.com
To: kvm@...r.kernel.org
Cc: seanjc@...gle.com,
pbonzini@...hat.com,
vkuznets@...hat.com,
shuah@...nel.org,
dwmw@...zon.co.uk,
linux-kselftest@...r.kernel.org,
linux-kernel@...r.kernel.org,
Fred Griffoul <fgriffo@...zon.co.uk>
Subject: [PATCH v2 03/10] KVM: x86: Add nested state validation for pfncache support
From: Fred Griffoul <fgriffo@...zon.co.uk>
Implement state validation for nested virtualization to enable pfncache
support for L1 guest pages.

Add a new nested_ops callback, is_nested_state_invalid(), which detects
when KVM needs to reload nested virtualization state. When the callback
reports invalid state, a KVM_REQ_GET_NESTED_STATE_PAGES request is made
so that the affected L1 guest pages are reloaded before L2 execution
resumes. The callback is checked on the guest entry/exit path while the
vCPU runs in IN_GUEST_MODE.

For now, the VMX implementation simply returns false; full support is
added in the next patch.
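
As a rough illustration of where this is heading, a populated VMX
implementation could look like the sketch below. The msr_bitmap_cache
field is hypothetical and only stands in for whichever pfncache a later
patch actually tracks:

	static bool vmx_is_nested_state_invalid(struct kvm_vcpu *vcpu)
	{
		struct vcpu_vmx *vmx = to_vmx(vcpu);
		/* Hypothetical pfncache field, for illustration only. */
		struct gfn_to_pfn_cache *gpc = &vmx->nested.msr_bitmap_cache;
		bool valid;

		/* kvm_gpc_check() requires gpc->lock held for read. */
		read_lock(&gpc->lock);
		valid = kvm_gpc_check(gpc, PAGE_SIZE);
		read_unlock(&gpc->lock);

		/* A stale cache means nested state pages must be reloaded. */
		return !valid;
	}

Because the check runs with the vCPU in IN_GUEST_MODE, it has to stay
cheap and must not sleep; taking gpc->lock for read satisfies that.
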
Signed-off-by: Fred Griffoul <fgriffo@...zon.co.uk>
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/vmx/nested.c       |  6 ++++++
 arch/x86/kvm/x86.c              | 14 +++++++++++++-
 3 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 48598d017d6f..4675e71b33a7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1960,6 +1960,7 @@ struct kvm_x86_nested_ops {
 			 struct kvm_nested_state __user *user_kvm_nested_state,
 			 struct kvm_nested_state *kvm_state);
 	bool (*get_nested_state_pages)(struct kvm_vcpu *vcpu);
+	bool (*is_nested_state_invalid)(struct kvm_vcpu *vcpu);
 	int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
 
 	int (*enable_evmcs)(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 0de84b30c41d..627a6c24625d 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3588,6 +3588,11 @@ static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
 	return true;
 }
 
+static bool vmx_is_nested_state_invalid(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
 {
 	struct vmcs12 *vmcs12;
@@ -7527,6 +7532,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
 	.get_state = vmx_get_nested_state,
 	.set_state = vmx_set_nested_state,
 	.get_nested_state_pages = vmx_get_nested_state_pages,
+	.is_nested_state_invalid = vmx_is_nested_state_invalid,
 	.write_log_dirty = nested_vmx_write_pml_buffer,
 #ifdef CONFIG_KVM_HYPERV
 	.enable_evmcs = nested_enable_evmcs,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4b8138bd4857..1a9c1171df49 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2262,12 +2262,24 @@ int kvm_emulate_monitor(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_monitor);
 
+static inline bool kvm_invalid_nested_state(struct kvm_vcpu *vcpu)
+{
+	if (is_guest_mode(vcpu) &&
+	    kvm_x86_ops.nested_ops->is_nested_state_invalid &&
+	    kvm_x86_ops.nested_ops->is_nested_state_invalid(vcpu)) {
+		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+		return true;
+	}
+	return false;
+}
+
 static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
 {
 	xfer_to_guest_mode_prepare();
 	return READ_ONCE(vcpu->mode) == EXITING_GUEST_MODE ||
-	       kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending();
+	       kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending() ||
+	       kvm_invalid_nested_state(vcpu);
 }
 
 static fastpath_t __handle_fastpath_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
--
2.43.0