Message-ID: <dbb1e23b41e503692b3f825ebb80e0ccc6870684.1770116051.git.isaku.yamahata@intel.com>
Date: Tue, 3 Feb 2026 10:16:58 -0800
From: isaku.yamahata@...el.com
To: kvm@...r.kernel.org
Cc: isaku.yamahata@...el.com,
isaku.yamahata@...il.com,
Paolo Bonzini <pbonzini@...hat.com>,
Sean Christopherson <seanjc@...gle.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH 15/32] KVM: nVMX: Enable guest deadline and its shadow VMCS field
From: Isaku Yamahata <isaku.yamahata@...el.com>
Add the guest deadline and guest deadline shadow fields to vmcs12 and
plumb them through nested VM-Enter and the vmcs02-to-vmcs12 sync.

On nested VM-Enter, if L1 enabled the guest APIC timer for L2, convert
L1's deadline to the host timescale with vmx_calc_deadline_l1_to_host()
and program GUEST_DEADLINE_PHY/GUEST_DEADLINE_VIR in vmcs02.  A new
guest_deadline_dirty flag avoids redoing the conversion and the
VMWRITEs when the vmcs12 values haven't changed; the flag is set when
L1 VMWRITEs either field outside guest mode, on VMPTRLD, and when
nested state is restored via KVM_SET_NESTED_STATE.

When syncing vmcs02 back to vmcs12, convert the physical deadline back
to L1's timescale with kvm_read_l1_tsc(), bumping a non-zero deadline
that happens to convert to 0 up to 1 so it isn't mistaken for a
disarmed (zero) deadline, and clear both fields if the control is
supported but wasn't enabled by L1.  Treat the new fields as "rare"
extended vmcs12 fields so they are only synced on demand.

Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
---
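Note for reviewers (not intended for the commit log): the hunks below
use nested_cpu_has_guest_apic_timer() and vmx_calc_deadline_l1_to_host(),
which are not added here and are presumably introduced earlier in this
series.  As a rough sketch only, assuming vmcs12 also grows a
tertiary_vm_exec_control field to go with the
TERTIARY_EXEC_GUEST_APIC_TIMER bit, the enable check would be expected
to follow the existing nested_cpu_has2() pattern:

  static inline bool nested_cpu_has_guest_apic_timer(struct vmcs12 *vmcs12)
  {
          /* Tertiary controls must be activated for the bit to matter. */
          return nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_TERTIARY_CONTROLS) &&
                 (vmcs12->tertiary_vm_exec_control &
                  TERTIARY_EXEC_GUEST_APIC_TIMER);
  }
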
arch/x86/kvm/vmx/nested.c | 48 +++++++++++++++++++++++++++++++++++++++
arch/x86/kvm/vmx/vmcs12.c | 2 ++
arch/x86/kvm/vmx/vmcs12.h | 6 +++++
arch/x86/kvm/vmx/vmx.h | 2 ++
4 files changed, 58 insertions(+)
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 5829562145a7..66adc1821671 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2763,6 +2763,22 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
set_cr4_guest_host_mask(vmx);
}
+static void nested_guest_apic_timer(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+ u64 guest_deadline_shadow = vmcs12->guest_deadline_shadow;
+ u64 guest_deadline = vmcs12->guest_deadline;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!vmx->nested.guest_deadline_dirty)
+ return;
+
+ guest_deadline = vmx_calc_deadline_l1_to_host(vcpu, guest_deadline);
+
+ vmcs_write64(GUEST_DEADLINE_PHY, guest_deadline);
+ vmcs_write64(GUEST_DEADLINE_VIR, guest_deadline_shadow);
+ vmx->nested.guest_deadline_dirty = false;
+}
+
/*
* prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
* L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
@@ -2840,6 +2856,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
if (kvm_caps.has_tsc_control)
vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
+ if (nested_cpu_has_guest_apic_timer(vmcs12))
+ nested_guest_apic_timer(vcpu, vmcs12);
+
nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
if (nested_cpu_has_ept(vmcs12))
@@ -4637,6 +4656,8 @@ static bool is_vmcs12_ext_field(unsigned long field)
case GUEST_IDTR_BASE:
case GUEST_PENDING_DBG_EXCEPTIONS:
case GUEST_BNDCFGS:
+ case GUEST_DEADLINE_PHY:
+ case GUEST_DEADLINE_VIR:
return true;
default:
break;
@@ -4687,6 +4708,24 @@ static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
vmcs12->guest_pending_dbg_exceptions =
vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
+ if (nested_cpu_has_guest_apic_timer(vmcs12)) {
+ u64 guest_deadline_shadow = vmcs_read64(GUEST_DEADLINE_VIR);
+ u64 guest_deadline = vmcs_read64(GUEST_DEADLINE_PHY);
+
+ if (guest_deadline) {
+ guest_deadline = kvm_read_l1_tsc(vcpu, guest_deadline);
+ if (!guest_deadline)
+ guest_deadline = 1;
+ }
+
+ vmcs12->guest_deadline = guest_deadline;
+ vmcs12->guest_deadline_shadow = guest_deadline_shadow;
+ } else if (vmx->nested.msrs.tertiary_ctls & TERTIARY_EXEC_GUEST_APIC_TIMER) {
+ vmcs12->guest_deadline = 0;
+ vmcs12->guest_deadline_shadow = 0;
+ }
+ vmx->nested.guest_deadline_dirty = false;
+
vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
}
@@ -5959,6 +5998,13 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
vmx->nested.dirty_vmcs12 = true;
}
+ if (!is_guest_mode(vcpu) &&
+ (field == GUEST_DEADLINE_PHY ||
+ field == GUEST_DEADLINE_PHY_HIGH ||
+ field == GUEST_DEADLINE_VIR ||
+ field == GUEST_DEADLINE_VIR_HIGH))
+ vmx->nested.guest_deadline_dirty = true;
+
return nested_vmx_succeed(vcpu);
}
@@ -5973,6 +6019,7 @@ static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
}
vmx->nested.dirty_vmcs12 = true;
vmx->nested.force_msr_bitmap_recalc = true;
+ vmx->nested.guest_deadline_dirty = true;
}
/* Emulate the VMPTRLD instruction */
@@ -7150,6 +7197,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
vmx->nested.dirty_vmcs12 = true;
vmx->nested.force_msr_bitmap_recalc = true;
+ vmx->nested.guest_deadline_dirty = true;
ret = nested_vmx_enter_non_root_mode(vcpu, false);
if (ret)
goto error_guest_mode;
diff --git a/arch/x86/kvm/vmx/vmcs12.c b/arch/x86/kvm/vmx/vmcs12.c
index 3842ee1ddabf..6849790a0af1 100644
--- a/arch/x86/kvm/vmx/vmcs12.c
+++ b/arch/x86/kvm/vmx/vmcs12.c
@@ -70,6 +70,8 @@ const unsigned short vmcs12_field_offsets[] = {
FIELD64(HOST_IA32_PAT, host_ia32_pat),
FIELD64(HOST_IA32_EFER, host_ia32_efer),
FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
+ FIELD64(GUEST_DEADLINE_PHY, guest_deadline),
+ FIELD64(GUEST_DEADLINE_VIR, guest_deadline_shadow),
FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
FIELD(EXCEPTION_BITMAP, exception_bitmap),
diff --git a/arch/x86/kvm/vmx/vmcs12.h b/arch/x86/kvm/vmx/vmcs12.h
index d8e09de44f2a..c0d5981475b3 100644
--- a/arch/x86/kvm/vmx/vmcs12.h
+++ b/arch/x86/kvm/vmx/vmcs12.h
@@ -192,6 +192,10 @@ struct __packed vmcs12 {
u16 host_tr_selector;
u16 guest_pml_index;
u16 virtual_timer_vector;
+
+ /* offset 0x3e8 */
+ u64 guest_deadline;
+ u64 guest_deadline_shadow;
};
/*
@@ -375,6 +379,8 @@ static inline void vmx_check_vmcs12_offsets(void)
CHECK_OFFSET(host_tr_selector, 994);
CHECK_OFFSET(guest_pml_index, 996);
CHECK_OFFSET(virtual_timer_vector, 998);
+ CHECK_OFFSET(guest_deadline, 1000);
+ CHECK_OFFSET(guest_deadline_shadow, 1008);
}
extern const unsigned short vmcs12_field_offsets[];
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 28625a2d17bd..bdeef2e12640 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -169,6 +169,8 @@ struct nested_vmx {
bool has_preemption_timer_deadline;
bool preemption_timer_expired;
+ bool guest_deadline_dirty;
+
/*
* Used to snapshot MSRs that are conditionally loaded on VM-Enter in
* order to propagate the guest's pre-VM-Enter value into vmcs02. For
--
2.45.2