Message-ID: <e1925ecd6e282f11efdd53e4de8ce759098135e2.1770116051.git.isaku.yamahata@intel.com>
Date: Tue, 3 Feb 2026 10:16:57 -0800
From: isaku.yamahata@...el.com
To: kvm@...r.kernel.org
Cc: isaku.yamahata@...el.com,
isaku.yamahata@...il.com,
Paolo Bonzini <pbonzini@...hat.com>,
Sean Christopherson <seanjc@...gle.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH 14/32] KVM: VMX: Make vmx_calc_deadline_l1_to_host() non-static
From: Isaku Yamahata <isaku.yamahata@...el.com>
Remove static from vmx_calc_deadline_l1_to_host() and declare it in vmx.h
so that the upcoming nVMX APIC timer virtualization code in nested.c can
call it.

vmx_calc_deadline_l1_to_host() relies on u64_shl_div_u64() on both X86_32
and X86_64, but u64_shl_div_u64() is currently compiled only for X86_64.
Make u64_shl_div_u64() usable for X86_32 as well; without this change,
ARCH=i386 fails to compile.

Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
---
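A note on the X86_32 fallback: the divq path and
mul_u64_u64_div_u64(a, 1ULL << shift, divisor) compute the same 128-bit
quotient, and the shared "high >= divisor" check rejects exactly the
cases where that quotient would not fit in 64 bits, so the fallback is
never reached with an overflowing input. Below is a minimal userspace
sketch of the same math, for reference only (it assumes a compiler with
unsigned __int128; shl_div_u64_ref() is an illustrative name, not kernel
code):

#include <stdint.h>
#include <stdio.h>

/* Portable reference for (a << shift) / divisor, with shift < 64. */
static int shl_div_u64_ref(uint64_t a, unsigned int shift,
			   uint64_t divisor, uint64_t *result)
{
	unsigned __int128 n = (unsigned __int128)a << shift;

	/* The quotient fits in 64 bits iff the high half < divisor. */
	if ((uint64_t)(n >> 64) >= divisor)
		return 1;

	*result = (uint64_t)(n / divisor);
	return 0;
}

int main(void)
{
	uint64_t res;

	/* E.g. scaling by a 48-bit fractional TSC ratio. */
	if (!shl_div_u64_ref(1000000000ULL, 48, 3000000000ULL, &res))
		printf("(1e9 << 48) / 3e9 = %llu\n",
		       (unsigned long long)res);
	return 0;
}
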
arch/x86/kvm/vmx/vmx.c | 41 ++++++++++++++++++++++++-----------------
arch/x86/kvm/vmx/vmx.h | 2 ++
2 files changed, 26 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index dc6b6659a093..41c94f5194f6 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8218,25 +8218,32 @@ int vmx_check_intercept(struct kvm_vcpu *vcpu,
return X86EMUL_INTERCEPTED;
}
-#ifdef CONFIG_X86_64
/* (a << shift) / divisor, return 1 if overflow otherwise 0 */
static inline int u64_shl_div_u64(u64 a, unsigned int shift,
u64 divisor, u64 *result)
{
- u64 low = a << shift, high = a >> (64 - shift);
+ u64 high = a >> (64 - shift);
+#ifdef CONFIG_X86_64
+ u64 low = a << shift;
+#endif
/* To avoid the overflow on divq */
if (high >= divisor)
return 1;
+#ifdef CONFIG_X86_64
/* Low hold the result, high hold rem which is discarded */
asm("divq %2\n\t" : "=a" (low), "=d" (high) :
"rm" (divisor), "0" (low), "1" (high));
*result = low;
+#else
+ *result = mul_u64_u64_div_u64(a, 1ULL << shift, divisor);
+#endif
return 0;
}
+#ifdef CONFIG_X86_64
int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
bool *expired)
{
@@ -8314,7 +8321,21 @@ void vmx_cancel_apic_virt_timer(struct kvm_vcpu *vcpu)
tertiary_exec_controls_clearbit(to_vmx(vcpu), TERTIARY_EXEC_GUEST_APIC_TIMER);
}
-static u64 vmx_calc_deadline_l1_to_host(struct kvm_vcpu *vcpu, u64 l1_tsc)
+void vmx_set_guest_tsc_deadline_virt(struct kvm_vcpu *vcpu,
+ u64 guest_deadline_virt)
+{
+ vmcs_write64(GUEST_DEADLINE_VIR, guest_deadline_virt);
+ vmcs_write64(GUEST_DEADLINE_PHY,
+ vmx_calc_deadline_l1_to_host(vcpu, guest_deadline_virt));
+}
+
+u64 vmx_get_guest_tsc_deadline_virt(struct kvm_vcpu *vcpu)
+{
+ return vmcs_read64(GUEST_DEADLINE_VIR);
+}
+#endif
+
+u64 vmx_calc_deadline_l1_to_host(struct kvm_vcpu *vcpu, u64 l1_tsc)
{
u64 host_tsc_now = rdtsc();
u64 l1_tsc_now = kvm_read_l1_tsc(vcpu, host_tsc_now);
@@ -8354,20 +8375,6 @@ static u64 vmx_calc_deadline_l1_to_host(struct kvm_vcpu *vcpu, u64 l1_tsc)
return host_tsc;
}
-void vmx_set_guest_tsc_deadline_virt(struct kvm_vcpu *vcpu,
- u64 guest_deadline_virt)
-{
- vmcs_write64(GUEST_DEADLINE_VIR, guest_deadline_virt);
- vmcs_write64(GUEST_DEADLINE_PHY,
- vmx_calc_deadline_l1_to_host(vcpu, guest_deadline_virt));
-}
-
-u64 vmx_get_guest_tsc_deadline_virt(struct kvm_vcpu *vcpu)
-{
- return vmcs_read64(GUEST_DEADLINE_VIR);
-}
-#endif
-
void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index cb32d0fdf3b8..28625a2d17bd 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -405,6 +405,8 @@ static inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
+u64 vmx_calc_deadline_l1_to_host(struct kvm_vcpu *vcpu, u64 l1_tsc);
+
gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
--
2.45.2