Message-ID: <20240418193528.41780-9-dwmw2@infradead.org>
Date: Thu, 18 Apr 2024 20:34:47 +0100
From: David Woodhouse <dwmw2@...radead.org>
To: kvm@...r.kernel.org
Cc: Paolo Bonzini <pbonzini@...hat.com>,
Jonathan Corbet <corbet@....net>,
Sean Christopherson <seanjc@...gle.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>,
Paul Durrant <paul@....org>,
Shuah Khan <shuah@...nel.org>,
linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-kselftest@...r.kernel.org,
Oliver Upton <oliver.upton@...ux.dev>,
Marcelo Tosatti <mtosatti@...hat.com>,
jalliste@...zon.co.uk,
sveith@...zon.de
Subject: [PATCH 08/10] KVM: x86: Remove periodic global clock updates
From: David Woodhouse <dwmw@...zon.co.uk>

This effectively reverts commit 332967a3eac0 ("x86: kvm: introduce
periodic global clock updates"). The periodic update was introduced to
propagate NTP corrections to the guest KVM clock, when the KVM clock was
based on CLOCK_MONOTONIC.

However, commit 53fafdbb8b21 ("KVM: x86: switch KVMCLOCK base to
monotonic raw clock") switched to using CLOCK_MONOTONIC_RAW as the basis
for the KVM clock, avoiding the NTP frequency skew altogether.

So the periodic update serves no purpose. Remove it.

Signed-off-by: David Woodhouse <dwmw@...zon.co.uk>
---
arch/x86/kvm/x86.c | 25 -------------------------
1 file changed, 25 deletions(-)
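
As an aside (illustrative only, not part of this patch): the distinction
the commit message relies on is that CLOCK_MONOTONIC is slewed by NTP
frequency corrections, while CLOCK_MONOTONIC_RAW advances at the raw
hardware rate. A minimal userspace sketch that reads both clocks, so
their divergence under NTP slewing can be observed, might look like:

/*
 * Illustrative only: compare the NTP-adjusted CLOCK_MONOTONIC with the
 * unadjusted CLOCK_MONOTONIC_RAW. On a host where NTP is slewing the
 * clock, the difference between the two drifts over time; a kvmclock
 * based on the raw clock is not affected by that slew.
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec mono, raw;

	clock_gettime(CLOCK_MONOTONIC, &mono);
	clock_gettime(CLOCK_MONOTONIC_RAW, &raw);

	printf("CLOCK_MONOTONIC:     %lld.%09ld\n",
	       (long long)mono.tv_sec, mono.tv_nsec);
	printf("CLOCK_MONOTONIC_RAW: %lld.%09ld\n",
	       (long long)raw.tv_sec, raw.tv_nsec);
	return 0;
}

Run repeatedly on a host with NTP active and the gap between the two
readings slowly changes; that changing gap is what the removed periodic
sync used to paper over.
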
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 44b3d2a0da5b..4ec4eb850c5b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -155,9 +155,6 @@ EXPORT_SYMBOL_GPL(report_ignored_msrs);
 unsigned int min_timer_period_us = 200;
 module_param(min_timer_period_us, uint, 0644);
 
-static bool __read_mostly kvmclock_periodic_sync = true;
-module_param(kvmclock_periodic_sync, bool, 0444);
-
 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
 static u32 __read_mostly tsc_tolerance_ppm = 250;
 module_param(tsc_tolerance_ppm, uint, 0644);
@@ -3434,20 +3431,6 @@ static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
 					KVMCLOCK_UPDATE_DELAY);
 }
 
-#define KVMCLOCK_SYNC_PERIOD (300 * HZ)
-
-static void kvmclock_sync_fn(struct work_struct *work)
-{
-	struct delayed_work *dwork = to_delayed_work(work);
-	struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
-					   kvmclock_sync_work);
-	struct kvm *kvm = container_of(ka, struct kvm, arch);
-
-	schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
-	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
-			      KVMCLOCK_SYNC_PERIOD);
-}
-
 /* These helpers are safe iff @msr is known to be an MCx bank MSR. */
 static bool is_mci_control_msr(u32 msr)
 {
@@ -12416,8 +12399,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	struct kvm *kvm = vcpu->kvm;
-
 	if (mutex_lock_killable(&vcpu->mutex))
 		return;
 	vcpu_load(vcpu);
@@ -12428,10 +12409,6 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 	vcpu->arch.msr_kvm_poll_control = 1;
 
 	mutex_unlock(&vcpu->mutex);
-
-	if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0)
-		schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
-				      KVMCLOCK_SYNC_PERIOD);
 }
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -12804,7 +12781,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 #endif
 
 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
-	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
 
 	kvm_apicv_init(kvm);
 	kvm_hv_init_vm(kvm);
@@ -12844,7 +12820,6 @@ static void kvm_unload_vcpu_mmus(struct kvm *kvm)
 
 void kvm_arch_sync_events(struct kvm *kvm)
 {
-	cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
 	cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
 	kvm_free_pit(kvm);
 }
--
2.44.0