Message-Id: <1587632507-18997-6-git-send-email-wanpengli@tencent.com>
Date: Thu, 23 Apr 2020 17:01:47 +0800
From: Wanpeng Li <kernellwp@...il.com>
To: linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc: Paolo Bonzini <pbonzini@...hat.com>,
Sean Christopherson <sean.j.christopherson@...el.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>,
Haiwei Li <lihaiwei@...cent.com>
Subject: [PATCH v2 5/5] KVM: VMX: Handle preemption timer fastpath

From: Wanpeng Li <wanpengli@...cent.com>

This patch adds a fastpath for preemption-timer vmexits: when the timer
fires because the VMX-preemption timer has counted down to zero, handle
the expiry as early as possible and vmentry again immediately, skipping
the full kvm exit handling whenever it is safe to do so.
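
For context (not part of this patch): kvm_need_cancel_enter_guest() and
EXIT_FASTPATH_CONT_RUN are introduced earlier in this series. A minimal
sketch of the intended control flow, assuming the vcpu run loop re-enters
the guest on EXIT_FASTPATH_CONT_RUN (the loop and the helper name below
are illustrative, not the exact series code):

	/* Illustrative vcpu run loop; the helper name is hypothetical. */
	for (;;) {
		enum exit_fastpath_completion exit_fastpath;

		/* vmentry; on vmexit, try the fastpath handlers first. */
		exit_fastpath = run_vcpu_and_try_fastpath(vcpu);
		if (exit_fastpath == EXIT_FASTPATH_CONT_RUN)
			continue;	/* expiry handled, vmentry again */
		break;			/* fall back to full exit handling */
	}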
Tested on an SKX server.

cyclictest in guest (w/o mwait exposed, adaptive advance lapic timer is
default -1):
5632.75ns -> 4559.25ns, 19%

kvm-unit-test/vmexit.flat:

w/o APICv, w/o advance timer:
tscdeadline_immed: 4780.75 -> 3851      19.4%
tscdeadline:       7474    -> 6528.5    12.7%

w/o APICv, w/ adaptive advance timer default -1:
tscdeadline_immed: 4845.75 -> 3930.5    18.9%
tscdeadline:       6048    -> 5871.75    3%

w/ APICv, w/o advance timer:
tscdeadline_immed: 2919    -> 2467.75   15.5%
tscdeadline:       5661.75 -> 5188.25    8.4%

w/ APICv, w/ adaptive advance timer default -1:
tscdeadline_immed: 3018.5  -> 2561      15.2%
tscdeadline:       4663.75 -> 4537       2.7%

Tested-by: Haiwei Li <lihaiwei@...cent.com>
Cc: Haiwei Li <lihaiwei@...cent.com>
Signed-off-by: Wanpeng Li <wanpengli@...cent.com>
---
arch/x86/kvm/lapic.c | 19 +++++++++++++++++++
arch/x86/kvm/lapic.h | 1 +
arch/x86/kvm/vmx/vmx.c | 22 ++++++++++++++++++++++
3 files changed, 42 insertions(+)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index d652bd9..2741931 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1899,6 +1899,25 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
static void kvm_inject_apic_timer_irqs_fast(struct kvm_vcpu *vcpu);
+bool kvm_lapic_expired_hv_timer_fast(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+ struct kvm_timer *ktimer = &apic->lapic_timer;
+
+ if (!apic_lvtt_tscdeadline(apic) ||
+ !ktimer->hv_timer_in_use ||
+ atomic_read(&ktimer->pending))
+ return false;
+
+ WARN_ON(swait_active(&vcpu->wq));
+ cancel_hv_timer(apic);
+
+ ktimer->expired_tscdeadline = ktimer->tscdeadline;
+ kvm_inject_apic_timer_irqs_fast(vcpu);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer_fast);
void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 5ef1364..1b5abd8 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -252,6 +252,7 @@ bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu);
void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu);
bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu);
int kvm_set_lapic_tscdeadline_msr_fast(struct kvm_vcpu *vcpu, u64 data);
+bool kvm_lapic_expired_hv_timer_fast(struct kvm_vcpu *vcpu);
static inline enum lapic_mode kvm_apic_mode(u64 apic_base)
{
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 2613e58..527d1c1 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6569,12 +6569,34 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
}
}
+static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu);
+
+static enum exit_fastpath_completion handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (kvm_need_cancel_enter_guest(vcpu) ||
+ kvm_event_needs_reinjection(vcpu))
+ return EXIT_FASTPATH_NONE;
+
+ if (!vmx->req_immediate_exit &&
+ likely(!vmx->loaded_vmcs->hv_timer_soft_disabled) &&
+ kvm_lapic_expired_hv_timer_fast(vcpu)) {
+ trace_kvm_exit(EXIT_REASON_PREEMPTION_TIMER, vcpu, KVM_ISA_VMX);
+ return EXIT_FASTPATH_CONT_RUN;
+ }
+
+ return EXIT_FASTPATH_NONE;
+}
+
static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
if (!is_guest_mode(vcpu)) {
switch (to_vmx(vcpu)->exit_reason) {
case EXIT_REASON_MSR_WRITE:
return handle_fastpath_set_msr_irqoff(vcpu);
+ case EXIT_REASON_PREEMPTION_TIMER:
+ return handle_fastpath_preemption_timer(vcpu);
default:
return EXIT_FASTPATH_NONE;
}
--
2.7.4