Message-ID: <CANRm+Cy69VH+5w4en-Q+N85bRCBoCWNi6oEwpJGgp+MBaUUX8Q@mail.gmail.com>
Date: Mon, 20 May 2019 19:45:52 +0800
From: Wanpeng Li <kernellwp@...il.com>
To: Paolo Bonzini <pbonzini@...hat.com>
Cc: LKML <linux-kernel@...r.kernel.org>, kvm <kvm@...r.kernel.org>,
Radim Krčmář <rkrcmar@...hat.com>,
Sean Christopherson <sean.j.christopherson@...el.com>,
Liran Alon <liran.alon@...cle.com>
Subject: Re: [PATCH v4 4/5] KVM: LAPIC: Delay trace advance expire delta
On Mon, 20 May 2019 at 19:41, Paolo Bonzini <pbonzini@...hat.com> wrote:
>
> On 20/05/19 13:36, Wanpeng Li wrote:
> >> Hmm, yeah, that makes sense. The location of the tracepoint is a bit
> >> weird, but I guess we can add a comment in the code.
> > Do you need me to post a new patchset? :)
>
> No problem. The final patch that I committed is this:
>
> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
> index c12b090f4fad..f8615872ae64 100644
> --- a/arch/x86/kvm/lapic.c
> +++ b/arch/x86/kvm/lapic.c
> @@ -1502,27 +1502,27 @@ static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
>  }
> 
>  static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
> -					      u64 guest_tsc, u64 tsc_deadline)
> +					      s64 advance_expire_delta)
>  {
>  	struct kvm_lapic *apic = vcpu->arch.apic;
>  	u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
>  	u64 ns;
> 
>  	/* too early */
> -	if (guest_tsc < tsc_deadline) {
> -		ns = (tsc_deadline - guest_tsc) * 1000000ULL;
> +	if (advance_expire_delta < 0) {
> +		ns = -advance_expire_delta * 1000000ULL;
>  		do_div(ns, vcpu->arch.virtual_tsc_khz);
>  		timer_advance_ns -= min((u32)ns,
>  			timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
>  	} else {
>  		/* too late */
> -		ns = (guest_tsc - tsc_deadline) * 1000000ULL;
> +		ns = advance_expire_delta * 1000000ULL;
>  		do_div(ns, vcpu->arch.virtual_tsc_khz);
>  		timer_advance_ns += min((u32)ns,
>  			timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
>  	}
> 
> -	if (abs(guest_tsc - tsc_deadline) < LAPIC_TIMER_ADVANCE_ADJUST_DONE)
> +	if (abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_DONE)
>  		apic->lapic_timer.timer_advance_adjust_done = true;
>  	if (unlikely(timer_advance_ns > 5000)) {
>  		timer_advance_ns = 0;
> @@ -1545,13 +1545,13 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
>  	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
>  	apic->lapic_timer.expired_tscdeadline = 0;
>  	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
> -	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
> +	apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
> 
>  	if (guest_tsc < tsc_deadline)
>  		__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
> 
>  	if (unlikely(!apic->lapic_timer.timer_advance_adjust_done))
> -		adjust_lapic_timer_advance(vcpu, guest_tsc, tsc_deadline);
> +		adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
>  }
> 
>  static void start_sw_tscdeadline(struct kvm_lapic *apic)
> diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
> index d6d049ba3045..3e72a255543d 100644
> --- a/arch/x86/kvm/lapic.h
> +++ b/arch/x86/kvm/lapic.h
> @@ -32,6 +32,7 @@ struct kvm_timer {
>  	u64 tscdeadline;
>  	u64 expired_tscdeadline;
>  	u32 timer_advance_ns;
> +	s64 advance_expire_delta;
>  	atomic_t pending;			/* accumulated triggered timers */
>  	bool hv_timer_in_use;
>  	bool timer_advance_adjust_done;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index e7e57de50a3c..35631505421c 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -8008,6 +8008,13 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>  	++vcpu->stat.exits;
> 
>  	guest_exit_irqoff();
> +	if (lapic_in_kernel(vcpu)) {
> +		s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
> +		if (delta != S64_MIN) {
> +			trace_kvm_wait_lapic_expire(vcpu->vcpu_id, delta);
> +			vcpu->arch.apic->lapic_timer.advance_expire_delta = S64_MIN;
> +		}
> +	}
> 
>  	local_irq_enable();
>  	preempt_enable();
> 
> so that KVM tracks whether wait_lapic_expire was called, and does not
> invoke the tracepoint if it was not.
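
Restating the committed flow as a minimal standalone sketch (a userspace
C model only, not kernel code: kvm_timer is reduced to the single field
involved, the tracepoint becomes a printf, and initialising the sentinel
in main() is part of the model):

#include <stdint.h>
#include <stdio.h>

#define DELTA_UNSET INT64_MIN		/* stands in for S64_MIN above */

struct lapic_timer_model {
	int64_t advance_expire_delta;
};

/* Before VM entry, interrupts off: only record the delta, do not trace. */
static void record_expire_delta(struct lapic_timer_model *t,
				uint64_t guest_tsc, uint64_t tsc_deadline)
{
	t->advance_expire_delta = (int64_t)(guest_tsc - tsc_deadline);
}

/* After VM exit: emit the "tracepoint" only if a delta was recorded. */
static void trace_after_vmexit(int vcpu_id, struct lapic_timer_model *t)
{
	if (t->advance_expire_delta != DELTA_UNSET) {
		printf("kvm_wait_lapic_expire: vcpu %d, delta %lld cycles\n",
		       vcpu_id, (long long)t->advance_expire_delta);
		t->advance_expire_delta = DELTA_UNSET;	/* consume the sample */
	}
}

int main(void)
{
	struct lapic_timer_model t = { .advance_expire_delta = DELTA_UNSET };

	trace_after_vmexit(0, &t);		/* nothing recorded: no output */
	record_expire_delta(&t, 1005, 1000);	/* timer expired 5 cycles late */
	trace_after_vmexit(0, &t);		/* traces once, re-arms sentinel */
	return 0;
}
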
Looks good to me, thank you. :)
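
As a numeric sanity check of the adjustment hunk itself, the same logic
can be run in userspace (do_div() and min() are open-coded; the
STEP/DONE constants and the 2 GHz guest TSC below are assumed values for
the model, not taken from this patch):

#include <stdint.h>
#include <stdio.h>

#define ADJUST_STEP	8	/* assumed LAPIC_TIMER_ADVANCE_ADJUST_STEP */
#define ADJUST_DONE	100	/* assumed LAPIC_TIMER_ADVANCE_ADJUST_DONE */

/* Userspace model of adjust_lapic_timer_advance() after the patch. */
static uint32_t adjust(uint32_t advance_ns, int64_t delta_cycles,
		       uint64_t tsc_khz, int *done)
{
	uint64_t mag = (delta_cycles < 0) ? (uint64_t)-delta_cycles
					  : (uint64_t)delta_cycles;
	uint64_t ns = mag * 1000000ULL / tsc_khz;	/* open-coded do_div() */
	uint32_t step = advance_ns / ADJUST_STEP;
	uint32_t adj = (ns < step) ? (uint32_t)ns : step;	/* open-coded min() */

	if (delta_cycles < 0)
		advance_ns -= adj;	/* fired too early: advance less */
	else
		advance_ns += adj;	/* fired too late: advance more */

	if (mag < ADJUST_DONE)
		*done = 1;
	if (advance_ns > 5000)		/* same sanity clamp as the patch */
		advance_ns = 0;
	return advance_ns;
}

int main(void)
{
	const uint64_t tsc_khz = 2000000;	/* 2 GHz guest TSC, assumed */
	const int64_t latency_ns = 1500;	/* pretend fixed exit latency */
	uint32_t advance = 1000;
	int done = 0;

	for (int i = 0; i < 20 && !done; i++) {
		/* positive delta: the timer still expires late by this much */
		int64_t delta = (latency_ns - (int64_t)advance) *
				(int64_t)tsc_khz / 1000000;
		advance = adjust(advance, delta, tsc_khz, &done);
		printf("exit %2d: delta %5lld cycles -> timer_advance_ns %u\n",
		       i, (long long)delta, advance);
	}
	return 0;
}

With these numbers the advance converges from 1000 ns to the modeled
1500 ns exit latency within about five exits, after which the DONE check
stops further adjustment.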
Regards,
Wanpeng Li