Message-ID: <15150824.2a36.171c0394538.Coremail.linxl3@wangsu.com>
Date: Tue, 28 Apr 2020 17:58:30 +0800 (GMT+08:00)
From: 林鑫龙 <linxl3@...gsu.com>
To: "Wanpeng Li" <kernellwp@...il.com>
Cc: linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
"Paolo Bonzini" <pbonzini@...hat.com>,
"Sean Christopherson" <sean.j.christopherson@...el.com>,
"Vitaly Kuznetsov" <vkuznets@...hat.com>,
"Wanpeng Li" <wanpengli@...cent.com>,
"Jim Mattson" <jmattson@...gle.com>,
"Joerg Roedel" <joro@...tes.org>,
"Haiwei Li" <lihaiwei@...cent.com>
Subject: Re: [PATCH v4 6/7] KVM: X86: TSCDEADLINE MSR emulation fastpath
On Tuesday, 28 Apr 2020 at 14:23, Wanpeng Li <kernellwp@...il.com> wrote:
>
> From: Wanpeng Li <wanpengli@...cent.com>
>
> This patch implements a fastpath for TSCDEADLINE MSR emulation: after a
> WRMSR to the TSC-deadline MSR causes a vmexit, handle it as soon as
> possible and re-enter the guest immediately, skipping the usual KVM
> housekeeping checks when possible.
>
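Just to confirm I understand what is being optimized here: the guest side
of this is the TSC-deadline arm sequence, roughly like the untested sketch
below (Linux-guest style; guest_arm_tsc_deadline() and delta_tsc are made
up for illustration, wrmsrl()/rdtsc()/MSR_IA32_TSCDEADLINE are the usual
x86 helpers). Every such wrmsrl() is the WRMSR vmexit this patch now
handles on the fastpath and follows with an immediate vmentry:

	/* illustration only, not part of the patch */
	static void guest_arm_tsc_deadline(u64 delta_tsc)
	{
		u64 tsc = rdtsc();

		/* absolute deadline in TSC units; writing 0 disarms the timer */
		wrmsrl(MSR_IA32_TSCDEADLINE, tsc + delta_tsc);
	}
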
> Tested-by: Haiwei Li <lihaiwei@...cent.com>
> Cc: Haiwei Li <lihaiwei@...cent.com>
> Signed-off-by: Wanpeng Li <wanpengli@...cent.com>
> ---
> arch/x86/kvm/lapic.c | 18 ++++++++++++------
> arch/x86/kvm/vmx/vmx.c | 12 ++++++++----
> arch/x86/kvm/x86.c | 30 ++++++++++++++++++++++++------
> 3 files changed, 44 insertions(+), 16 deletions(-)
>
> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
> index 38f7dc9..3589237 100644
> --- a/arch/x86/kvm/lapic.c
> +++ b/arch/x86/kvm/lapic.c
> @@ -1593,7 +1593,7 @@ static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
> }
> }
>
> -static void apic_timer_expired(struct kvm_lapic *apic)
> +static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
> {
> struct kvm_vcpu *vcpu = apic->vcpu;
> struct kvm_timer *ktimer = &apic->lapic_timer;
> @@ -1604,6 +1604,12 @@ static void apic_timer_expired(struct kvm_lapic *apic)
> if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
> ktimer->expired_tscdeadline = ktimer->tscdeadline;
>
> + if (!from_timer_fn && vcpu->arch.apicv_active) {
> + WARN_ON(kvm_get_running_vcpu() != vcpu);
> + kvm_apic_inject_pending_timer_irqs(apic);
> + return;
> + }
> +
> if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
> if (apic->lapic_timer.timer_advance_ns)
> __kvm_wait_lapic_expire(vcpu);
> @@ -1643,7 +1649,7 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
> expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
> hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
> } else
> - apic_timer_expired(apic);
> + apic_timer_expired(apic, false);
>
> local_irq_restore(flags);
> }
> @@ -1751,7 +1757,7 @@ static void start_sw_period(struct kvm_lapic *apic)
>
> if (ktime_after(ktime_get(),
> apic->lapic_timer.target_expiration)) {
> - apic_timer_expired(apic);
> + apic_timer_expired(apic, false);
>
> if (apic_lvtt_oneshot(apic))
> return;
> @@ -1813,7 +1819,7 @@ static bool start_hv_timer(struct kvm_lapic *apic)
> if (atomic_read(&ktimer->pending)) {
> cancel_hv_timer(apic);
> } else if (expired) {
> - apic_timer_expired(apic);
> + apic_timer_expired(apic, false);
> cancel_hv_timer(apic);
> }
> }
> @@ -1863,7 +1869,7 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
> goto out;
> WARN_ON(swait_active(&vcpu->wq));
> cancel_hv_timer(apic);
> - apic_timer_expired(apic);
> + apic_timer_expired(apic, false);
>
> if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
> advance_periodic_target_expiration(apic);
> @@ -2369,7 +2375,7 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
> struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
> struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
>
> - apic_timer_expired(apic);
> + apic_timer_expired(apic, true);
>
> if (lapic_is_periodic(apic)) {
> advance_periodic_target_expiration(apic);
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index ce19b0e..bb5c4f1 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -5994,7 +5994,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
> if (exit_fastpath == EXIT_FASTPATH_SKIP_EMUL_INS) {
> kvm_skip_emulated_instruction(vcpu);
Can we move this kvm_skip_emulated_instruction() call into
handle_fastpath_set_msr_irqoff()? That would keep the style consistent with
the EXIT_FASTPATH_REENTER_GUEST case, which already skips the instruction
there.
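Something like the below is what I have in mind (a rough, untested sketch
against this patch, just for illustration; the skip would then happen in
one place for both fastpath kinds):

	/* in handle_fastpath_set_msr_irqoff(), x86.c */
	if (ret != EXIT_FASTPATH_NONE) {
		trace_kvm_msr_write(msr, data);
		/* skip the WRMSR for SKIP_EMUL_INS and REENTER_GUEST alike */
		kvm_skip_emulated_instruction(vcpu);
	}

	return ret;

and the EXIT_FASTPATH_SKIP_EMUL_INS branch here could then simply return 1.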
> return 1;
> - }
> + } else if (exit_fastpath == EXIT_FASTPATH_NOP)
> + return 1;
>
> if (exit_reason >= kvm_vmx_max_exit_handlers)
> goto unexpected_vmexit;
> @@ -6605,6 +6606,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
> struct vcpu_vmx *vmx = to_vmx(vcpu);
> unsigned long cr3, cr4;
>
> +REENTER_GUEST:
> /* Record the guest's net vcpu time for enforced NMI injections. */
> if (unlikely(!enable_vnmi &&
> vmx->loaded_vmcs->soft_vnmi_blocked))
> @@ -6779,10 +6781,12 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
>
> exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
> if (exit_fastpath == EXIT_FASTPATH_REENTER_GUEST) {
> - if (!kvm_vcpu_exit_request(vcpu))
> + if (!kvm_vcpu_exit_request(vcpu)) {
> vmx_sync_pir_to_irr(vcpu);
> - else
> - exit_fastpath = EXIT_FASTPATH_NOP;
> + /* static call is better with retpolines */
> + goto REENTER_GUEST;
> + }
> + exit_fastpath = EXIT_FASTPATH_NOP;
> }
>
> return exit_fastpath;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index afe052c..f3a5fe4 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1616,27 +1616,45 @@ static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data
> return 1;
> }
>
> +static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
> +{
> + if (!kvm_x86_ops.set_hv_timer ||
> + kvm_mwait_in_guest(vcpu->kvm) ||
> + kvm_can_post_timer_interrupt(vcpu))
> + return 1;
> +
> + kvm_set_lapic_tscdeadline_msr(vcpu, data);
> + return 0;
> +}
> +
> fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
> {
> u32 msr = kvm_rcx_read(vcpu);
> u64 data;
> - int ret = 0;
> + int ret = EXIT_FASTPATH_NONE;
>
> switch (msr) {
> case APIC_BASE_MSR + (APIC_ICR >> 4):
> data = kvm_read_edx_eax(vcpu);
> - ret = handle_fastpath_set_x2apic_icr_irqoff(vcpu, data);
> + if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data))
> + ret = EXIT_FASTPATH_SKIP_EMUL_INS;
> + break;
> + case MSR_IA32_TSCDEADLINE:
> + data = kvm_read_edx_eax(vcpu);
> + if (!handle_fastpath_set_tscdeadline(vcpu, data))
> + ret = EXIT_FASTPATH_REENTER_GUEST;
> break;
> default:
> - return EXIT_FASTPATH_NONE;
> + ret = EXIT_FASTPATH_NONE;
> }
>
> - if (!ret) {
> + if (ret != EXIT_FASTPATH_NONE) {
> trace_kvm_msr_write(msr, data);
> - return EXIT_FASTPATH_SKIP_EMUL_INS;
> + if (ret == EXIT_FASTPATH_REENTER_GUEST)
> + kvm_skip_emulated_instruction(vcpu);
> }
>
> - return EXIT_FASTPATH_NONE;
> + return ret;
> }
> EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
>
> --
> 2.7.4