lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <15150824.2a36.171c0394538.Coremail.linxl3@wangsu.com>
Date:   Tue, 28 Apr 2020 17:58:30 +0800 (GMT+08:00)
From:   林鑫龙 <linxl3@...gsu.com>
To:     "Wanpeng Li" <kernellwp@...il.com>
Cc:     linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
        "Paolo Bonzini" <pbonzini@...hat.com>,
        "Sean Christopherson" <sean.j.christopherson@...el.com>,
        "Vitaly Kuznetsov" <vkuznets@...hat.com>,
        "Wanpeng Li" <wanpengli@...cent.com>,
        "Jim Mattson" <jmattson@...gle.com>,
        "Joerg Roedel" <joro@...tes.org>,
        "Haiwei Li" <lihaiwei@...cent.com>
Subject: Re: [PATCH v4 6/7] KVM: X86: TSCDEADLINE MSR emulation fastpath

On Tuesday, 28 Apr 2020 at 14:23, Wanpeng Li <kernellwp@...il.com> wrote:
> 
> From: Wanpeng Li <wanpengli@...cent.com>
> 
> This patch implements tscdeadline msr emulation fastpath, after wrmsr 
> tscdeadline vmexit, handle it as soon as possible and vmentry immediately 
> without checking various kvm stuff when possible.
> 
> Tested-by: Haiwei Li <lihaiwei@...cent.com>
> Cc: Haiwei Li <lihaiwei@...cent.com>
> Signed-off-by: Wanpeng Li <wanpengli@...cent.com>
> ---
>  arch/x86/kvm/lapic.c   | 18 ++++++++++++------
>  arch/x86/kvm/vmx/vmx.c | 12 ++++++++----
>  arch/x86/kvm/x86.c     | 30 ++++++++++++++++++++++++------
>  3 files changed, 44 insertions(+), 16 deletions(-)
> 
> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
> index 38f7dc9..3589237 100644
> --- a/arch/x86/kvm/lapic.c
> +++ b/arch/x86/kvm/lapic.c
> @@ -1593,7 +1593,7 @@ static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
>  	}
>  }
>  
> -static void apic_timer_expired(struct kvm_lapic *apic)
> +static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
>  {
>  	struct kvm_vcpu *vcpu = apic->vcpu;
>  	struct kvm_timer *ktimer = &apic->lapic_timer;
> @@ -1604,6 +1604,12 @@ static void apic_timer_expired(struct kvm_lapic *apic)
>  	if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
>  		ktimer->expired_tscdeadline = ktimer->tscdeadline;
>  
> +	if (!from_timer_fn && vcpu->arch.apicv_active) {
> +		WARN_ON(kvm_get_running_vcpu() != vcpu);
> +		kvm_apic_inject_pending_timer_irqs(apic);
> +		return;
> +	}
> +
>  	if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
>  		if (apic->lapic_timer.timer_advance_ns)
>  			__kvm_wait_lapic_expire(vcpu);
> @@ -1643,7 +1649,7 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
>  		expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
>  		hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
>  	} else
> -		apic_timer_expired(apic);
> +		apic_timer_expired(apic, false);
>  
>  	local_irq_restore(flags);
>  }
> @@ -1751,7 +1757,7 @@ static void start_sw_period(struct kvm_lapic *apic)
>  
>  	if (ktime_after(ktime_get(),
>  			apic->lapic_timer.target_expiration)) {
> -		apic_timer_expired(apic);
> +		apic_timer_expired(apic, false);
>  
>  		if (apic_lvtt_oneshot(apic))
>  			return;
> @@ -1813,7 +1819,7 @@ static bool start_hv_timer(struct kvm_lapic *apic)
>  		if (atomic_read(&ktimer->pending)) {
>  			cancel_hv_timer(apic);
>  		} else if (expired) {
> -			apic_timer_expired(apic);
> +			apic_timer_expired(apic, false);
>  			cancel_hv_timer(apic);
>  		}
>  	}
> @@ -1863,7 +1869,7 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
>  		goto out;
>  	WARN_ON(swait_active(&vcpu->wq));
>  	cancel_hv_timer(apic);
> -	apic_timer_expired(apic);
> +	apic_timer_expired(apic, false);
>  
>  	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
>  		advance_periodic_target_expiration(apic);
> @@ -2369,7 +2375,7 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
>  	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
>  	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
>  
> -	apic_timer_expired(apic);
> +	apic_timer_expired(apic, true);
>  
>  	if (lapic_is_periodic(apic)) {
>  		advance_periodic_target_expiration(apic);
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index ce19b0e..bb5c4f1 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -5994,7 +5994,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
>  	if (exit_fastpath == EXIT_FASTPATH_SKIP_EMUL_INS) {
>  		kvm_skip_emulated_instruction(vcpu);

Can we move this kvm_skip_emulated_instruction to handle_fastpath_set_msr_irqoff? That would keep the style consistent.

>  		return 1;
> -	}
> +	} else if (exit_fastpath == EXIT_FASTPATH_NOP)
> +		return 1;
>  
>  	if (exit_reason >= kvm_vmx_max_exit_handlers)
>  		goto unexpected_vmexit;
> @@ -6605,6 +6606,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	unsigned long cr3, cr4;
>  
> +REENTER_GUEST:
>  	/* Record the guest's net vcpu time for enforced NMI injections. */
>  	if (unlikely(!enable_vnmi &&
>  		     vmx->loaded_vmcs->soft_vnmi_blocked))
> @@ -6779,10 +6781,12 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  
>  	exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
>  	if (exit_fastpath == EXIT_FASTPATH_REENTER_GUEST) {
> -		if (!kvm_vcpu_exit_request(vcpu))
> +		if (!kvm_vcpu_exit_request(vcpu)) {
>  			vmx_sync_pir_to_irr(vcpu);
> -		else
> -			exit_fastpath = EXIT_FASTPATH_NOP;
> +			/* static call is better with retpolines */
> +			goto REENTER_GUEST;
> +		}
> +		exit_fastpath = EXIT_FASTPATH_NOP;
>  	}
>  
>  	return exit_fastpath;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index afe052c..f3a5fe4 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1616,27 +1616,45 @@ static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data
>  	return 1;
>  }
>  
> +static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
> +{
> +	if (!kvm_x86_ops.set_hv_timer ||
> +		kvm_mwait_in_guest(vcpu->kvm) ||
> +		kvm_can_post_timer_interrupt(vcpu))
> +		return 1;
> +
> +	kvm_set_lapic_tscdeadline_msr(vcpu, data);
> +	return 0;
> +}
> +
>  fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
>  {
>  	u32 msr = kvm_rcx_read(vcpu);
>  	u64 data;
> -	int ret = 0;
> +	int ret = EXIT_FASTPATH_NONE;
>  
>  	switch (msr) {
>  	case APIC_BASE_MSR + (APIC_ICR >> 4):
>  		data = kvm_read_edx_eax(vcpu);
> -		ret = handle_fastpath_set_x2apic_icr_irqoff(vcpu, data);
> +		if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data))
> +			ret = EXIT_FASTPATH_SKIP_EMUL_INS;
> +		break;
> +	case MSR_IA32_TSCDEADLINE:
> +		data = kvm_read_edx_eax(vcpu);
> +		if (!handle_fastpath_set_tscdeadline(vcpu, data))
> +			ret = EXIT_FASTPATH_REENTER_GUEST;
> +		break;
>  	default:
> -		return EXIT_FASTPATH_NONE;
> +		ret = EXIT_FASTPATH_NONE;
>  	}
>  
> -	if (!ret) {
> +	if (ret != EXIT_FASTPATH_NONE) {
>  		trace_kvm_msr_write(msr, data);
> -		return EXIT_FASTPATH_SKIP_EMUL_INS;
> +		if (ret == EXIT_FASTPATH_REENTER_GUEST)
> +			kvm_skip_emulated_instruction(vcpu);

>  	}
>  
> -	return EXIT_FASTPATH_NONE;
> +	return ret;
>  }
>  EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
>  
> -- 
> 2.7.4

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ