Message-ID: <914f1aec-de5e-dcc9-9f99-4ffbcd7e8a53@redhat.com>
Date:   Sat, 9 May 2020 15:25:59 +0200
From:   Paolo Bonzini <pbonzini@...hat.com>
To:     Jon Cargille <jcargill@...gle.com>,
        Sean Christopherson <sean.j.christopherson@...el.com>,
        Vitaly Kuznetsov <vkuznets@...hat.com>,
        Wanpeng Li <wanpengli@...cent.com>,
        Jim Mattson <jmattson@...gle.com>,
        Joerg Roedel <joro@...tes.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
        "H. Peter Anvin" <hpa@...or.com>, x86@...nel.org,
        kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc:     David Matlack <dmatlack@...gle.com>
Subject: Re: [PATCH] kvm: add halt-polling cpu usage stats

On 08/05/20 20:22, Jon Cargille wrote:
> From: David Matlack <dmatlack@...gle.com>
> 
> Two new stats for exposing halt-polling cpu usage:
> halt_poll_success_ns
> halt_poll_fail_ns
> 
> The sum of these two stats is the total cpu time spent polling. "Success"
> means the VCPU polled until a virtual interrupt was delivered. "Fail"
> means the VCPU had to schedule out (either because the maximum poll time
> was reached or it needed to yield the CPU).
> 
> To avoid touching every arch's kvm_vcpu_stat struct, only update and
> export halt-polling cpu usage stats if we're on x86.

I fixed all the other architectures and queued it, thanks.

Paolo

> 
> Exporting cpu usage as a u64 and in nanoseconds means the counters will
> overflow after ~500 years, which seems reasonably large.
> 
> Signed-off-by: David Matlack <dmatlack@...gle.com>
> Signed-off-by: Jon Cargille <jcargill@...gle.com>
> Reviewed-by: Jim Mattson <jmattson@...gle.com>
> 
> ---
>  arch/x86/include/asm/kvm_host.h |  2 ++
>  arch/x86/kvm/x86.c              |  2 ++
>  virt/kvm/kvm_main.c             | 20 +++++++++++++++++---
>  3 files changed, 21 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index a239a297be33..3287159ab15b 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1032,6 +1032,8 @@ struct kvm_vcpu_stat {
>  	u64 irq_injections;
>  	u64 nmi_injections;
>  	u64 req_event;
> +	u64 halt_poll_success_ns;
> +	u64 halt_poll_fail_ns;
>  };
>  
>  struct x86_instruction_info;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 8c0b77ac8dc6..9736d91ce877 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -217,6 +217,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
>  	VCPU_STAT("nmi_injections", nmi_injections),
>  	VCPU_STAT("req_event", req_event),
>  	VCPU_STAT("l1d_flush", l1d_flush),
> +	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
> +	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
>  	VM_STAT("mmu_shadow_zapped", mmu_shadow_zapped),
>  	VM_STAT("mmu_pte_write", mmu_pte_write),
>  	VM_STAT("mmu_pte_updated", mmu_pte_updated),
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 33e1eee96f75..348b4a6bde53 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -2664,19 +2664,30 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
>  	return ret;
>  }
>  
> +static inline void
> +update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited)
> +{
> +#ifdef CONFIG_X86
> +	if (waited)
> +		vcpu->stat.halt_poll_fail_ns += poll_ns;
> +	else
> +		vcpu->stat.halt_poll_success_ns += poll_ns;
> +#endif
> +}
> +
>  /*
>   * The vCPU has executed a HLT instruction with in-kernel mode enabled.
>   */
>  void kvm_vcpu_block(struct kvm_vcpu *vcpu)
>  {
> -	ktime_t start, cur;
> +	ktime_t start, cur, poll_end;
>  	DECLARE_SWAITQUEUE(wait);
>  	bool waited = false;
>  	u64 block_ns;
>  
>  	kvm_arch_vcpu_blocking(vcpu);
>  
> -	start = cur = ktime_get();
> +	start = cur = poll_end = ktime_get();
>  	if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
>  		ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
>  
> @@ -2692,7 +2703,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
>  					++vcpu->stat.halt_poll_invalid;
>  				goto out;
>  			}
> -			cur = ktime_get();
> +			poll_end = cur = ktime_get();
>  		} while (single_task_running() && ktime_before(cur, stop));
>  	}
>  
> @@ -2712,6 +2723,9 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
>  	kvm_arch_vcpu_unblocking(vcpu);
>  	block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
>  
> +	update_halt_poll_stats(
> +		vcpu, ktime_to_ns(ktime_sub(poll_end, start)), waited);
> +
>  	if (!kvm_arch_no_poll(vcpu)) {
>  		if (!vcpu_valid_wakeup(vcpu)) {
>  			shrink_halt_poll_ns(vcpu);
> 
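For reference, the ~500-year figure in the commit message checks out:
2^64 ns is about 1.8 * 10^19 ns, i.e. roughly 5.8 * 10^11 seconds, or
~584 years of accumulated polling time before a u64 nanosecond counter
wraps.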

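Once the patch lands, the two counters appear alongside the other
VCPU_STAT entries, so they can be read from KVM's aggregate debugfs
stats and summed to get the total cpu time spent halt-polling, as the
commit message notes. Below is a minimal user-space sketch; it assumes
debugfs is mounted at /sys/kernel/debug and readable, and that the file
names match the VCPU_STAT strings above -- the helper and error handling
are illustrative, not part of the patch.

/*
 * Illustrative reader: sum halt_poll_success_ns and halt_poll_fail_ns
 * from KVM's aggregate debugfs stats to get the total cpu time spent
 * halt-polling. Assumes debugfs is mounted at /sys/kernel/debug.
 */
#include <stdio.h>
#include <stdint.h>

static int read_kvm_stat(const char *name, uint64_t *val)
{
	unsigned long long v;
	char path[256];
	FILE *f;
	int ok;

	snprintf(path, sizeof(path), "/sys/kernel/debug/kvm/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	ok = (fscanf(f, "%llu", &v) == 1);
	fclose(f);
	if (!ok)
		return -1;
	*val = v;
	return 0;
}

int main(void)
{
	uint64_t success_ns, fail_ns;

	if (read_kvm_stat("halt_poll_success_ns", &success_ns) ||
	    read_kvm_stat("halt_poll_fail_ns", &fail_ns)) {
		fprintf(stderr, "could not read kvm halt-poll stats\n");
		return 1;
	}

	/* Per the commit message, the sum is the total cpu time spent polling. */
	printf("halt-poll: %llu ns total (success %llu ns, fail %llu ns)\n",
	       (unsigned long long)(success_ns + fail_ns),
	       (unsigned long long)success_ns,
	       (unsigned long long)fail_ns);
	return 0;
}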