Message-ID: <f38141478fa37ddff287b48c146261fe7d878379.camel@redhat.com>
Date:   Wed, 27 Apr 2022 23:40:53 +0300
From:   Maxim Levitsky <mlevitsk@...hat.com>
To:     Paolo Bonzini <pbonzini@...hat.com>, linux-kernel@...r.kernel.org,
        kvm@...r.kernel.org
Cc:     seanjc@...gle.com, stable@...r.kernel.org
Subject: Re: [PATCH 1/3] KVM: x86: make vendor code check for all nested
 events

On Wed, 2022-04-27 at 13:37 -0400, Paolo Bonzini wrote:
> Right now, the VMX preemption timer is special cased via the
> hv_timer_pending, but the purpose of the callback can be easily
> extended to observing any event that can occur only in non-root
> mode.  Interrupts, NMIs etc. are already handled properly by
> the *_interrupt_allowed callbacks, so what is missing is only
> MTF.  Check it in the newly-renamed callback, so that
> kvm_vcpu_running's call to kvm_check_nested_events
> becomes redundant.
> 
> Cc: stable@...r.kernel.org
> Reported-by: Maxim Levitsky <mlevitsk@...hat.com>
> Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
> ---
>  arch/x86/include/asm/kvm_host.h | 2 +-
>  arch/x86/kvm/vmx/nested.c       | 7 ++++++-
>  arch/x86/kvm/x86.c              | 8 ++++----
>  3 files changed, 11 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 4ff36610af6a..e2e4f60159e9 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1504,7 +1504,7 @@ struct kvm_x86_ops {
>  struct kvm_x86_nested_ops {
>  	void (*leave_nested)(struct kvm_vcpu *vcpu);
>  	int (*check_events)(struct kvm_vcpu *vcpu);
> -	bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
> +	bool (*has_events)(struct kvm_vcpu *vcpu);
>  	void (*triple_fault)(struct kvm_vcpu *vcpu);
>  	int (*get_state)(struct kvm_vcpu *vcpu,
>  			 struct kvm_nested_state __user *user_kvm_nested_state,
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index 856c87563883..54672025c3a1 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -3857,6 +3857,11 @@ static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
>  	       to_vmx(vcpu)->nested.preemption_timer_expired;
>  }
>  
> +static bool vmx_has_nested_events(struct kvm_vcpu *vcpu)
> +{
Typo: this won't compile as-is; 'vmx' is used below but never declared. It needs 'struct vcpu_vmx *vmx = to_vmx(vcpu);' (see the corrected sketch below the hunk).

> +	return nested_vmx_preemption_timer_pending(vcpu) || vmx->nested.mtf_pending;
> +}
> +
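For reference, the whole helper as it presumably should read once the
missing declaration is added (just a sketch of the fix, not something
I have compile-tested here):

	static bool vmx_has_nested_events(struct kvm_vcpu *vcpu)
	{
		struct vcpu_vmx *vmx = to_vmx(vcpu);

		/* Both events can only fire in non-root mode. */
		return nested_vmx_preemption_timer_pending(vcpu) ||
		       vmx->nested.mtf_pending;
	}
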
>  static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
> @@ -6809,7 +6814,7 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
>  struct kvm_x86_nested_ops vmx_nested_ops = {
>  	.leave_nested = vmx_leave_nested,
>  	.check_events = vmx_check_nested_events,
> -	.hv_timer_pending = nested_vmx_preemption_timer_pending,
> +	.has_events = vmx_has_nested_events,
>  	.triple_fault = nested_vmx_triple_fault,
>  	.get_state = vmx_get_nested_state,
>  	.set_state = vmx_set_nested_state,
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index a6ab19afc638..0e73607b02bd 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -9471,8 +9471,8 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
>  	}
>  
>  	if (is_guest_mode(vcpu) &&
> -	    kvm_x86_ops.nested_ops->hv_timer_pending &&
> -	    kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
> +	    kvm_x86_ops.nested_ops->has_events &&
> +	    kvm_x86_ops.nested_ops->has_events(vcpu))
>  		*req_immediate_exit = true;
>  
>  	WARN_ON(vcpu->arch.exception.pending);
> @@ -12183,8 +12183,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
>  		return true;
>  
>  	if (is_guest_mode(vcpu) &&
> -	    kvm_x86_ops.nested_ops->hv_timer_pending &&
> -	    kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
> +	    kvm_x86_ops.nested_ops->has_events &&
> +	    kvm_x86_ops.nested_ops->has_events(vcpu))
Nitpick: wouldn't it make sense to use a conditional static call here instead? (Sketch after the quoted hunk.)

>  		return true;
>  
>  	return false;
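
Roughly what I have in mind, purely as a sketch with hypothetical
names (nested_ops is a plain function-pointer table today, and since
has_events() returns bool it would need the RET0 flavour rather than
static_call_cond()):

	/* Hypothetical: if the op were declared optional-with-RET0, e.g.
	 *
	 *	KVM_X86_OP_OPTIONAL_RET0(nested_has_events)
	 *
	 * a missing callback would compile to "return 0" and the NULL
	 * check could be dropped at both call sites:
	 */
	if (is_guest_mode(vcpu) &&
	    static_call(kvm_x86_nested_has_events)(vcpu))
		return true;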


Nitpicks aside,

Reviewed-by: Maxim Levitsky <mlevitsk@...hat.com>


I wasn't able to test this on my Intel laptop; out of the blue I am getting this in QEMU:

'cpuid_data is full, no space for cpuid(eax:0x8000001d,ecx:0x3e)'

I will investigate tomorrow.

Best regards,
	Maxim Levitsky

