Date:   Wed, 6 Apr 2022 14:47:52 +0200
From:   Paolo Bonzini <pbonzini@...hat.com>
To:     isaku.yamahata@...el.com, kvm@...r.kernel.org,
        linux-kernel@...r.kernel.org
Cc:     isaku.yamahata@...il.com, Jim Mattson <jmattson@...gle.com>,
        erdemaktas@...gle.com, Connor Kuehl <ckuehl@...hat.com>,
        Sean Christopherson <seanjc@...gle.com>
Subject: Re: [RFC PATCH v5 080/104] KVM: TDX: Implement methods to inject NMI

On 3/4/22 20:49, isaku.yamahata@...el.com wrote:
> From: Isaku Yamahata <isaku.yamahata@...el.com>
> 
> The TDX vcpu control structure defines one bit for a pending NMI: the VMM
> requests NMI injection by setting the bit, without needing to know the TDX
> vcpu's NMI state.  Because the vcpu state is protected, the VMM cannot
> observe the NMI state of a TDX vcpu; the TDX module handles the actual
> injection and the NMI state transitions.
> 
> Add the NMI methods and treat an NMI as always injectable.
> 
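
For anyone following the flow: userspace typically requests an NMI with the
KVM_NMI vcpu ioctl, which eventually reaches the .set_nmi hook this patch
wires up.  A minimal sketch of that trigger, just for illustration (my own,
not part of the patch; assumes an already-created vCPU fd):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Ask KVM to inject an NMI into this vCPU.  For a TDX guest this
	 * eventually lands in vt_inject_nmi() -> tdx_inject_nmi() below. */
	static int request_nmi(int vcpu_fd)
	{
		return ioctl(vcpu_fd, KVM_NMI);
	}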
> Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
> ---
>   arch/x86/kvm/vmx/main.c    | 62 +++++++++++++++++++++++++++++++++++---
>   arch/x86/kvm/vmx/tdx.c     |  5 +++
>   arch/x86/kvm/vmx/x86_ops.h |  2 ++
>   3 files changed, 64 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> index 404a260796e4..aa84c13f8ee1 100644
> --- a/arch/x86/kvm/vmx/main.c
> +++ b/arch/x86/kvm/vmx/main.c
> @@ -216,6 +216,58 @@ static void vt_flush_tlb_guest(struct kvm_vcpu *vcpu)
>   	vmx_flush_tlb_guest(vcpu);
>   }
>   
> +static void vt_inject_nmi(struct kvm_vcpu *vcpu)
> +{
> +	if (is_td_vcpu(vcpu))
> +		return tdx_inject_nmi(vcpu);
> +
> +	vmx_inject_nmi(vcpu);
> +}
> +
> +static int vt_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
> +{
> +	/*
> +	 * The TDX module manages NMI windows and NMI reinjection, and hides NMI
> +	 * blocking; all KVM can do is throw an NMI over the wall.
> +	 */
> +	if (is_td_vcpu(vcpu))
> +		return true;
> +
> +	return vmx_nmi_allowed(vcpu, for_injection);
> +}
> +
> +static bool vt_get_nmi_mask(struct kvm_vcpu *vcpu)
> +{
> +	/*
> +	 * Assume NMIs are always unmasked.  KVM could query PEND_NMI and treat
> +	 * NMIs as masked if a previous NMI is still pending, but SEAMCALLs are
> +	 * expensive and the end result is unchanged as the only relevant usage
> +	 * of get_nmi_mask() is to limit the number of pending NMIs, i.e. it
> +	 * only changes whether KVM or the TDX module drops an NMI.
> +	 */
> +	if (is_td_vcpu(vcpu))
> +		return false;
> +
> +	return vmx_get_nmi_mask(vcpu);
> +}
> +
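
The trade-off described above looks right to me.  For contrast, the rejected
alternative would be something like the following (hypothetical; it assumes a
td_management_read8() accessor mirroring the td_management_write8() used in
tdx_inject_nmi() later in this patch):

	/* NOT suggested: query PEND_NMI with a SEAMCALL on every call.
	 * Functionally this only changes which component drops an excess
	 * NMI, while adding a SEAMCALL to a hot query path. */
	static bool vt_get_nmi_mask(struct kvm_vcpu *vcpu)
	{
		if (is_td_vcpu(vcpu))
			return td_management_read8(to_tdx(vcpu),
						   TD_VCPU_PEND_NMI);

		return vmx_get_nmi_mask(vcpu);
	}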
> +static void vt_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
> +{
> +	if (is_td_vcpu(vcpu))
> +		return;
> +
> +	vmx_set_nmi_mask(vcpu, masked);
> +}
> +
> +static void vt_enable_nmi_window(struct kvm_vcpu *vcpu)
> +{
> +	/* See the comment in vt_get_nmi_mask(). */
> +	if (is_td_vcpu(vcpu))
> +		return;
> +
> +	vmx_enable_nmi_window(vcpu);
> +}
> +
>   static void vt_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
>   			int pgd_level)
>   {
> @@ -366,14 +418,14 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
>   	.get_interrupt_shadow = vt_get_interrupt_shadow,
>   	.patch_hypercall = vmx_patch_hypercall,
>   	.set_irq = vt_inject_irq,
> -	.set_nmi = vmx_inject_nmi,
> +	.set_nmi = vt_inject_nmi,
>   	.queue_exception = vmx_queue_exception,
>   	.cancel_injection = vt_cancel_injection,
>   	.interrupt_allowed = vt_interrupt_allowed,
> -	.nmi_allowed = vmx_nmi_allowed,
> -	.get_nmi_mask = vmx_get_nmi_mask,
> -	.set_nmi_mask = vmx_set_nmi_mask,
> -	.enable_nmi_window = vmx_enable_nmi_window,
> +	.nmi_allowed = vt_nmi_allowed,
> +	.get_nmi_mask = vt_get_nmi_mask,
> +	.set_nmi_mask = vt_set_nmi_mask,
> +	.enable_nmi_window = vt_enable_nmi_window,
>   	.enable_irq_window = vt_enable_irq_window,
>   	.update_cr8_intercept = vmx_update_cr8_intercept,
>   	.set_virtual_apic_mode = vmx_set_virtual_apic_mode,
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index bdc658ca9e4f..273898de9f7a 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -763,6 +763,11 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu)
>   	return EXIT_FASTPATH_NONE;
>   }
>   
> +void tdx_inject_nmi(struct kvm_vcpu *vcpu)
> +{
> +	td_management_write8(to_tdx(vcpu), TD_VCPU_PEND_NMI, 1);
> +}
> +
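
For reference, the generic code that queues the NMI before .set_nmi fires is
roughly the following (paraphrased from memory of arch/x86/kvm/x86.c; the
exact details may vary by tree):

	void kvm_inject_nmi(struct kvm_vcpu *vcpu)
	{
		/* Queue the NMI; the actual injection attempt happens on
		 * the next vcpu entry, which is where .set_nmi, and thus
		 * tdx_inject_nmi(), runs. */
		atomic_inc(&vcpu->arch.nmi_queued);
		kvm_make_request(KVM_REQ_NMI, vcpu);
		kvm_vcpu_kick(vcpu);
	}

Since PEND_NMI is a single bit, a second NMI injected while one is still
pending is absorbed; as the comment in vt_get_nmi_mask() notes, that only
changes whether KVM or the TDX module is the one dropping the extra NMI.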
>   void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int pgd_level)
>   {
>   	td_vmcs_write64(to_tdx(vcpu), SHARED_EPT_POINTER, root_hpa & PAGE_MASK);
> diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
> index c3768a20347f..31be5e8a1d5c 100644
> --- a/arch/x86/kvm/vmx/x86_ops.h
> +++ b/arch/x86/kvm/vmx/x86_ops.h
> @@ -150,6 +150,7 @@ void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
>   void tdx_apicv_post_state_restore(struct kvm_vcpu *vcpu);
>   void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
>   			   int trig_mode, int vector);
> +void tdx_inject_nmi(struct kvm_vcpu *vcpu);
>   
>   int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);
>   int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
> @@ -180,6 +181,7 @@ static inline void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) {}
>   static inline void tdx_apicv_post_state_restore(struct kvm_vcpu *vcpu) {}
>   static inline void tdx_deliver_interrupt(
>   	struct kvm_lapic *apic, int delivery_mode, int trig_mode, int vector) {}
> +static inline void tdx_inject_nmi(struct kvm_vcpu *vcpu) {}
>   
>   static inline int tdx_vm_ioctl(struct kvm *kvm, void __user *argp) { return -EOPNOTSUPP; }
>   static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { return -EOPNOTSUPP; }

Reviewed-by: Paolo Bonzini <pbonzini@...hat.com>
