Date:   Mon, 16 Jul 2018 10:09:12 -0700
From:   Marc Orr <marcorr@...gle.com>
To:     gregkh@...uxfoundation.org
Cc:     linux-kernel@...r.kernel.org, stable@...r.kernel.org,
        syzkaller@...glegroups.com, rkrcmar@...hat.com
Subject: Re: [PATCH 4.17 62/67] kvm: vmx: Nested VM-entry prereqs for event inj.

On Mon, Jul 16, 2018 at 12:39 AM Greg Kroah-Hartman
<gregkh@...uxfoundation.org> wrote:
>
> 4.17-stable review patch.  If anyone has any objections, please let me know.
>
> ------------------
>
> From: Marc Orr <marcorr@...gle.com>
>
> commit 0447378a4a793da008451fad50bc0f93e9675ae6 upstream.
>
> This patch extends the checks done prior to a nested VM entry.
> Specifically, it extends the check_vmentry_prereqs function with checks
> for fields relevant to the VM-entry event injection information, as
> described in the Intel SDM, volume 3.
>
> This patch is motivated by a syzkaller bug, where a bad VM-entry
> interruption information field is generated in the VMCS02, which causes
> the nested VM launch to fail. Then, KVM fails to resume L1.
>
> While KVM should be improved to correctly resume L1 execution after a
> failed nested launch, this change is justified because the existing code
> to resume L1 is flaky/ad-hoc and the test coverage for resuming L1 is
> sparse.
>
> Reported-by: syzbot <syzkaller@...glegroups.com>
> Signed-off-by: Marc Orr <marcorr@...gle.com>
> [Removed comment whose parts were describing previous revisions and the
>  rest was obvious from function/variable naming. - Radim]
> Signed-off-by: Radim Krčmář <rkrcmar@...hat.com>
> Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
>
> ---
>  arch/x86/include/asm/vmx.h |    3 ++
>  arch/x86/kvm/vmx.c         |   67 +++++++++++++++++++++++++++++++++++++++++++++
>  arch/x86/kvm/x86.h         |    9 ++++++
>  3 files changed, 79 insertions(+)
>
> --- a/arch/x86/include/asm/vmx.h
> +++ b/arch/x86/include/asm/vmx.h
> @@ -114,6 +114,7 @@
>  #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK    0x0000001f
>  #define VMX_MISC_SAVE_EFER_LMA                 0x00000020
>  #define VMX_MISC_ACTIVITY_HLT                  0x00000040
> +#define VMX_MISC_ZERO_LEN_INS                  0x40000000
>
>  /* VMFUNC functions */
>  #define VMX_VMFUNC_EPTP_SWITCHING               0x00000001
> @@ -349,11 +350,13 @@ enum vmcs_field {
>  #define VECTORING_INFO_VALID_MASK              INTR_INFO_VALID_MASK
>
>  #define INTR_TYPE_EXT_INTR              (0 << 8) /* external interrupt */
> +#define INTR_TYPE_RESERVED              (1 << 8) /* reserved */
>  #define INTR_TYPE_NMI_INTR             (2 << 8) /* NMI */
>  #define INTR_TYPE_HARD_EXCEPTION       (3 << 8) /* processor exception */
>  #define INTR_TYPE_SOFT_INTR             (4 << 8) /* software interrupt */
>  #define INTR_TYPE_PRIV_SW_EXCEPTION    (5 << 8) /* ICE breakpoint - undocumented */
>  #define INTR_TYPE_SOFT_EXCEPTION       (6 << 8) /* software exception */
> +#define INTR_TYPE_OTHER_EVENT           (7 << 8) /* other event */
>
>  /* GUEST_INTERRUPTIBILITY_INFO flags. */
>  #define GUEST_INTR_STATE_STI           0x00000001
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -1510,6 +1510,17 @@ static inline unsigned nested_cpu_vmx_mi
>         return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
>  }
>
> +static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
> +{
> +       return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
> +}
> +
> +static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
> +{
> +       return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
> +                       CPU_BASED_MONITOR_TRAP_FLAG;
> +}
> +
>  static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
>  {
>         return vmcs12->cpu_based_vm_exec_control & bit;
> @@ -11364,6 +11375,62 @@ static int check_vmentry_prereqs(struct
>             !nested_cr3_valid(vcpu, vmcs12->host_cr3))
>                 return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
>
> +       /*
> +        * From the Intel SDM, volume 3:
> +        * Fields relevant to VM-entry event injection must be set properly.
> +        * These fields are the VM-entry interruption-information field, the
> +        * VM-entry exception error code, and the VM-entry instruction length.
> +        */
> +       if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
> +               u32 intr_info = vmcs12->vm_entry_intr_info_field;
> +               u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
> +               u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
> +               bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
> +               bool should_have_error_code;
> +               bool urg = nested_cpu_has2(vmcs12,
> +                                          SECONDARY_EXEC_UNRESTRICTED_GUEST);
> +               bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
> +
> +               /* VM-entry interruption-info field: interruption type */
> +               if (intr_type == INTR_TYPE_RESERVED ||
> +                   (intr_type == INTR_TYPE_OTHER_EVENT &&
> +                    !nested_cpu_supports_monitor_trap_flag(vcpu)))
> +                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
> +
> +               /* VM-entry interruption-info field: vector */
> +               if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
> +                   (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
> +                   (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
> +                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
> +
> +               /* VM-entry interruption-info field: deliver error code */
> +               should_have_error_code =
> +                       intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
> +                       x86_exception_has_error_code(vector);
> +               if (has_error_code != should_have_error_code)
> +                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
> +
> +               /* VM-entry exception error code */
> +               if (has_error_code &&
> +                   vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
> +                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
> +
> +               /* VM-entry interruption-info field: reserved bits */
> +               if (intr_info & INTR_INFO_RESVD_BITS_MASK)
> +                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
> +
> +               /* VM-entry instruction length */
> +               switch (intr_type) {
> +               case INTR_TYPE_SOFT_EXCEPTION:
> +               case INTR_TYPE_SOFT_INTR:
> +               case INTR_TYPE_PRIV_SW_EXCEPTION:
> +                       if ((vmcs12->vm_entry_instruction_len > 15) ||
> +                           (vmcs12->vm_entry_instruction_len == 0 &&
> +                            !nested_cpu_has_zero_length_injection(vcpu)))
> +                               return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
> +               }
> +       }
> +
>         return 0;
>  }
>
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -110,6 +110,15 @@ static inline bool is_la57_mode(struct k
>  #endif
>  }
>
> +static inline bool x86_exception_has_error_code(unsigned int vector)
> +{
> +       static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
> +                       BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
> +                       BIT(PF_VECTOR) | BIT(AC_VECTOR);
> +
> +       return (1U << vector) & exception_has_error_code;
> +}
> +
>  static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
>  {
>         return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
>
>
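
For anyone reviewing without the SDM at hand, here is a rough standalone
sketch (not part of the patch; the macro names are illustrative, not the
kernel's) of how the VM-entry interruption-information field is laid out
per Intel SDM vol. 3, and the kind of value the new prereq checks accept
versus reject:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout; names here are made up for the example. */
#define II_VECTOR(v)        ((uint32_t)(v) & 0xff)        /* bits 7:0  */
#define II_TYPE(t)          (((uint32_t)(t) & 0x7) << 8)  /* bits 10:8 */
#define II_DELIVER_CODE     (1u << 11)                    /* bit 11    */
#define II_VALID            (1u << 31)                    /* bit 31    */

int main(void)
{
	/*
	 * #GP (vector 13) injected as a hardware exception (type 3) with its
	 * error code delivered: a consistent combination, so the new checks
	 * let the entry proceed.
	 */
	uint32_t ok = II_VALID | II_TYPE(3) | II_VECTOR(13) | II_DELIVER_CODE;

	/*
	 * NMI (type 2) paired with vector 13: an invalid type/vector
	 * combination, so check_vmentry_prereqs() now fails the entry with
	 * VMXERR_ENTRY_INVALID_CONTROL_FIELD instead of letting a bad value
	 * reach the VMCS02.
	 */
	uint32_t bad = II_VALID | II_TYPE(2) | II_VECTOR(13);

	printf("ok  = 0x%08x\nbad = 0x%08x\n", ok, bad);
	return 0;
}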

Reviewed-by: Marc Orr <marcorr@...gle.com>
