Message-ID: <CALMp9eTBs=deSYu1=CMLwZcO8HTpGM2JsgDxvFR1Y220tdUQ3w@mail.gmail.com>
Date:   Mon, 27 Apr 2020 17:28:54 -0700
From:   Jim Mattson <jmattson@...gle.com>
To:     Paolo Bonzini <pbonzini@...hat.com>
Cc:     LKML <linux-kernel@...r.kernel.org>,
        kvm list <kvm@...r.kernel.org>,
        Sean Christopherson <sean.j.christopherson@...el.com>,
        Joerg Roedel <joro@...tes.org>, everdox@...il.com
Subject: Re: [PATCH] KVM: x86: handle wrap around 32-bit address space

On Mon, Apr 27, 2020 at 9:59 AM Paolo Bonzini <pbonzini@...hat.com> wrote:
>
> KVM does not handle the case where EIP wraps around the 32-bit address
> space (that is, outside long mode).  The wraparound handling is needed
> both in vmx.c and in emulate.c.  SVM with NRIPS is okay, but it can
> still print an error to dmesg due to integer overflow.
>
> Reported-by: Nick Peterson <everdox@...il.com>
> Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
> ---
>  arch/x86/kvm/emulate.c |  2 ++
>  arch/x86/kvm/svm/svm.c |  3 ---
>  arch/x86/kvm/vmx/vmx.c | 15 ++++++++++++---
>  3 files changed, 14 insertions(+), 6 deletions(-)
>
> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> index bddaba9c68dd..de5476f8683e 100644
> --- a/arch/x86/kvm/emulate.c
> +++ b/arch/x86/kvm/emulate.c
> @@ -5798,6 +5798,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
>         }
>
>         ctxt->eip = ctxt->_eip;
> +       if (ctxt->mode != X86EMUL_MODE_PROT64)
> +               ctxt->eip = (u32)ctxt->_eip;
>
>  done:
>         if (rc == X86EMUL_PROPAGATE_FAULT) {
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 8f8fc65bfa3e..d5e72b22bc87 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -319,9 +319,6 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
>                 if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
>                         return 0;
>         } else {
> -               if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
> -                       pr_err("%s: ip 0x%lx next 0x%llx\n",
> -                              __func__, kvm_rip_read(vcpu), svm->next_rip);
>                 kvm_rip_write(vcpu, svm->next_rip);
>         }
>         svm_set_interrupt_shadow(vcpu, 0);
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 3ab6ca6062ce..ed1ffc8a727b 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -1556,7 +1556,7 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
>
>  static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
>  {
> -       unsigned long rip;
> +       unsigned long rip, orig_rip;
>
>         /*
>          * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on
> @@ -1568,8 +1568,17 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
>          */
>         if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
>             to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) {
> -               rip = kvm_rip_read(vcpu);
> -               rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
> +               orig_rip = kvm_rip_read(vcpu);
> +               rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
> +#ifdef CONFIG_X86_64
> +               /*
> +                * We need to mask out the high 32 bits of RIP if not in 64-bit
> +                * mode, but just finding out that we are in 64-bit mode is
> +                * quite expensive.  Only do it if there was a carry.
> +                */
> +               if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu))

Is it actually possible to wrap around 0 without getting a segment
limit violation, or is it only possible to wrap *to* 0 (i.e.,
rip == 1ull << 32)?
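
To make the two wrap cases concrete, here is a small standalone
sketch (not part of the patch; the helper name and the RIP values
are made up) that exercises the same ((rip ^ orig_rip) >> 31) == 3
check as the hunk above, assuming orig_rip fits in 32 bits as it
does outside 64-bit mode:

/*
 * Standalone illustration only, not from the patch: the helper name
 * and the RIP values below are invented.  carried_past_4g() is the
 * same expression the patch uses to detect a carry out of bit 31.
 */
#include <stdint.h>
#include <stdio.h>

static int carried_past_4g(uint64_t orig_rip, uint64_t rip)
{
        /*
         * Bits 31 and 32 of the XOR are both set only when the add
         * flipped bit 31 from 1 to 0 and carried into bit 32.
         */
        return ((rip ^ orig_rip) >> 31) == 3;
}

int main(void)
{
        uint64_t orig, rip;

        /* Wrap *to* 0: RIP lands exactly on 1ull << 32. */
        orig = 0xfffffffeull;
        rip = orig + 2;                         /* 0x100000000 */
        printf("to 0:    carry=%d masked=0x%x\n",
               carried_past_4g(orig, rip), (unsigned)(uint32_t)rip);

        /* Wrap *around* 0: RIP ends up a few bytes past 1ull << 32. */
        orig = 0xfffffffdull;
        rip = orig + 7;                         /* 0x100000004 */
        printf("past 0:  carry=%d masked=0x%x\n",
               carried_past_4g(orig, rip), (unsigned)(uint32_t)rip);

        /* No wrap: bit 31 set, but no carry out of it. */
        orig = 0x80001000ull;
        rip = orig + 5;
        printf("no wrap: carry=%d\n", carried_past_4g(orig, rip));
        return 0;
}

Both the wrap-to-0 and the wrap-past-0 case set bits 31 and 32 of
the XOR, so the check fires either way and the (u32) cast gives the
truncated RIP; whether the second case is actually reachable is
exactly the question above.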

> +                       rip = (u32)rip;
> +#endif
>                 kvm_rip_write(vcpu, rip);
>         } else {
>                 if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
> --
> 2.18.2
>
