Message-Id: <20170518173732.12185-2-rkrcmar@redhat.com>
Date: Thu, 18 May 2017 19:37:29 +0200
From: Radim Krčmář <rkrcmar@...hat.com>
To: linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc: Paolo Bonzini <pbonzini@...hat.com>,
Dan Carpenter <dan.carpenter@...cle.com>
Subject: [PATCH 1/4] KVM: nVMX: fix nested_vmx_check_vmptr failure paths under debugging
kvm_skip_emulated_instruction() will return 0 if userspace is
single-stepping the guest.

kvm_skip_emulated_instruction() uses the return status convention of
exit handlers: 0 means "exit to userspace" and 1 means "continue vm
entries".  The problem is that the return status of
nested_vmx_check_vmptr() means something else: 0 is ok, 1 is error.

This means we would continue executing after a failure.  A static
checker noticed it because vmptr was not initialized.

Fix it by having nested_vmx_check_vmptr() return 2 on failures where
the caller still has to skip the emulated instruction itself.
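
To make the clash concrete, here is the failing pattern condensed from
the pre-patch code (an illustrative sketch, not a hunk from the diff
below):

	/* Inside nested_vmx_check_vmptr(), before this patch: */
	if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
		nested_vmx_failInvalid(vcpu);
		/* Returns 0 when userspace single-steps the guest. */
		return kvm_skip_emulated_instruction(vcpu);
	}

	/* Caller, e.g. handle_vmclear(), before this patch: */
	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
		return 1;
	/*
	 * On the 0 path we fall through to here and keep using an
	 * uninitialized vmptr even though the check failed.
	 */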
Reported-by: Dan Carpenter <dan.carpenter@...cle.com>
Fixes: 6affcbedcac7 ("KVM: x86: Add kvm_skip_emulated_instruction and use it.")
Signed-off-by: Radim Krčmář <rkrcmar@...hat.com>
---
TODO: add enum for exit handler return states.
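
One possible shape for that enum (purely illustrative; the names below
are invented and not part of this series):

	enum exit_handler_ret {
		EXIT_HANDLER_USERSPACE = 0,	/* exit to userspace */
		EXIT_HANDLER_RESUME    = 1,	/* continue vm entries */
		EXIT_HANDLER_SKIP      = 2,	/* caller must skip the insn */
	};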
---
arch/x86/kvm/vmx.c | 31 ++++++++++++++++++-------------
1 file changed, 18 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9ff1b04502c9..35e058ddd97a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6955,19 +6955,19 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
*/
if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
nested_vmx_failInvalid(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ return 2;
}
page = nested_get_page(vcpu, vmptr);
if (page == NULL) {
nested_vmx_failInvalid(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ return 2;
}
if (*(u32 *)kmap(page) != VMCS12_REVISION) {
kunmap(page);
nested_release_page_clean(page);
nested_vmx_failInvalid(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ return 2;
}
kunmap(page);
nested_release_page_clean(page);
@@ -6977,26 +6977,26 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
nested_vmx_failValid(vcpu,
VMXERR_VMCLEAR_INVALID_ADDRESS);
- return kvm_skip_emulated_instruction(vcpu);
+ return 2;
}
if (vmptr == vmx->nested.vmxon_ptr) {
nested_vmx_failValid(vcpu,
VMXERR_VMCLEAR_VMXON_POINTER);
- return kvm_skip_emulated_instruction(vcpu);
+ return 2;
}
break;
case EXIT_REASON_VMPTRLD:
if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_INVALID_ADDRESS);
- return kvm_skip_emulated_instruction(vcpu);
+ return 2;
}
if (vmptr == vmx->nested.vmxon_ptr) {
nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_VMXON_POINTER);
- return kvm_skip_emulated_instruction(vcpu);
+ return 2;
}
break;
default:
@@ -7095,8 +7095,9 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
return 1;
}
- if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
- return 1;
+ ret = nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL);
+ if (ret)
+ return ret == 1 ? 1 : kvm_skip_emulated_instruction(vcpu);
ret = enter_vmx_operation(vcpu);
if (ret)
@@ -7209,12 +7210,14 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 zero = 0;
gpa_t vmptr;
+ int ret;
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
- return 1;
+ ret = nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr);
+ if (ret)
+ return ret == 1 ? 1 : kvm_skip_emulated_instruction(vcpu);
if (vmptr == vmx->nested.current_vmptr)
nested_release_vmcs12(vmx);
@@ -7541,12 +7544,14 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
gpa_t vmptr;
+ int ret;
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
- return 1;
+ ret = nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr);
+ if (ret)
+ return ret == 1 ? 1 : kvm_skip_emulated_instruction(vcpu);
if (vmx->nested.current_vmptr != vmptr) {
struct vmcs12 *new_vmcs12;
--
2.13.0
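
For reference, the caller-side pattern that the hunks above introduce,
condensed with the meaning of each value spelled out (an illustrative
recap, not an additional hunk):

	ret = nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr);
	if (ret)
		/*
		 * 1: keep the usual exit-handler meaning and resume;
		 * 2: the check failed, skip the emulated instruction.
		 */
		return ret == 1 ? 1 : kvm_skip_emulated_instruction(vcpu);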