Message-ID: <jpg618deb1e.fsf@redhat.com>
Date: Thu, 30 Apr 2015 14:17:01 -0400
From: Bandan Das <bsd@...hat.com>
To: Paolo Bonzini <pbonzini@...hat.com>
Cc: linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
rkrcmar@...hat.com
Subject: Re: [PATCH] KVM: x86: dump VMCS on invalid entry

Paolo Bonzini <pbonzini@...hat.com> writes:
> Code and format roughly based on Xen's vmcs_dump_vcpu.
Nice, but isn't it better to keep the output format and behavior the
same for both dump_vmcb() and dump_vmcs()? Why not pr_err()? With
pr_debug() the dump is compiled out unless DEBUG or dynamic debug is
enabled, which rather defeats the purpose of dumping on a failed entry.
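Something along these lines, perhaps (untested, purely to illustrate
the log level change; the field arithmetic is unchanged from your
patch, so the output would land in dmesg at the same level as
dump_vmcb in svm.c):

/* Same field offsets as in the patch, only pr_debug -> pr_err. */
static void vmx_dump_sel(char *name, uint32_t sel)
{
	pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
	       name, vmcs_read32(sel),
	       vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
	       vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
	       vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
}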
Bandan

> Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
> ---
> arch/x86/kvm/vmx.c | 153 +++++++++++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 153 insertions(+)
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 31a76ab38267..a0f5952ed0e9 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -7686,6 +7686,158 @@ static void kvm_flush_pml_buffers(struct kvm *kvm)
> kvm_vcpu_kick(vcpu);
> }
>
> +static void vmx_dump_sel(char *name, uint32_t sel)
> +{
> + pr_debug("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
> + name, vmcs_read32(sel),
> + vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
> + vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
> + vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
> +}
> +
> +static void vmx_dump_dtsel(char *name, uint32_t limit)
> +{
> + pr_debug("%s limit=0x%08x, base=0x%016lx\n",
> + name, vmcs_read32(limit),
> + vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
> +}
> +
> +static void dump_vmcs(void)
> +{
> + u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
> + u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
> + u32 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
> + u32 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
> + u32 secondary_exec_control = 0;
> + unsigned long cr4 = vmcs_readl(GUEST_CR4);
> + u64 efer = vmcs_readl(GUEST_IA32_EFER);
> + int i, n;
> +
> + if (cpu_has_secondary_exec_ctrls())
> + secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
> +
> + pr_debug("*** Guest State ***\n");
> + pr_debug("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
> + vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
> + vmcs_readl(CR0_GUEST_HOST_MASK));
> + pr_debug("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
> + cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
> + pr_debug("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
> + if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
> + (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
> + {
> + pr_debug("PDPTR0 = 0x%016lx PDPTR1 = 0x%016lx\n",
> + vmcs_readl(GUEST_PDPTR0), vmcs_readl(GUEST_PDPTR1));
> + pr_debug("PDPTR2 = 0x%016lx PDPTR3 = 0x%016lx\n",
> + vmcs_readl(GUEST_PDPTR2), vmcs_readl(GUEST_PDPTR3));
> + }
> + pr_debug("RSP = 0x%016lx RIP = 0x%016lx\n",
> + vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
> + pr_debug("RFLAGS=0x%08lx DR7 = 0x%016lx\n",
> + vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
> + pr_debug("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
> + vmcs_readl(GUEST_SYSENTER_ESP),
> + vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
> + vmx_dump_sel("CS: ", GUEST_CS_SELECTOR);
> + vmx_dump_sel("DS: ", GUEST_DS_SELECTOR);
> + vmx_dump_sel("SS: ", GUEST_SS_SELECTOR);
> + vmx_dump_sel("ES: ", GUEST_ES_SELECTOR);
> + vmx_dump_sel("FS: ", GUEST_FS_SELECTOR);
> + vmx_dump_sel("GS: ", GUEST_GS_SELECTOR);
> + vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT);
> + vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
> + vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
> + vmx_dump_sel("TR: ", GUEST_TR_SELECTOR);
> + if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
> + (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
> + pr_debug("EFER = 0x%016llx PAT = 0x%016lx\n",
> + efer, vmcs_readl(GUEST_IA32_PAT));
> + pr_debug("DebugCtl = 0x%016lx DebugExceptions = 0x%016lx\n",
> + vmcs_readl(GUEST_IA32_DEBUGCTL),
> + vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
> + if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
> + pr_debug("PerfGlobCtl = 0x%016lx\n",
> + vmcs_readl(GUEST_IA32_PERF_GLOBAL_CTRL));
> + if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
> + pr_debug("BndCfgS = 0x%016lx\n", vmcs_readl(GUEST_BNDCFGS));
> + pr_debug("Interruptibility = %08x ActivityState = %08x\n",
> + vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
> + vmcs_read32(GUEST_ACTIVITY_STATE));
> + if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
> + pr_debug("InterruptStatus = %04x\n",
> + vmcs_read16(GUEST_INTR_STATUS));
> +
> + pr_debug("*** Host State ***\n");
> + pr_debug("RIP = 0x%016lx RSP = 0x%016lx\n",
> + vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
> + pr_debug("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
> + vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
> + vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
> + vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
> + vmcs_read16(HOST_TR_SELECTOR));
> + pr_debug("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
> + vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
> + vmcs_readl(HOST_TR_BASE));
> + pr_debug("GDTBase=%016lx IDTBase=%016lx\n",
> + vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
> + pr_debug("CR0=%016lx CR3=%016lx CR4=%016lx\n",
> + vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
> + vmcs_readl(HOST_CR4));
> + pr_debug("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
> + vmcs_readl(HOST_IA32_SYSENTER_ESP),
> + vmcs_read32(HOST_IA32_SYSENTER_CS),
> + vmcs_readl(HOST_IA32_SYSENTER_EIP));
> + if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER))
> + pr_debug("EFER = 0x%016lx PAT = 0x%016lx\n",
> + vmcs_readl(HOST_IA32_EFER), vmcs_readl(HOST_IA32_PAT));
> + if (vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
> + pr_debug("PerfGlobCtl = 0x%016lx\n",
> + vmcs_readl(HOST_IA32_PERF_GLOBAL_CTRL));
> +
> + pr_debug("*** Control State ***\n");
> + pr_debug("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n",
> + pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control);
> + pr_debug("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl);
> + pr_debug("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
> + vmcs_read32(EXCEPTION_BITMAP),
> + vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
> + vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
> + pr_debug("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
> + vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
> + vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
> + vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
> + pr_debug("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
> + vmcs_read32(VM_EXIT_INTR_INFO),
> + vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
> + vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
> + pr_debug(" reason=%08x qualification=%016lx\n",
> + vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
> + pr_debug("IDTVectoring: info=%08x errcode=%08x\n",
> + vmcs_read32(IDT_VECTORING_INFO_FIELD),
> + vmcs_read32(IDT_VECTORING_ERROR_CODE));
> + pr_debug("TSC Offset = 0x%016lx\n", vmcs_readl(TSC_OFFSET));
> + if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW)
> + pr_debug("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
> + if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
> + pr_debug("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
> + if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
> + pr_debug("EPT pointer = 0x%016lx\n", vmcs_readl(EPT_POINTER));
> + n = vmcs_read32(CR3_TARGET_COUNT);
> + for (i = 0; i + 1 < n; i += 4)
> + pr_debug("CR3 target%u=%016lx target%u=%016lx\n",
> + i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2),
> + i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2));
> + if (i < n)
> + pr_debug("CR3 target%u=%016lx\n",
> + i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2));
> + if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
> + pr_debug("PLE Gap=%08x Window=%08x\n",
> + vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
> + if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
> + pr_debug("Virtual processor ID = 0x%04x\n",
> + vmcs_read16(VIRTUAL_PROCESSOR_ID));
> +}
> +
> /*
> * The guest has exited. See if we can fix it or if we need userspace
> * assistance.
> @@ -7718,6 +7870,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
> }
>
> if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
> + dump_vmcs();
> vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
> vcpu->run->fail_entry.hardware_entry_failure_reason
> = exit_reason;