Message-ID: <560A6A54.1020504@redhat.com>
Date: Tue, 29 Sep 2015 12:39:16 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: Wanpeng Li <wanpeng.li@...mail.com>
Cc: Jan Kiszka <jan.kiszka@...mens.com>, Bandan Das <bsd@...hat.com>,
Wincy Van <fanwenyi0529@...il.com>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2] KVM: nVMX: expose VPID capability to L1
On 29/09/2015 04:55, Wanpeng Li wrote:
> Expose VPID capability to L1.
>
> Signed-off-by: Wanpeng Li <wanpeng.li@...mail.com>
> ---
> v1 -> v2:
> * set only VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT
Thanks. I've now checked your implementation more thoroughly against the
SDM, and there are a few things missing between this patch and the one
that emulates INVVPID; a rough sketch of what I mean follows the list:
- you're not setting bit 32 of the VMX_EPT_VPID_CAP MSR
- you're not checking the invalidation type against the supported types
in the implementation of the INVVPID instruction
- the memory operand must always be read even if it isn't needed (e.g.,
for type==global), similar to INVEPT
- for single-context invalidation you're not checking that VPID != 0,
though in practice that doesn't matter because we don't want to support
single-context invalidation
- you're always setting the MSR's bits to 1 even if !enable_vpid
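Roughly what I have in mind, keeping your single u64 field (completely
untested, only meant to illustrate the MSR setup and the type check; the
raw 1ull << 32 is the "INVVPID supported" bit and could become a proper
VMX_VPID_INVVPID_BIT define):

        if (enable_ept) {
                /* nested EPT: emulate EPT also to L1 */
                vmx->nested.nested_vmx_secondary_ctls_high |=
                        SECONDARY_EXEC_ENABLE_EPT;
                vmx->nested.nested_vmx_ept_vpid_caps = VMX_EPT_PAGE_WALK_4_BIT |
                         VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
                         VMX_EPT_INVEPT_BIT;
                vmx->nested.nested_vmx_ept_vpid_caps &= vmx_capability.ept;
                /* Only global-context invalidation, as before. */
                vmx->nested.nested_vmx_ept_vpid_caps |= VMX_EPT_EXTENT_GLOBAL_BIT;
        } else
                vmx->nested.nested_vmx_ept_vpid_caps = 0;

        if (enable_vpid)
                /*
                 * Bit 32 of the MSR advertises INVVPID itself; as for
                 * INVEPT, only advertise the global-context type.
                 */
                vmx->nested.nested_vmx_ept_vpid_caps |= (1ull << 32) |
                        (VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT << 32);

and in handle_invvpid, mirroring what handle_invept already does:

        vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
        type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);

        /* VPID invalidation types are advertised in bits 40-43 of the MSR. */
        types = (vmx->nested.nested_vmx_ept_vpid_caps >> 40) & 0xf;
        if (!(types & (1UL << type))) {
                nested_vmx_failValid(vcpu,
                        VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
                skip_emulated_instruction(vcpu);
                return 1;
        }

        /*
         * The INVVPID descriptor must always be read, even for the
         * global type: same get_vmx_mem_address + kvm_read_guest_virt
         * sequence (and page-fault injection on failure) as in
         * handle_invept.
         */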
At this point it's better if you resend the whole nested VPID
implementation, i.e. the following five patches:
KVM: VMX: adjust interface to allocate/free_vpid
KVM: VMX: introduce __vmx_flush_tlb to handle specific vpid
KVM: nVMX: emulate the INVVPID instruction
KVM: nVMX: nested VPID emulation
KVM: nVMX: expose VPID capability to L1
with the above issues fixed.  Please also send kvm-unit-tests patches
that test the error cases; something along the lines of the sketch below
would do.
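Untested sketch; invvpid_fails/test_invvpid_errors are made-up names and
the report() calls have to be adapted to the existing vmx_tests.c
helpers, but the descriptor layout and the failure check (CF or ZF set
on VMfail) are straight from the SDM:

        struct invvpid_operand {
                u64 vpid;       /* VPID in bits 15:0, bits 63:16 must be 0 */
                u64 gla;
        };

        /* Returns true if INVVPID raised VMfail (CF or ZF set). */
        static bool invvpid_fails(u64 type, u16 vpid, u64 gla)
        {
                struct invvpid_operand operand = { vpid, gla };
                bool fail;

                asm volatile ("invvpid %1, %2; setbe %0"
                              : "=q" (fail)
                              : "m" (operand), "r" (type)
                              : "cc", "memory");
                return fail;
        }

        static void test_invvpid_errors(void)
        {
                /*
                 * Only the global type (2) is advertised, so the other
                 * invalidation types must fail with "invalid operand to
                 * INVEPT/INVVPID" instead of invalidating anything.
                 */
                report("invvpid individual-address type rejected",
                       invvpid_fails(0, 1, 0));
                report("invvpid single-context type rejected",
                       invvpid_fails(1, 1, 0));
                report("invvpid global type succeeds",
                       !invvpid_fails(2, 0, 0));
        }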
Thanks,
Paolo
> arch/x86/kvm/vmx.c | 22 +++++++++++-----------
> 1 file changed, 11 insertions(+), 11 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 75f3ee0..c4ea890 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -442,7 +442,7 @@ struct nested_vmx {
> u32 nested_vmx_true_entry_ctls_low;
> u32 nested_vmx_misc_low;
> u32 nested_vmx_misc_high;
> - u32 nested_vmx_ept_caps;
> + u64 nested_vmx_ept_vpid_caps;
> };
>
> #define POSTED_INTR_ON 0
> @@ -2485,22 +2485,23 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
> SECONDARY_EXEC_WBINVD_EXITING |
> SECONDARY_EXEC_XSAVES;
>
> - if (enable_ept) {
> + if (enable_ept | enable_vpid) {
> /* nested EPT: emulate EPT also to L1 */
> vmx->nested.nested_vmx_secondary_ctls_high |=
> SECONDARY_EXEC_ENABLE_EPT;
> - vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
> + vmx->nested.nested_vmx_ept_vpid_caps = VMX_EPT_PAGE_WALK_4_BIT |
> VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
> VMX_EPT_INVEPT_BIT;
> - vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept;
> + vmx->nested.nested_vmx_ept_vpid_caps &= vmx_capability.ept;
> /*
> * For nested guests, we don't do anything specific
> * for single context invalidation. Hence, only advertise
> * support for global context invalidation.
> */
> - vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT;
> + vmx->nested.nested_vmx_ept_vpid_caps |= VMX_EPT_EXTENT_GLOBAL_BIT;
> + vmx->nested.nested_vmx_ept_vpid_caps |= VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT << 32;
> } else
> - vmx->nested.nested_vmx_ept_caps = 0;
> + vmx->nested.nested_vmx_ept_vpid_caps = 0;
>
> if (enable_unrestricted_guest)
> vmx->nested.nested_vmx_secondary_ctls_high |=
> @@ -2616,8 +2617,7 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
> vmx->nested.nested_vmx_secondary_ctls_high);
> break;
> case MSR_IA32_VMX_EPT_VPID_CAP:
> - /* Currently, no nested vpid support */
> - *pdata = vmx->nested.nested_vmx_ept_caps;
> + *pdata = vmx->nested.nested_vmx_ept_vpid_caps;
> break;
> default:
> return 1;
> @@ -7142,7 +7142,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
>
> if (!(vmx->nested.nested_vmx_secondary_ctls_high &
> SECONDARY_EXEC_ENABLE_EPT) ||
> - !(vmx->nested.nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) {
> + !(vmx->nested.nested_vmx_ept_vpid_caps & VMX_EPT_INVEPT_BIT)) {
> kvm_queue_exception(vcpu, UD_VECTOR);
> return 1;
> }
> @@ -7158,7 +7158,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
> vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
> type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
>
> - types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
> + types = (vmx->nested.nested_vmx_ept_vpid_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
>
> if (!(types & (1UL << type))) {
> nested_vmx_failValid(vcpu,
> @@ -8763,7 +8763,7 @@ static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
> {
> WARN_ON(mmu_is_nested(vcpu));
> kvm_init_shadow_ept_mmu(vcpu,
> - to_vmx(vcpu)->nested.nested_vmx_ept_caps &
> + to_vmx(vcpu)->nested.nested_vmx_ept_vpid_caps &
> VMX_EPT_EXECUTE_ONLY_BIT);
> vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
> vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
>