Message-ID: <55F93F00.9030408@redhat.com>
Date: Wed, 16 Sep 2015 12:05:52 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: Wanpeng Li <wanpeng.li@...mail.com>
Cc: Jan Kiszka <jan.kiszka@...mens.com>, Bandan Das <bsd@...hat.com>,
Wincy Van <fanwenyi0529@...il.com>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v5 2/2] KVM: nVMX: nested VPID emulation
On 16/09/2015 11:30, Wanpeng Li wrote:
> VPID is used to tag address spaces and avoid TLB flushes. Currently L0
> uses the same VPID to run L1 and all its guests, so KVM must flush the
> TLB entries tagged with that VPID when switching between L1 and L2.
>
> This patch advertises VPID to the L1 hypervisor so that the address
> spaces of L1 and L2 can be tagged separately, avoiding the TLB flush
> when switching between L1 and L2. On each nested vmentry the shadow
> vpid (vpid02) is reused; an invvpid is issued only when vpid12 changes.
>
> Performance:
>
> lmbench run on L2 with a 3.5 kernel:
>
> Context switching - times in microseconds - smaller is better
> ---------------------------------------------------------------------------------------
> Host      OS             2p/0K  2p/16K 2p/64K 8p/16K 8p/64K 16p/16K 16p/64K
>                          ctxsw  ctxsw  ctxsw  ctxsw  ctxsw  ctxsw   ctxsw
> --------- -------------- ------ ------ ------ ------ ------ ------- -------
> kernel    Linux 3.5.0-1  1.2200 1.3700 1.4500 4.7800 2.3300 5.60000 2.88000  nested VPID
> kernel    Linux 3.5.0-1  1.2600 1.4300 1.5600 12.7   12.9   3.49000 7.46000  vanilla
>
> Reviewed-by: Jan Kiszka <jan.kiszka@...mens.com>
> Suggested-by: Wincy Van <fanwenyi0529@...il.com>
> Signed-off-by: Wanpeng Li <wanpeng.li@...mail.com>
> ---
> arch/x86/kvm/vmx.c | 37 +++++++++++++++++++++++++++++++------
> 1 file changed, 31 insertions(+), 6 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index f8d704d..c23482c 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -424,6 +424,9 @@ struct nested_vmx {
> /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
> u64 vmcs01_debugctl;
>
> + u16 vpid02;
> + u16 last_vpid;
> +
> u32 nested_vmx_procbased_ctls_low;
> u32 nested_vmx_procbased_ctls_high;
> u32 nested_vmx_true_procbased_ctls_low;
> @@ -1155,6 +1158,11 @@ static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
> return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
> }
>
> +static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
> +{
> + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
> +}
> +
> static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
> {
> return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
> @@ -2469,6 +2477,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
> SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
> SECONDARY_EXEC_RDTSCP |
> SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
> + SECONDARY_EXEC_ENABLE_VPID |
> SECONDARY_EXEC_APIC_REGISTER_VIRT |
> SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
> SECONDARY_EXEC_WBINVD_EXITING |
> @@ -6663,6 +6672,7 @@ static void free_nested(struct vcpu_vmx *vmx)
> return;
>
> vmx->nested.vmxon = false;
> + free_vpid(vmx->nested.vpid02);
> nested_release_vmcs12(vmx);
> if (enable_shadow_vmcs)
> free_vmcs(vmx->nested.current_shadow_vmcs);
> @@ -8548,8 +8558,10 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
> goto free_vmcs;
> }
>
> - if (nested)
> + if (nested) {
> nested_vmx_setup_ctls_msrs(vmx);
> + vmx->nested.vpid02 = allocate_vpid();
> + }
>
> vmx->nested.posted_intr_nv = -1;
> vmx->nested.current_vmptr = -1ull;
> @@ -8570,6 +8582,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
> return &vmx->vcpu;
>
> free_vmcs:
> + free_vpid(vmx->nested.vpid02);
> free_loaded_vmcs(vmx->loaded_vmcs);
> free_msrs:
> kfree(vmx->guest_msrs);
> @@ -9445,12 +9458,24 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>
> if (enable_vpid) {
> /*
> - * Trivially support vpid by letting L2s share their parent
> - * L1's vpid. TODO: move to a more elaborate solution, giving
> - * each L2 its own vpid and exposing the vpid feature to L1.
> +	 * There is no direct mapping between vpid02 and vpid12; vpid02
> +	 * is per-vCPU on L0 and is reused, with one invvpid issued
> +	 * during nested vmentry whenever the value of vpid12 changes.
> +	 * vpid12 is allocated by L1 for L2, so it does not influence
> +	 * the global bitmap (for vpid01 and vpid02 allocation) even
> +	 * if a lot of nested vCPUs are spawned.
> */
> - vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
> - vmx_flush_tlb(vcpu);
> + if (nested_cpu_has_vpid(vmcs12)) {
> + vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
> + if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
> + vmx->nested.last_vpid = vmcs12->virtual_processor_id;
> + vmx_flush_tlb(vcpu);
> + }
> + } else {
> + vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
> + vmx_flush_tlb(vcpu);
> + }
> +
> }
>
> if (nested_cpu_has_ept(vmcs12)) {
>
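For reference, the "global bitmap" mentioned in the new comment is the
L0-side VPID allocator in vmx.c that allocate_vpid()/free_vpid() manage.
A rough sketch of that allocator, paraphrased from the file as of this
series (details may differ across kernel versions):

/* vpid01 (vmx->vpid) and vpid02 (vmx->nested.vpid02) are both drawn
 * from this bitmap; the vpid12 values chosen by L1 never touch it. */
static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);

static int allocate_vpid(void)
{
	int vpid;

	if (!enable_vpid)
		return 0;

	spin_lock(&vmx_vpid_lock);
	vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
	if (vpid < VMX_NR_VPIDS)
		__set_bit(vpid, vmx_vpid_bitmap);
	else
		vpid = 0;	/* exhausted: fall back to the untagged VPID 0 */
	spin_unlock(&vmx_vpid_lock);
	return vpid;
}

static void free_vpid(int vpid)
{
	if (!enable_vpid || vpid == 0)
		return;

	spin_lock(&vmx_vpid_lock);
	__clear_bit(vpid, vmx_vpid_bitmap);
	spin_unlock(&vmx_vpid_lock);
}

Since a vpid12 only decides when prepare_vmcs02() issues the invvpid and
never consumes a bitmap entry, L1 can create any number of nested vCPUs
without exhausting L0's 16-bit VPID space.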
Applying to kvm/queue, thanks.
Paolo