Message-ID: <d19cadaf-286e-0f6e-2067-2ea77429fbfa@redhat.com>
Date: Tue, 23 Jun 2020 00:26:27 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: Sean Christopherson <sean.j.christopherson@...el.com>
Cc: Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 4/4] KVM: x86/mmu: Make .write_log_dirty a nested
operation
On 22/06/20 23:58, Sean Christopherson wrote:
> Move .write_log_dirty() into kvm_x86_nested_ops to help differentiate it
> from the non-nested dirty log hooks, and because it's a nested-only
> operation.
>
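As a quick orientation for the diff below, here is a minimal standalone
sketch of the refactoring pattern.  The types and the log_dirty_gpa()
wrapper are simplified stand-ins for illustration, not the actual
kernel declarations:

	struct vcpu;				/* stand-in for struct kvm_vcpu */

	struct nested_ops {
		int (*write_log_dirty)(struct vcpu *vcpu, unsigned long l2_gpa);
	};

	struct x86_ops {
		/* ...non-nested dirty log hooks stay here... */
		struct nested_ops *nested_ops;	/* nested-only hooks move here */
	};

	static int log_dirty_gpa(struct x86_ops *ops, struct vcpu *vcpu,
				 unsigned long l2_gpa)
	{
		/* Before the patch: ops->write_log_dirty(vcpu, l2_gpa); */
		return ops->nested_ops->write_log_dirty(vcpu, l2_gpa);
	}
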
> Signed-off-by: Sean Christopherson <sean.j.christopherson@...el.com>
> ---
> arch/x86/include/asm/kvm_host.h | 2 +-
> arch/x86/kvm/mmu/paging_tmpl.h | 2 +-
> arch/x86/kvm/vmx/nested.c | 38 +++++++++++++++++++++++++++++++++
> arch/x86/kvm/vmx/vmx.c | 38 ---------------------------------
> 4 files changed, 40 insertions(+), 40 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 446ea70a554d..4e6219cb3933 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1220,7 +1220,6 @@ struct kvm_x86_ops {
> void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
> struct kvm_memory_slot *slot,
> gfn_t offset, unsigned long mask);
> - int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
>
> /* pmu operations of sub-arch */
> const struct kvm_pmu_ops *pmu_ops;
> @@ -1281,6 +1280,7 @@ struct kvm_x86_nested_ops {
> struct kvm_nested_state __user *user_kvm_nested_state,
> struct kvm_nested_state *kvm_state);
> bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
> + int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
>
> int (*enable_evmcs)(struct kvm_vcpu *vcpu,
> uint16_t *vmcs_version);
> diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
> index 60e7b2308876..c733196fd45b 100644
> --- a/arch/x86/kvm/mmu/paging_tmpl.h
> +++ b/arch/x86/kvm/mmu/paging_tmpl.h
> @@ -260,7 +260,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
> !(pte & PT_GUEST_DIRTY_MASK)) {
> trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
> #if PTTYPE == PTTYPE_EPT
> - if (kvm_x86_ops.write_log_dirty(vcpu, addr))
> + if (kvm_x86_ops.nested_ops->write_log_dirty(vcpu, addr))
> return -EINVAL;
> #endif
> pte |= PT_GUEST_DIRTY_MASK;
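For context, the changed call site above is only compiled into the EPT
instantiation of the paging template, which KVM uses to walk L1's EPT
tables on behalf of L2.  A simplified sketch of that instantiation
pattern (as in arch/x86/kvm/mmu.c):

	/* Each inclusion stamps out a page-table walker specialized
	 * for one page-table format; only the EPT copy of the code
	 * sees PTTYPE == PTTYPE_EPT and thus the call above. */
	#define PTTYPE PTTYPE_EPT
	#include "paging_tmpl.h"
	#undef PTTYPE
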
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index adb11b504d5c..db9abcbeefd1 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -3205,6 +3205,43 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
> return true;
> }
>
> +static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
> +{
> + struct vmcs12 *vmcs12;
> + struct vcpu_vmx *vmx = to_vmx(vcpu);
> + gpa_t dst;
> +
> + if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
> + return 0;
> +
> + if (WARN_ON_ONCE(vmx->nested.pml_full))
> + return 1;
> +
> + /*
> + * Check if PML is enabled for the nested guest. Whether eptp bit 6 is
> + * set is already checked as part of A/D emulation.
> + */
> + vmcs12 = get_vmcs12(vcpu);
> + if (!nested_cpu_has_pml(vmcs12))
> + return 0;
> +
> + if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
> + vmx->nested.pml_full = true;
> + return 1;
> + }
> +
> + gpa &= ~0xFFFull;
> + dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
> +
> + if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
> + offset_in_page(dst), sizeof(gpa)))
> + return 0;
> +
> + vmcs12->guest_pml_index--;
> +
> + return 0;
> +}
> +
> /*
> * Intel's VMX Instruction Reference specifies a common set of prerequisites
> * for running VMX instructions (except VMXON, whose prerequisites are
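To make the buffer arithmetic above concrete (values made up for
illustration): the PML buffer is one 4 KiB page of u64 entries, filled
from the top down, and PML_ENTITY_NUM is 512.  With
vmcs12->pml_address = 0x12340000 and guest_pml_index = 511, the entry
is written at dst = 0x12340000 + 8 * 511 = 0x12340ff8, i.e. the last
slot in the page.  The logged gpa is first masked to a 4 KiB boundary,
and guest_pml_index is decremented only when kvm_write_guest_page()
succeeds.
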
> @@ -6503,6 +6540,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
> .get_state = vmx_get_nested_state,
> .set_state = vmx_set_nested_state,
> .get_vmcs12_pages = nested_get_vmcs12_pages,
> + .write_log_dirty = nested_vmx_write_pml_buffer,
> .enable_evmcs = nested_enable_evmcs,
> .get_evmcs_version = nested_get_evmcs_version,
> };
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index adf83047bb21..8bf06a59f356 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -7501,43 +7501,6 @@ static void vmx_flush_log_dirty(struct kvm *kvm)
> kvm_flush_pml_buffers(kvm);
> }
>
> -static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
> -{
> - struct vmcs12 *vmcs12;
> - struct vcpu_vmx *vmx = to_vmx(vcpu);
> - gpa_t dst;
> -
> - if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
> - return 0;
> -
> - if (WARN_ON_ONCE(vmx->nested.pml_full))
> - return 1;
> -
> - /*
> - * Check if PML is enabled for the nested guest. Whether eptp bit 6 is
> - * set is already checked as part of A/D emulation.
> - */
> - vmcs12 = get_vmcs12(vcpu);
> - if (!nested_cpu_has_pml(vmcs12))
> - return 0;
> -
> - if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
> - vmx->nested.pml_full = true;
> - return 1;
> - }
> -
> - gpa &= ~0xFFFull;
> - dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
> -
> - if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
> - offset_in_page(dst), sizeof(gpa)))
> - return 0;
> -
> - vmcs12->guest_pml_index--;
> -
> - return 0;
> -}
> -
> static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
> struct kvm_memory_slot *memslot,
> gfn_t offset, unsigned long mask)
> @@ -7966,7 +7929,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
> .slot_disable_log_dirty = vmx_slot_disable_log_dirty,
> .flush_log_dirty = vmx_flush_log_dirty,
> .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
> - .write_log_dirty = vmx_write_pml_buffer,
>
> .pre_block = vmx_pre_block,
> .post_block = vmx_post_block,
>
Queued, thanks (patch 1 for 5.8).
Paolo