Message-ID: <9bb705c8-101d-5f20-bffd-ccd44cbaf663@redhat.com>
Date: Thu, 18 Feb 2021 13:45:37 +0100
From: Paolo Bonzini <pbonzini@...hat.com>
To: Sean Christopherson <seanjc@...gle.com>
Cc: Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org, Ben Gardon <bgardon@...gle.com>,
Makarand Sonare <makarandsonare@...gle.com>
Subject: Re: [PATCH 08/14] KVM: x86/mmu: Make dirty log size hook (PML) a
value, not a function
On 13/02/21 01:50, Sean Christopherson wrote:
> Store the vendor-specific dirty log size in a variable; there's no need
> to wrap it in a function since the value is constant after
> hardware_setup() runs.

For now... :)

Paolo

> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
> ---
> arch/x86/include/asm/kvm-x86-ops.h | 1 -
> arch/x86/include/asm/kvm_host.h | 2 +-
> arch/x86/kvm/mmu/mmu.c | 5 +----
> arch/x86/kvm/vmx/vmx.c | 9 ++-------
> 4 files changed, 4 insertions(+), 13 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
> index 355a2ab8fc09..28c07cc01474 100644
> --- a/arch/x86/include/asm/kvm-x86-ops.h
> +++ b/arch/x86/include/asm/kvm-x86-ops.h
> @@ -97,7 +97,6 @@ KVM_X86_OP_NULL(slot_enable_log_dirty)
> KVM_X86_OP_NULL(slot_disable_log_dirty)
> KVM_X86_OP_NULL(flush_log_dirty)
> KVM_X86_OP_NULL(enable_log_dirty_pt_masked)
> -KVM_X86_OP_NULL(cpu_dirty_log_size)
> KVM_X86_OP_NULL(pre_block)
> KVM_X86_OP_NULL(post_block)
> KVM_X86_OP_NULL(vcpu_blocking)
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 84499aad01a4..fb59933610d9 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1294,7 +1294,7 @@ struct kvm_x86_ops {
> void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
> struct kvm_memory_slot *slot,
> gfn_t offset, unsigned long mask);
> - int (*cpu_dirty_log_size)(void);
> + int cpu_dirty_log_size;
>
> /* pmu operations of sub-arch */
> const struct kvm_pmu_ops *pmu_ops;
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index d5849a0e3de1..6c32e8e0f720 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -1294,10 +1294,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
>
> int kvm_cpu_dirty_log_size(void)
> {
> - if (kvm_x86_ops.cpu_dirty_log_size)
> - return static_call(kvm_x86_cpu_dirty_log_size)();
> -
> - return 0;
> + return kvm_x86_ops.cpu_dirty_log_size;
> }
>
> bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index b47ed3f412ef..f843707dd7df 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -7650,11 +7650,6 @@ static bool vmx_check_apicv_inhibit_reasons(ulong bit)
> return supported & BIT(bit);
> }
>
> -static int vmx_cpu_dirty_log_size(void)
> -{
> - return enable_pml ? PML_ENTITY_NUM : 0;
> -}
> -
> static struct kvm_x86_ops vmx_x86_ops __initdata = {
> .hardware_unsetup = hardware_unsetup,
>
> @@ -7758,6 +7753,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
> .slot_disable_log_dirty = vmx_slot_disable_log_dirty,
> .flush_log_dirty = vmx_flush_log_dirty,
> .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
> + .cpu_dirty_log_size = PML_ENTITY_NUM,
>
> .pre_block = vmx_pre_block,
> .post_block = vmx_post_block,
> @@ -7785,7 +7781,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
>
> .msr_filter_changed = vmx_msr_filter_changed,
> .complete_emulated_msr = kvm_complete_insn_gp,
> - .cpu_dirty_log_size = vmx_cpu_dirty_log_size,
>
> .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
> };
> @@ -7907,7 +7902,7 @@ static __init int hardware_setup(void)
> vmx_x86_ops.slot_disable_log_dirty = NULL;
> vmx_x86_ops.flush_log_dirty = NULL;
> vmx_x86_ops.enable_log_dirty_pt_masked = NULL;
> - vmx_x86_ops.cpu_dirty_log_size = NULL;
> + vmx_x86_ops.cpu_dirty_log_size = 0;
> }
>
> if (!cpu_has_vmx_preemption_timer())
>
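
For readers not steeped in kvm_x86_ops, here is a minimal, self-contained
sketch (not from the patch or the kernel tree; the struct and field names are
reused purely for illustration) of the pattern the change applies: an optional
function-pointer hook that only ever returns a constant becomes a plain data
field, so callers read the value directly instead of checking for a NULL hook
and making an indirect (static_call) dispatch.

/*
 * Hypothetical stand-alone illustration of replacing a constant-returning
 * hook with a data field.  PML_ENTITY_NUM is 512 in the real VMX code (the
 * PML buffer holds 512 GPA entries).
 */
#include <stdio.h>

#define PML_ENTITY_NUM 512

struct ops {
        /* Before: int (*cpu_dirty_log_size)(void);  -- NULL when unsupported */
        int cpu_dirty_log_size;  /* After: 0 when unsupported */
};

static struct ops vmx_ops = {
        .cpu_dirty_log_size = PML_ENTITY_NUM,
};

static int cpu_dirty_log_size(const struct ops *ops)
{
        /* No NULL check or indirect call needed; 0 already means "disabled". */
        return ops->cpu_dirty_log_size;
}

int main(void)
{
        printf("dirty log size: %d\n", cpu_dirty_log_size(&vmx_ops));

        /* hardware_setup() analogue: clear the field when PML is disabled. */
        vmx_ops.cpu_dirty_log_size = 0;
        printf("dirty log size: %d\n", cpu_dirty_log_size(&vmx_ops));
        return 0;
}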