Message-ID: <CA+EHjTymOVqvWYfMxS=NAa1HuhfGj+aN=zPtTUTephmPbOqNzA@mail.gmail.com>
Date: Thu, 31 Jul 2025 09:06:00 +0100
From: Fuad Tabba <tabba@...gle.com>
To: Sean Christopherson <seanjc@...gle.com>
Cc: Paolo Bonzini <pbonzini@...hat.com>, Marc Zyngier <maz@...nel.org>,
Oliver Upton <oliver.upton@...ux.dev>, kvm@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev,
linux-kernel@...r.kernel.org, Ira Weiny <ira.weiny@...el.com>,
Gavin Shan <gshan@...hat.com>, Shivank Garg <shivankg@....com>, Vlastimil Babka <vbabka@...e.cz>,
Xiaoyao Li <xiaoyao.li@...el.com>, David Hildenbrand <david@...hat.com>,
Ackerley Tng <ackerleytng@...gle.com>, Tao Chan <chentao@...inos.cn>,
James Houghton <jthoughton@...gle.com>
Subject: Re: [PATCH v17 13/24] KVM: x86/mmu: Hoist guest_memfd max level/order
helpers "up" in mmu.c

On Tue, 29 Jul 2025 at 23:55, Sean Christopherson <seanjc@...gle.com> wrote:
>
> Move kvm_max_level_for_order() and kvm_max_private_mapping_level() up in
> mmu.c so that they can be used by __kvm_mmu_max_mapping_level().
>
> Opportunistically drop the "inline" from kvm_max_level_for_order().
>
> No functional change intended.
>
> Reviewed-by: Xiaoyao Li <xiaoyao.li@...el.com>
> Reviewed-by: Ackerley Tng <ackerleytng@...gle.com>
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
> ---
Reviewed-by: Fuad Tabba <tabba@...gle.com>

Cheers,
/fuad

> arch/x86/kvm/mmu/mmu.c | 72 +++++++++++++++++++++---------------------
> 1 file changed, 36 insertions(+), 36 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index b735611e8fcd..20dd9f64156e 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3285,6 +3285,42 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
>                  return level;
>   }
>  
> +static u8 kvm_max_level_for_order(int order)
> +{
> +        BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
> +
> +        KVM_MMU_WARN_ON(order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) &&
> +                        order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) &&
> +                        order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K));
> +
> +        if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
> +                return PG_LEVEL_1G;
> +
> +        if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
> +                return PG_LEVEL_2M;
> +
> +        return PG_LEVEL_4K;
> +}
> +
> +static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
> +                                        u8 max_level, int gmem_order)
> +{
> +        u8 req_max_level;
> +
> +        if (max_level == PG_LEVEL_4K)
> +                return PG_LEVEL_4K;
> +
> +        max_level = min(kvm_max_level_for_order(gmem_order), max_level);
> +        if (max_level == PG_LEVEL_4K)
> +                return PG_LEVEL_4K;
> +
> +        req_max_level = kvm_x86_call(gmem_max_mapping_level)(kvm, pfn);
> +        if (req_max_level)
> +                max_level = min(max_level, req_max_level);
> +
> +        return max_level;
> +}
> +
>   static int __kvm_mmu_max_mapping_level(struct kvm *kvm,
>                                          const struct kvm_memory_slot *slot,
>                                          gfn_t gfn, int max_level, bool is_private)
> @@ -4503,42 +4539,6 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
>          vcpu->stat.pf_fixed++;
>   }
>  
> -static inline u8 kvm_max_level_for_order(int order)
> -{
> -        BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
> -
> -        KVM_MMU_WARN_ON(order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) &&
> -                        order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) &&
> -                        order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K));
> -
> -        if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
> -                return PG_LEVEL_1G;
> -
> -        if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
> -                return PG_LEVEL_2M;
> -
> -        return PG_LEVEL_4K;
> -}
> -
> -static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
> -                                        u8 max_level, int gmem_order)
> -{
> -        u8 req_max_level;
> -
> -        if (max_level == PG_LEVEL_4K)
> -                return PG_LEVEL_4K;
> -
> -        max_level = min(kvm_max_level_for_order(gmem_order), max_level);
> -        if (max_level == PG_LEVEL_4K)
> -                return PG_LEVEL_4K;
> -
> -        req_max_level = kvm_x86_call(gmem_max_mapping_level)(kvm, pfn);
> -        if (req_max_level)
> -                max_level = min(max_level, req_max_level);
> -
> -        return max_level;
> -}
> -
>   static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu,
>                                         struct kvm_page_fault *fault, int r)
>   {
> --
> 2.50.1.552.g942d659e1b-goog
>
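
For anyone reading the hunk without the tree handy: below is a minimal,
self-contained sketch of the order-to-level translation that
kvm_max_level_for_order() performs, with the shift values (9 for 2M, 18 for
1G) hard-coded to mirror what KVM_HPAGE_GFN_SHIFT() evaluates to on x86 with
4K base pages. The macro names, the helper, and the main() here are
illustrative stand-ins, not the kernel's definitions.

/* Illustration only: same shape as kvm_max_level_for_order() above, with
 * the x86 4K-base-page shifts hard-coded (2M = 4K << 9, 1G = 4K << 18).
 */
#include <stdio.h>

enum { PG_LEVEL_4K = 1, PG_LEVEL_2M = 2, PG_LEVEL_1G = 3 };

#define GFN_SHIFT_2M 9   /* assumed value of KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) */
#define GFN_SHIFT_1G 18  /* assumed value of KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) */

static int max_level_for_order(int order)
{
        if (order >= GFN_SHIFT_1G)
                return PG_LEVEL_1G;
        if (order >= GFN_SHIFT_2M)
                return PG_LEVEL_2M;
        return PG_LEVEL_4K;
}

int main(void)
{
        const int orders[] = { 0, 4, 9, 10, 18 };

        for (unsigned int i = 0; i < sizeof(orders) / sizeof(orders[0]); i++)
                printf("guest_memfd folio order %2d -> max mapping level %d\n",
                       orders[i], max_level_for_order(orders[i]));
        return 0;
}

With 4K base pages, only a guest_memfd allocation of order >= 9 can back a 2M
mapping and only order >= 18 can back a 1G mapping; kvm_max_private_mapping_level()
then clamps the result further against the vendor gmem_max_mapping_level() hook.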