[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CA+EHjTwuXT_wcDAOwwKP+yBetE9N46QMb+hUKAOsxBVkkOgCTw@mail.gmail.com>
Date: Thu, 31 Jul 2025 09:15:10 +0100
From: Fuad Tabba <tabba@...gle.com>
To: Sean Christopherson <seanjc@...gle.com>
Cc: Paolo Bonzini <pbonzini@...hat.com>, Marc Zyngier <maz@...nel.org>,
Oliver Upton <oliver.upton@...ux.dev>, kvm@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev,
linux-kernel@...r.kernel.org, Ira Weiny <ira.weiny@...el.com>,
Gavin Shan <gshan@...hat.com>, Shivank Garg <shivankg@....com>, Vlastimil Babka <vbabka@...e.cz>,
Xiaoyao Li <xiaoyao.li@...el.com>, David Hildenbrand <david@...hat.com>,
Ackerley Tng <ackerleytng@...gle.com>, Tao Chan <chentao@...inos.cn>,
James Houghton <jthoughton@...gle.com>
Subject: Re: [PATCH v17 12/24] KVM: x86/mmu: Rename .private_max_mapping_level()
to .gmem_max_mapping_level()
On Tue, 29 Jul 2025 at 23:55, Sean Christopherson <seanjc@...gle.com> wrote:
>
> From: Ackerley Tng <ackerleytng@...gle.com>
>
> Rename kvm_x86_ops.private_max_mapping_level() to .gmem_max_mapping_level()
> in anticipation of extending guest_memfd support to non-private memory.
>
> No functional change intended.
>
> Reviewed-by: Xiaoyao Li <xiaoyao.li@...el.com>
> Acked-by: David Hildenbrand <david@...hat.com>
> Signed-off-by: Ackerley Tng <ackerleytng@...gle.com>
> Signed-off-by: Fuad Tabba <tabba@...gle.com>
> Co-developed-by: Sean Christopherson <seanjc@...gle.com>
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
> ---
nit: please remove my "Signed-off-by", since I'm not a co-developer, and instead add:
Reviewed-by: Fuad Tabba <tabba@...gle.com>
Cheers,
/fuad
> arch/x86/include/asm/kvm-x86-ops.h | 2 +-
> arch/x86/include/asm/kvm_host.h | 2 +-
> arch/x86/kvm/mmu/mmu.c | 2 +-
> arch/x86/kvm/svm/sev.c | 2 +-
> arch/x86/kvm/svm/svm.c | 2 +-
> arch/x86/kvm/svm/svm.h | 4 ++--
> arch/x86/kvm/vmx/main.c | 6 +++---
> arch/x86/kvm/vmx/tdx.c | 2 +-
> arch/x86/kvm/vmx/x86_ops.h | 2 +-
> 9 files changed, 12 insertions(+), 12 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
> index 18a5c3119e1a..62c3e4de3303 100644
> --- a/arch/x86/include/asm/kvm-x86-ops.h
> +++ b/arch/x86/include/asm/kvm-x86-ops.h
> @@ -145,7 +145,7 @@ KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
> KVM_X86_OP_OPTIONAL(get_untagged_addr)
> KVM_X86_OP_OPTIONAL(alloc_apic_backing_page)
> KVM_X86_OP_OPTIONAL_RET0(gmem_prepare)
> -KVM_X86_OP_OPTIONAL_RET0(private_max_mapping_level)
> +KVM_X86_OP_OPTIONAL_RET0(gmem_max_mapping_level)
> KVM_X86_OP_OPTIONAL(gmem_invalidate)
>
> #undef KVM_X86_OP
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 50366a1ca192..c0a739bf3829 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1922,7 +1922,7 @@ struct kvm_x86_ops {
> void *(*alloc_apic_backing_page)(struct kvm_vcpu *vcpu);
> int (*gmem_prepare)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
> void (*gmem_invalidate)(kvm_pfn_t start, kvm_pfn_t end);
> - int (*private_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn);
> + int (*gmem_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn);
> };
>
> struct kvm_x86_nested_ops {
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index fdc2824755ee..b735611e8fcd 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -4532,7 +4532,7 @@ static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
> if (max_level == PG_LEVEL_4K)
> return PG_LEVEL_4K;
>
> - req_max_level = kvm_x86_call(private_max_mapping_level)(kvm, pfn);
> + req_max_level = kvm_x86_call(gmem_max_mapping_level)(kvm, pfn);
> if (req_max_level)
> max_level = min(max_level, req_max_level);
>
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 7744c210f947..be1c80d79331 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -4947,7 +4947,7 @@ void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
> }
> }
>
> -int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
> +int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
> {
> int level, rc;
> bool assigned;
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index d9931c6c4bc6..8a66e2e985a4 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -5180,7 +5180,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
>
> .gmem_prepare = sev_gmem_prepare,
> .gmem_invalidate = sev_gmem_invalidate,
> - .private_max_mapping_level = sev_private_max_mapping_level,
> + .gmem_max_mapping_level = sev_gmem_max_mapping_level,
> };
>
> /*
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 58b9d168e0c8..d84a83ae18a1 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -866,7 +866,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
> void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
> int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
> void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
> -int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
> +int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
> struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu);
> void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa);
> #else
> @@ -895,7 +895,7 @@ static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, in
> return 0;
> }
> static inline void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) {}
> -static inline int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
> +static inline int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
> {
> return 0;
> }
> diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> index dbab1c15b0cd..dd7687ef7e2d 100644
> --- a/arch/x86/kvm/vmx/main.c
> +++ b/arch/x86/kvm/vmx/main.c
> @@ -831,10 +831,10 @@ static int vt_vcpu_mem_enc_ioctl(struct kvm_vcpu *vcpu, void __user *argp)
> return tdx_vcpu_ioctl(vcpu, argp);
> }
>
> -static int vt_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
> +static int vt_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
> {
> if (is_td(kvm))
> - return tdx_gmem_private_max_mapping_level(kvm, pfn);
> + return tdx_gmem_max_mapping_level(kvm, pfn);
>
> return 0;
> }
> @@ -1005,7 +1005,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
> .mem_enc_ioctl = vt_op_tdx_only(mem_enc_ioctl),
> .vcpu_mem_enc_ioctl = vt_op_tdx_only(vcpu_mem_enc_ioctl),
>
> - .private_max_mapping_level = vt_op_tdx_only(gmem_private_max_mapping_level)
> + .gmem_max_mapping_level = vt_op_tdx_only(gmem_max_mapping_level)
> };
>
> struct kvm_x86_init_ops vt_init_ops __initdata = {
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index 66744f5768c8..b444714e8e8a 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -3318,7 +3318,7 @@ int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp)
> return ret;
> }
>
> -int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
> +int tdx_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
> {
> return PG_LEVEL_4K;
> }
> diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
> index 2b3424f638db..6037d1708485 100644
> --- a/arch/x86/kvm/vmx/x86_ops.h
> +++ b/arch/x86/kvm/vmx/x86_ops.h
> @@ -153,7 +153,7 @@ int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
> void tdx_flush_tlb_current(struct kvm_vcpu *vcpu);
> void tdx_flush_tlb_all(struct kvm_vcpu *vcpu);
> void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
> -int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
> +int tdx_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
> #endif
>
> #endif /* __KVM_X86_VMX_X86_OPS_H */
> --
> 2.50.1.552.g942d659e1b-goog
>
Powered by blists - more mailing lists