Message-ID: <aPcG3LMA0qX5H5YI@yzhao56-desk.sh.intel.com>
Date: Tue, 21 Oct 2025 12:06:52 +0800
From: Yan Zhao <yan.y.zhao@...el.com>
To: Sean Christopherson <seanjc@...gle.com>
CC: Marc Zyngier <maz@...nel.org>, Oliver Upton <oliver.upton@...ux.dev>,
Tianrui Zhao <zhaotianrui@...ngson.cn>, Bibo Mao <maobibo@...ngson.cn>,
Huacai Chen <chenhuacai@...nel.org>, Madhavan Srinivasan
<maddy@...ux.ibm.com>, Anup Patel <anup@...infault.org>, Paul Walmsley
<pjw@...nel.org>, Palmer Dabbelt <palmer@...belt.com>, Albert Ou
<aou@...s.berkeley.edu>, Christian Borntraeger <borntraeger@...ux.ibm.com>,
Janosch Frank <frankja@...ux.ibm.com>, Claudio Imbrenda
<imbrenda@...ux.ibm.com>, Paolo Bonzini <pbonzini@...hat.com>, "Kirill A.
Shutemov" <kas@...nel.org>, <linux-arm-kernel@...ts.infradead.org>,
<kvmarm@...ts.linux.dev>, <kvm@...r.kernel.org>, <loongarch@...ts.linux.dev>,
<linux-mips@...r.kernel.org>, <linuxppc-dev@...ts.ozlabs.org>,
<kvm-riscv@...ts.infradead.org>, <linux-riscv@...ts.infradead.org>,
<x86@...nel.org>, <linux-coco@...ts.linux.dev>,
<linux-kernel@...r.kernel.org>, Ira Weiny <ira.weiny@...el.com>, Kai Huang
<kai.huang@...el.com>, Michael Roth <michael.roth@....com>, Vishal Annapurve
<vannapurve@...gle.com>, Rick Edgecombe <rick.p.edgecombe@...el.com>,
Ackerley Tng <ackerleytng@...gle.com>, Binbin Wu <binbin.wu@...ux.intel.com>
Subject: Re: [PATCH v3 04/25] KVM: x86/mmu: Add dedicated API to map
guest_memfd pfn into TDP MMU
On Thu, Oct 16, 2025 at 05:32:22PM -0700, Sean Christopherson wrote:
> Add and use a new API for mapping a private pfn from guest_memfd into the
> TDP MMU from TDX's post-populate hook instead of partially open-coding the
> functionality into the TDX code. Sharing code with the pre-fault path
> sounded good on paper, but it's fatally flawed as simulating a fault loses
> the pfn, and calling back into gmem to re-retrieve the pfn creates locking
> problems, e.g. kvm_gmem_populate() already holds the gmem invalidation
> lock.
>
> Providing a dedicated API will also allow removing several MMU exports that
> ideally would not be exposed outside of the MMU, let alone to vendor code.
> On that topic, opportunistically drop the kvm_mmu_load() export. Leave
> kvm_tdp_mmu_gpa_is_mapped() alone for now; the entire commit that added
> kvm_tdp_mmu_gpa_is_mapped() will be removed in the near future.
>
> Cc: Michael Roth <michael.roth@....com>
> Cc: Yan Zhao <yan.y.zhao@...el.com>
> Cc: Ira Weiny <ira.weiny@...el.com>
> Cc: Vishal Annapurve <vannapurve@...gle.com>
> Cc: Rick Edgecombe <rick.p.edgecombe@...el.com>
> Link: https://lore.kernel.org/all/20250709232103.zwmufocd3l7sqk7y@amd.com
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
> ---
> arch/x86/kvm/mmu.h | 1 +
> arch/x86/kvm/mmu/mmu.c | 60 +++++++++++++++++++++++++++++++++++++++++-
> arch/x86/kvm/vmx/tdx.c | 10 +++----
> 3 files changed, 63 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> index f63074048ec6..2f108e381959 100644
> --- a/arch/x86/kvm/mmu.h
> +++ b/arch/x86/kvm/mmu.h
> @@ -259,6 +259,7 @@ extern bool tdp_mmu_enabled;
>
> bool kvm_tdp_mmu_gpa_is_mapped(struct kvm_vcpu *vcpu, u64 gpa);
> int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code, u8 *level);
> +int kvm_tdp_mmu_map_private_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn);
>
> static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
> {
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 18d69d48bc55..ba5cca825a7f 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -5014,6 +5014,65 @@ long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
> return min(range->size, end - range->gpa);
> }
>
> +int kvm_tdp_mmu_map_private_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
> +{
> + struct kvm_page_fault fault = {
> + .addr = gfn_to_gpa(gfn),
> + .error_code = PFERR_GUEST_FINAL_MASK | PFERR_PRIVATE_ACCESS,
> + .prefetch = true,
> + .is_tdp = true,
> + .nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(vcpu->kvm),
> +
> + .max_level = PG_LEVEL_4K,
> + .req_level = PG_LEVEL_4K,
> + .goal_level = PG_LEVEL_4K,
> + .is_private = true,
> +
> + .gfn = gfn,
> + .slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn),
> + .pfn = pfn,
> + .map_writable = true,
> + };
> + struct kvm *kvm = vcpu->kvm;
> + int r;
> +
> + lockdep_assert_held(&kvm->slots_lock);
Do we need to assert that filemap_invalidate_lock() is held here as well?
Otherwise, a concurrent kvm_gmem_punch_hole(), which does not take slots_lock,
could make the pfn stale before it's mapped.
Or, alternatively, should this path check for a stale mapping itself? E.g.
something along the lines of the sketch below.
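Rough sketch of the two options (illustrative only; the fault.slot->gmem.file
dereference below glosses over the __rcu annotation, and option 2 assumes the
invalidate sequence count is snapshotted before the pfn is grabbed from
guest_memfd in the populate path):

	/* Option 1: assert that the caller holds the gmem invalidate lock. */
	struct inode *inode = file_inode(fault.slot->gmem.file);

	lockdep_assert_held(&inode->i_mapping->invalidate_lock);

	/* Option 2: detect a stale pfn, a la is_page_fault_stale(). */
	fault.mmu_seq = vcpu->kvm->mmu_invalidate_seq;
	smp_rmb();	/* order the seq read before the pfn lookup */
	...
	guard(read_lock)(&kvm->mmu_lock);
	if (mmu_invalidate_retry_gfn(kvm, fault.mmu_seq, fault.gfn))
		return -EAGAIN;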
> +
> + if (KVM_BUG_ON(!tdp_mmu_enabled, kvm))
> + return -EIO;
> +
> + if (kvm_gfn_is_write_tracked(kvm, fault.slot, fault.gfn))
> + return -EPERM;
> +
> + r = kvm_mmu_reload(vcpu);
> + if (r)
> + return r;
> +
> + r = mmu_topup_memory_caches(vcpu, false);
> + if (r)
> + return r;
> +
> + do {
> + if (signal_pending(current))
> + return -EINTR;
> +
> + if (kvm_test_request(KVM_REQ_VM_DEAD, vcpu))
> + return -EIO;
> +
> + cond_resched();
> +
> + guard(read_lock)(&kvm->mmu_lock);
> +
> + r = kvm_tdp_mmu_map(vcpu, &fault);
> + } while (r == RET_PF_RETRY);
> +
> + if (r != RET_PF_FIXED)
> + return -EIO;
> +
> + return 0;
> +}
> +EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_tdp_mmu_map_private_pfn);
> +
> static void nonpaging_init_context(struct kvm_mmu *context)
> {
> context->page_fault = nonpaging_page_fault;
> @@ -5997,7 +6056,6 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
> out:
> return r;
> }
> -EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_load);
>
> void kvm_mmu_unload(struct kvm_vcpu *vcpu)
> {
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index 4c3014befe9f..29f344af4cc2 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -3157,15 +3157,12 @@ struct tdx_gmem_post_populate_arg {
> static int tdx_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
> void __user *src, int order, void *_arg)
> {
> - u64 error_code = PFERR_GUEST_FINAL_MASK | PFERR_PRIVATE_ACCESS;
> - struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
> struct tdx_gmem_post_populate_arg *arg = _arg;
> - struct kvm_vcpu *vcpu = arg->vcpu;
> + struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
> + u64 err, entry, level_state;
> gpa_t gpa = gfn_to_gpa(gfn);
> - u8 level = PG_LEVEL_4K;
> struct page *src_page;
> int ret, i;
> - u64 err, entry, level_state;
>
> /*
> * Get the source page if it has been faulted in. Return failure if the
> @@ -3177,7 +3174,7 @@ static int tdx_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
> if (ret != 1)
> return -ENOMEM;
>
> - ret = kvm_tdp_map_page(vcpu, gpa, error_code, &level);
> + ret = kvm_tdp_mmu_map_private_pfn(arg->vcpu, gfn, pfn);
> if (ret < 0)
> goto out;
>
> @@ -3240,7 +3237,6 @@ static int tdx_vcpu_init_mem_region(struct kvm_vcpu *vcpu, struct kvm_tdx_cmd *c
> !vt_is_tdx_private_gpa(kvm, region.gpa + (region.nr_pages << PAGE_SHIFT) - 1))
> return -EINVAL;
>
> - kvm_mmu_reload(vcpu);
> ret = 0;
> while (region.nr_pages) {
> if (signal_pending(current)) {
> --
> 2.51.0.858.gf9c4a03a3a-goog
>