Message-ID: <4555c300-5934-4563-a639-3e43d2ce405f@linux.intel.com>
Date: Wed, 6 Mar 2024 15:13:22 +0800
From: Binbin Wu <binbin.wu@...ux.intel.com>
To: isaku.yamahata@...el.com
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
isaku.yamahata@...il.com, Paolo Bonzini <pbonzini@...hat.com>,
erdemaktas@...gle.com, Sean Christopherson <seanjc@...gle.com>,
Sagi Shahar <sagis@...gle.com>, Kai Huang <kai.huang@...el.com>,
chen.bo@...el.com, hang.yuan@...el.com, tina.zhang@...el.com,
Sean Christopherson <sean.j.christopherson@...el.com>
Subject: Re: [PATCH v19 016/130] KVM: x86/mmu: Introduce
kvm_mmu_map_tdp_page() for use by TDX
On 2/26/2024 4:25 PM, isaku.yamahata@...el.com wrote:
> From: Sean Christopherson <sean.j.christopherson@...el.com>
>
> Introduce a helper to directly (pun intended) fault-in a TDP page
> without having to go through the full page fault path. This allows
> TDX to get the resulting pfn and also allows the RET_PF_* enums to
> stay in mmu.c where they belong.
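For readers jumping into the series here: the "full page fault path"
means kvm_mmu_page_fault()/kvm_mmu_do_page_fault(), which consume the
RET_PF_* result internally (retry, emulate, resume the guest).
Sketching from memory rather than quoting the sources:

	r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, error_code, ...);
	/* r is a RET_PF_* value; it never leaves the MMU code. */

The helper below duplicates the fault setup but translates RET_PF_*
into errno values, so a caller outside mmu.c, such as TDX, never sees
the enum.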
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@...el.com>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
> ---
> v19:
> - Move up for KVM_MEMORY_MAPPING.
> - Add goal_level for the caller to know how many pages are mapped.
>
> v14 -> v15:
> - Remove the loop in kvm_mmu_map_tdp_page() and return an error code
>   based on the RET_PF_xxx value to avoid a potential infinite loop.
>   The caller should now loop on -EAGAIN instead.
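The expected calling pattern would then be something like the sketch
below (hypothetical caller, not part of this patch;
KVM_PAGES_PER_HPAGE() is the existing helper from kvm_host.h):

	u8 goal_level = PG_LEVEL_4K;
	int r;

	do {
		r = kvm_mmu_map_tdp_page(vcpu, gpa, error_code,
					 PG_LEVEL_1G, &goal_level);
	} while (r == -EAGAIN);
	if (r)
		return r;

	/* goal_level reports how much was actually mapped. */
	gpa += KVM_PAGES_PER_HPAGE(goal_level) * PAGE_SIZE;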
>
> ---
> arch/x86/kvm/mmu.h | 3 +++
> arch/x86/kvm/mmu/mmu.c | 58 ++++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 61 insertions(+)
>
> diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
> index 60f21bb4c27b..d96c93a25b3b 100644
> --- a/arch/x86/kvm/mmu.h
> +++ b/arch/x86/kvm/mmu.h
> @@ -183,6 +183,9 @@ static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
>  	__kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
>  }
>  
> +int kvm_mmu_map_tdp_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code,
> +			 u8 max_level, u8 *goal_level);
> +
>  /*
>   * Check if a given access (described through the I/D, W/R and U/S bits of a
>   * page fault error code pfec) causes a permission fault with the given PTE
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 61674d6b17aa..ca0c91f14063 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -4615,6 +4615,64 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
>  	return direct_page_fault(vcpu, fault);
>  }
>
> +int kvm_mmu_map_tdp_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code,
> +			 u8 max_level, u8 *goal_level)
> +{
> +	int r;
> +	struct kvm_page_fault fault = (struct kvm_page_fault) {
> +		.addr = gpa,
> +		.error_code = error_code,
> +		.exec = error_code & PFERR_FETCH_MASK,
> +		.write = error_code & PFERR_WRITE_MASK,
> +		.present = error_code & PFERR_PRESENT_MASK,
> +		.rsvd = error_code & PFERR_RSVD_MASK,
> +		.user = error_code & PFERR_USER_MASK,
> +		.prefetch = false,
> +		.is_tdp = true,
> +		.is_private = error_code & PFERR_GUEST_ENC_MASK,
> +		.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(vcpu->kvm),
> +	};
> +
> +	WARN_ON_ONCE(!vcpu->arch.mmu->root_role.direct);
> +	fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
> +
> +	r = mmu_topup_memory_caches(vcpu, false);
Is the cache topup needed here?
Both kvm_tdp_mmu_page_fault() and direct_page_fault() already call
mmu_topup_memory_caches() themselves when needed.
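If I'm reading the current code correctly, both entry points already
start with roughly (paraphrased, not quoted):

	r = fast_page_fault(vcpu, fault);
	if (r != RET_PF_INVALID)
		return r;

	r = mmu_topup_memory_caches(vcpu, false);
	if (r)
		return r;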
> +	if (r)
> +		return r;
> +
> +	fault.max_level = max_level;
> +	fault.req_level = PG_LEVEL_4K;
> +	fault.goal_level = PG_LEVEL_4K;
> +
> +#ifdef CONFIG_X86_64
> +	if (tdp_mmu_enabled)
> +		r = kvm_tdp_mmu_page_fault(vcpu, &fault);
> +	else
> +#endif
> +	r = direct_page_fault(vcpu, &fault);
> +
> +	if (is_error_noslot_pfn(fault.pfn) || vcpu->kvm->vm_bugged)
> +		return -EFAULT;
> +
> +	switch (r) {
> +	case RET_PF_RETRY:
> +		return -EAGAIN;
> +
> +	case RET_PF_FIXED:
> +	case RET_PF_SPURIOUS:
> +		if (goal_level)
> +			*goal_level = fault.goal_level;
> +		return 0;
> +
> +	case RET_PF_CONTINUE:
> +	case RET_PF_EMULATE:
> +	case RET_PF_INVALID:
> +	default:
> +		return -EIO;
> +	}
> +}
> +EXPORT_SYMBOL_GPL(kvm_mmu_map_tdp_page);
> +
>  static void nonpaging_init_context(struct kvm_mmu *context)
>  {
>  	context->page_fault = nonpaging_page_fault;