[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <c62e8f7e-46ed-47e3-b7ff-231bd1f343e5@linux.intel.com>
Date: Mon, 20 Nov 2023 19:05:39 +0800
From: Binbin Wu <binbin.wu@...ux.intel.com>
To: isaku.yamahata@...el.com
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
isaku.yamahata@...il.com, Paolo Bonzini <pbonzini@...hat.com>,
erdemaktas@...gle.com, Sean Christopherson <seanjc@...gle.com>,
Sagi Shahar <sagis@...gle.com>,
David Matlack <dmatlack@...gle.com>,
Kai Huang <kai.huang@...el.com>,
Zhi Wang <zhi.wang.linux@...il.com>, chen.bo@...el.com,
hang.yuan@...el.com, tina.zhang@...el.com,
Xiaoyao Li <xiaoyao.li@...el.com>
Subject: Re: [PATCH v6 08/16] KVM: TDX: Pin pages via get_page() right before
ADD/AUG'ed to TDs
On 11/7/2023 11:00 PM, isaku.yamahata@...el.com wrote:
> From: Xiaoyao Li <xiaoyao.li@...el.com>
>
> When kvm_faultin_pfn(), it doesn't have the info regarding which page level
> will the gfn be mapped at. Hence it doesn't know to pin a 4K page or a
> 2M page.
>
> Move the guest private pages pinning logic right before
> TDH_MEM_PAGE_ADD/AUG() since at that time it knows the page level info.
This patch looks strange: the code change has nothing to do with the shortlog.
It also seems the changes in this patch have already been covered by patch 06/16.
Did something go wrong when formatting the patch series?
>
> Signed-off-by: Xiaoyao Li <xiaoyao.li@...el.com>
> ---
> arch/x86/kvm/vmx/tdx.c | 15 ++++++++-------
> 1 file changed, 8 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index e4167f08b58b..7b81811eb404 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -1454,7 +1454,8 @@ static void tdx_measure_page(struct kvm_tdx *kvm_tdx, hpa_t gpa, int size)
> }
> }
>
> -static void tdx_unpin(struct kvm *kvm, kvm_pfn_t pfn, int level)
> +static void tdx_unpin(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
> + enum pg_level level)
> {
> int i;
>
> @@ -1476,7 +1477,7 @@ static int tdx_sept_page_aug(struct kvm *kvm, gfn_t gfn,
>
> err = tdh_mem_page_aug(kvm_tdx->tdr_pa, gpa, tdx_level, hpa, &out);
> if (unlikely(err == TDX_ERROR_SEPT_BUSY)) {
> - tdx_unpin(kvm, pfn, level);
> + tdx_unpin(kvm, gfn, pfn, level);
> return -EAGAIN;
> }
> if (unlikely(err == (TDX_EPT_ENTRY_STATE_INCORRECT | TDX_OPERAND_ID_RCX))) {
> @@ -1493,7 +1494,7 @@ static int tdx_sept_page_aug(struct kvm *kvm, gfn_t gfn,
> }
> if (KVM_BUG_ON(err, kvm)) {
> pr_tdx_error(TDH_MEM_PAGE_AUG, err, &out);
> - tdx_unpin(kvm, pfn, level);
> + tdx_unpin(kvm, gfn, pfn, level);
> return -EIO;
> }
>
> @@ -1529,7 +1530,7 @@ static int tdx_sept_page_add(struct kvm *kvm, gfn_t gfn,
> * always uses vcpu 0's page table and protected by vcpu->mutex).
> */
> if (KVM_BUG_ON(kvm_tdx->source_pa == INVALID_PAGE, kvm)) {
> - tdx_unpin(kvm, pfn, level);
> + tdx_unpin(kvm, gfn, pfn, level);
> return -EINVAL;
> }
>
> @@ -1547,7 +1548,7 @@ static int tdx_sept_page_add(struct kvm *kvm, gfn_t gfn,
> } while (unlikely(err == TDX_ERROR_SEPT_BUSY));
> if (KVM_BUG_ON(err, kvm)) {
> pr_tdx_error(TDH_MEM_PAGE_ADD, err, &out);
> - tdx_unpin(kvm, pfn, level);
> + tdx_unpin(kvm, gfn, pfn, level);
> return -EIO;
> } else if (measure)
> tdx_measure_page(kvm_tdx, gpa, KVM_HPAGE_SIZE(level));
> @@ -1600,7 +1601,7 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
> err = tdx_reclaim_page(hpa, level);
> if (KVM_BUG_ON(err, kvm))
> return -EIO;
> - tdx_unpin(kvm, pfn, level);
> + tdx_unpin(kvm, gfn, pfn, level);
> return 0;
> }
>
> @@ -1633,7 +1634,7 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
> r = -EIO;
> } else {
> tdx_clear_page(hpa, PAGE_SIZE);
> - tdx_unpin(kvm, pfn + i, PG_LEVEL_4K);
> + tdx_unpin(kvm, gfn + i, pfn + i, PG_LEVEL_4K);
> }
> hpa += PAGE_SIZE;
> }
Powered by blists - more mailing lists