[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <9552800c-6f32-6677-7dc2-2774e6ea6348@linux.intel.com>
Date: Thu, 7 Sep 2023 13:26:19 +0800
From: Binbin Wu <binbin.wu@...ux.intel.com>
To: isaku.yamahata@...el.com, Xiaoyao Li <xiaoyao.li@...el.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
isaku.yamahata@...il.com, Paolo Bonzini <pbonzini@...hat.com>,
erdemaktas@...gle.com, Sean Christopherson <seanjc@...gle.com>,
Sagi Shahar <sagis@...gle.com>,
David Matlack <dmatlack@...gle.com>,
Kai Huang <kai.huang@...el.com>,
Zhi Wang <zhi.wang.linux@...il.com>, chen.bo@...el.com,
hang.yuan@...el.com, tina.zhang@...el.com
Subject: Re: [RFC PATCH v4 08/16] KVM: TDX: Pin pages via get_page() right
before ADD/AUG'ed to TDs
On 7/26/2023 6:23 AM, isaku.yamahata@...el.com wrote:
> From: Xiaoyao Li <xiaoyao.li@...el.com>
>
> When kvm_faultin_pfn(), it doesn't have the info regarding which page level
> will the gfn be mapped at. Hence it doesn't know to pin a 4K page or a
> 2M page.
>
> Move the guest private pages pinning logic right before
> TDH_MEM_PAGE_ADD/AUG() since at that time it knows the page level info.
The code change in this patch doesn't match the changelog description.
>
> Signed-off-by: Xiaoyao Li <xiaoyao.li@...el.com>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
> ---
> arch/x86/kvm/vmx/tdx.c | 15 ++++++++-------
> 1 file changed, 8 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index c122160142fd..bd1582e6b693 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -1361,7 +1361,8 @@ static void tdx_measure_page(struct kvm_tdx *kvm_tdx, hpa_t gpa, int size)
> }
> }
>
> -static void tdx_unpin(struct kvm *kvm, kvm_pfn_t pfn, int level)
> +static void tdx_unpin(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
> + enum pg_level level)
> {
> int i;
>
> @@ -1397,12 +1398,12 @@ static int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
> if (likely(is_td_finalized(kvm_tdx))) {
> err = tdh_mem_page_aug(kvm_tdx->tdr_pa, gpa, tdx_level, hpa, &out);
> if (unlikely(err == TDX_ERROR_SEPT_BUSY)) {
> - tdx_unpin(kvm, pfn, level);
> + tdx_unpin(kvm, gfn, pfn, level);
> return -EAGAIN;
> }
> if (KVM_BUG_ON(err, kvm)) {
> pr_tdx_error(TDH_MEM_PAGE_AUG, err, &out);
> - tdx_unpin(kvm, pfn, level);
> + tdx_unpin(kvm, gfn, pfn, level);
> return -EIO;
> }
> return 0;
> @@ -1425,7 +1426,7 @@ static int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
> * always uses vcpu 0's page table and protected by vcpu->mutex).
> */
> if (KVM_BUG_ON(kvm_tdx->source_pa == INVALID_PAGE, kvm)) {
> - tdx_unpin(kvm, pfn, level);
> + tdx_unpin(kvm, gfn, pfn, level);
> return -EINVAL;
> }
>
> @@ -1443,7 +1444,7 @@ static int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
> } while (unlikely(err == TDX_ERROR_SEPT_BUSY));
> if (KVM_BUG_ON(err, kvm)) {
> pr_tdx_error(TDH_MEM_PAGE_ADD, err, &out);
> - tdx_unpin(kvm, pfn, level);
> + tdx_unpin(kvm, gfn, pfn, level);
> return -EIO;
> } else if (measure)
> tdx_measure_page(kvm_tdx, gpa, KVM_HPAGE_SIZE(level));
> @@ -1472,7 +1473,7 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
> err = tdx_reclaim_page(hpa, level, false, 0);
> if (KVM_BUG_ON(err, kvm))
> return -EIO;
> - tdx_unpin(kvm, pfn, level);
> + tdx_unpin(kvm, gfn, pfn, level);
> return 0;
> }
>
> @@ -1505,7 +1506,7 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
> r = -EIO;
> } else {
> tdx_clear_page(hpa, PAGE_SIZE);
> - tdx_unpin(kvm, pfn + i, PG_LEVEL_4K);
> + tdx_unpin(kvm, gfn + i, pfn + i, PG_LEVEL_4K);
> }
> hpa += PAGE_SIZE;
> }
Powered by blists - more mailing lists