Message-ID: <aEeaJH1KqZ38tgKi@intel.com>
Date: Tue, 10 Jun 2025 10:36:20 +0800
From: Chao Gao <chao.gao@...el.com>
To: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
CC: <pbonzini@...hat.com>, <seanjc@...gle.com>, <dave.hansen@...ux.intel.com>,
<rick.p.edgecombe@...el.com>, <isaku.yamahata@...el.com>,
<kai.huang@...el.com>, <yan.y.zhao@...el.com>, <tglx@...utronix.de>,
<mingo@...hat.com>, <bp@...en8.de>, <kvm@...r.kernel.org>, <x86@...nel.org>,
<linux-coco@...ts.linux.dev>, <linux-kernel@...r.kernel.org>
Subject: Re: [PATCHv2 04/12] x86/virt/tdx: Add tdx_alloc/free_page() helpers
>+static int tdx_alloc_pamt_pages(struct list_head *pamt_pages)
>+{
>+	for (int i = 0; i < tdx_nr_pamt_pages(); i++) {
>+		struct page *page = alloc_page(GFP_KERNEL);
>+		if (!page)
>+			goto fail;
This goto isn't needed; the label is used only once, so we can just free the
pages and return -ENOMEM right here. See the (untested) sketch below the
quoted function.
>+		list_add(&page->lru, pamt_pages);
>+	}
>+	return 0;
>+fail:
>+	tdx_free_pamt_pages(pamt_pages);
>+	return -ENOMEM;
>+}
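
Something like this (untested, just to illustrate the idea):

static int tdx_alloc_pamt_pages(struct list_head *pamt_pages)
{
	for (int i = 0; i < tdx_nr_pamt_pages(); i++) {
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page) {
			/* Undo the partial allocation */
			tdx_free_pamt_pages(pamt_pages);
			return -ENOMEM;
		}

		list_add(&page->lru, pamt_pages);
	}

	return 0;
}
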
>+
>+static int tdx_pamt_add(atomic_t *pamt_refcount, unsigned long hpa,
>+			struct list_head *pamt_pages)
>+{
>+	u64 err;
>+
>+	guard(spinlock)(&pamt_lock);
>+
>+	hpa = ALIGN_DOWN(hpa, PMD_SIZE);
>+
>+	/* Lost race to other tdx_pamt_add() */
>+	if (atomic_read(pamt_refcount) != 0) {
>+		atomic_inc(pamt_refcount);
>+		return 1;
>+	}
>+
>+	err = tdh_phymem_pamt_add(hpa | TDX_PS_2M, pamt_pages);
>+
>+	/*
>+	 * tdx_hpa_range_not_free() is true if current task won race
>+	 * against tdx_pamt_put().
>+	 */
>+	if (err && !tdx_hpa_range_not_free(err)) {
>+		pr_err("TDH_PHYMEM_PAMT_ADD failed: %#llx\n", err);
>+		return -EIO;
>+	}
>+
>+	atomic_set(pamt_refcount, 1);
>+
>+	if (tdx_hpa_range_not_free(err))
>+		return 1;
I think the 0/1/-EIO return values need a comment above the function; a
suggested wording is below the quoted code.
>+
>+	return 0;
>+}
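
For instance (just a suggested wording, based on my reading of the code):

/*
 * Returns:
 *  0    - PAMT memory was added for the 2M range covering @hpa
 *  1    - PAMT memory was already present (either another tdx_pamt_add()
 *         won the race, or the range turned out to be already covered)
 *  -EIO - TDH.PHYMEM.PAMT.ADD failed
 */
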
>+
>+static int tdx_pamt_get(struct page *page, enum pg_level level)
>+{
>+	unsigned long hpa = page_to_phys(page);
>+	atomic_t *pamt_refcount;
>+	LIST_HEAD(pamt_pages);
>+	int ret;
>+
>+	if (!tdx_supports_dynamic_pamt(&tdx_sysinfo))
>+		return 0;
>+
>+	if (level != PG_LEVEL_4K)
>+		return 0;
This also needs a comment, i.e., explain why we return success directly for
large pages. E.g.:
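
Something along these lines, if my understanding of Dynamic PAMT is right
(only PAMT_4K is allocated on demand; PAMT for 2M/1G granularity is still
allocated statically):

	/*
	 * Dynamic PAMT only covers 4K granularity; PAMT memory for the
	 * 2M/1G levels is allocated statically at TDX module init, so
	 * there is nothing to do for large pages.
	 */
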
<snip>