[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <3b2d90dc8ae619c5d9372d6c5e22c47aeea1ef0b.camel@intel.com>
Date: Thu, 31 Jul 2025 01:06:33 +0000
From: "Edgecombe, Rick P" <rick.p.edgecombe@...el.com>
To: "kirill.shutemov@...ux.intel.com" <kirill.shutemov@...ux.intel.com>,
"pbonzini@...hat.com" <pbonzini@...hat.com>, "Hansen, Dave"
<dave.hansen@...el.com>, "seanjc@...gle.com" <seanjc@...gle.com>,
"dave.hansen@...ux.intel.com" <dave.hansen@...ux.intel.com>
CC: "Gao, Chao" <chao.gao@...el.com>, "bp@...en8.de" <bp@...en8.de>, "Huang,
Kai" <kai.huang@...el.com>, "x86@...nel.org" <x86@...nel.org>,
"mingo@...hat.com" <mingo@...hat.com>, "Zhao, Yan Y" <yan.y.zhao@...el.com>,
"tglx@...utronix.de" <tglx@...utronix.de>, "kvm@...r.kernel.org"
<kvm@...r.kernel.org>, "linux-coco@...ts.linux.dev"
<linux-coco@...ts.linux.dev>, "Yamahata, Isaku" <isaku.yamahata@...el.com>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: Re: [PATCHv2 02/12] x86/virt/tdx: Allocate page bitmap for Dynamic
PAMT
On Wed, 2025-06-25 at 11:06 -0700, Dave Hansen wrote:
> This is the wrong place to do this.
>
> Hide it in tdmr_get_pamt_sz(). Don't inject it in the main code flow
> here and complicate the for loop.
I'm finding this tdmr_get_pamt_sz() maybe too strange to build on top of.
The caller iterates through these special TDX page sizes once, calling into
tdmr_get_pamt_sz() for each, which in turn has a case statement for each
index. So the loop doesn't add much - each index still has its own line
of code inside tdmr_get_pamt_sz(). And then despite prepping the base/size
in an array via the loop, it has to be packed manually at the end for each
index. So I'm not sure if the general wisdom of doing things in a single way
is really adding much here.
I'm wondering if something like the below might be a better base to build
on. For Dynamic PAMT, the "tdmr->pamt_4k_size =" line could just branch on
tdx_supports_dynamic_pamt(). Any thoughts on it as an alternative to the
suggestion to add the Dynamic PAMT logic to tdmr_get_pamt_sz()?
arch/x86/virt/vmx/tdx/tdx.c | 69 ++++++++++++++++++---------------------------------------------------
1 file changed, 18 insertions(+), 51 deletions(-)
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index c7a9a087ccaf..8de6fa3e5773 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -445,30 +445,16 @@ static int fill_out_tdmrs(struct list_head *tmb_list,
* PAMT size is always aligned up to 4K page boundary.
*/
static unsigned long tdmr_get_pamt_sz(struct tdmr_info *tdmr, int pgsz,
- u16 pamt_entry_size)
+ u16 pamt_entry_size[])
{
unsigned long pamt_sz, nr_pamt_entries;
+ const int tdx_pg_size_shift[] = { PAGE_SHIFT, PMD_SHIFT, PUD_SHIFT };
- switch (pgsz) {
- case TDX_PS_4K:
- nr_pamt_entries = tdmr->size >> PAGE_SHIFT;
- break;
- case TDX_PS_2M:
- nr_pamt_entries = tdmr->size >> PMD_SHIFT;
- break;
- case TDX_PS_1G:
- nr_pamt_entries = tdmr->size >> PUD_SHIFT;
- break;
- default:
- WARN_ON_ONCE(1);
- return 0;
- }
+ nr_pamt_entries = tdmr->size >> tdx_pg_size_shift[pgsz];
+ pamt_sz = nr_pamt_entries * pamt_entry_size[pgsz];
- pamt_sz = nr_pamt_entries * pamt_entry_size;
/* TDX requires PAMT size must be 4K aligned */
- pamt_sz = ALIGN(pamt_sz, PAGE_SIZE);
-
- return pamt_sz;
+ return PAGE_ALIGN(pamt_sz);
}
/*
@@ -509,25 +495,19 @@ static int tdmr_set_up_pamt(struct tdmr_info *tdmr,
struct list_head *tmb_list,
u16 pamt_entry_size[])
{
- unsigned long pamt_base[TDX_PS_NR];
- unsigned long pamt_size[TDX_PS_NR];
- unsigned long tdmr_pamt_base;
unsigned long tdmr_pamt_size;
struct page *pamt;
- int pgsz, nid;
-
+ int nid;
nid = tdmr_get_nid(tdmr, tmb_list);
/*
* Calculate the PAMT size for each TDX supported page size
* and the total PAMT size.
*/
- tdmr_pamt_size = 0;
- for (pgsz = TDX_PS_4K; pgsz < TDX_PS_NR; pgsz++) {
- pamt_size[pgsz] = tdmr_get_pamt_sz(tdmr, pgsz,
- pamt_entry_size[pgsz]);
- tdmr_pamt_size += pamt_size[pgsz];
- }
+ tdmr->pamt_4k_size = tdmr_get_pamt_sz(tdmr, TDX_PS_4K, pamt_entry_size);
+ tdmr->pamt_2m_size = tdmr_get_pamt_sz(tdmr, TDX_PS_2M, pamt_entry_size);
+ tdmr->pamt_1g_size = tdmr_get_pamt_sz(tdmr, TDX_PS_1G, pamt_entry_size);
+ tdmr_pamt_size = tdmr->pamt_4k_size + tdmr->pamt_2m_size + tdmr->pamt_1g_size;
/*
* Allocate one chunk of physically contiguous memory for all
@@ -535,26 +515,16 @@ static int tdmr_set_up_pamt(struct tdmr_info *tdmr,
* in overlapped TDMRs.
*/
pamt = alloc_contig_pages(tdmr_pamt_size >> PAGE_SHIFT, GFP_KERNEL,
- nid, &node_online_map);
- if (!pamt)
+ nid, &node_online_map);
+ if (!pamt) {
+ /* Zero base so that the error path will skip freeing. */
+ tdmr->pamt_4k_base = 0;
return -ENOMEM;
-
- /*
- * Break the contiguous allocation back up into the
- * individual PAMTs for each page size.
- */
- tdmr_pamt_base = page_to_pfn(pamt) << PAGE_SHIFT;
- for (pgsz = TDX_PS_4K; pgsz < TDX_PS_NR; pgsz++) {
- pamt_base[pgsz] = tdmr_pamt_base;
- tdmr_pamt_base += pamt_size[pgsz];
}
- tdmr->pamt_4k_base = pamt_base[TDX_PS_4K];
- tdmr->pamt_4k_size = pamt_size[TDX_PS_4K];
- tdmr->pamt_2m_base = pamt_base[TDX_PS_2M];
- tdmr->pamt_2m_size = pamt_size[TDX_PS_2M];
- tdmr->pamt_1g_base = pamt_base[TDX_PS_1G];
- tdmr->pamt_1g_size = pamt_size[TDX_PS_1G];
+ tdmr->pamt_4k_base = page_to_phys(pamt);
+ tdmr->pamt_2m_base = tdmr->pamt_4k_base + tdmr->pamt_4k_size;
+ tdmr->pamt_1g_base = tdmr->pamt_2m_base + tdmr->pamt_2m_size;
return 0;
}
@@ -585,10 +555,7 @@ static void tdmr_do_pamt_func(struct tdmr_info *tdmr,
tdmr_get_pamt(tdmr, &pamt_base, &pamt_size);
/* Do nothing if PAMT hasn't been allocated for this TDMR */
- if (!pamt_size)
- return;
-
- if (WARN_ON_ONCE(!pamt_base))
+ if (!pamt_base)
return;
pamt_func(pamt_base, pamt_size);
Powered by blists - more mailing lists