Message-ID: <50ad132f-463f-3b61-6aab-b96a57b6f3a1@intel.com>
Date: Fri, 6 Jan 2023 13:53:40 -0800
From: Dave Hansen <dave.hansen@...el.com>
To: Kai Huang <kai.huang@...el.com>, linux-kernel@...r.kernel.org,
kvm@...r.kernel.org
Cc: linux-mm@...ck.org, peterz@...radead.org, tglx@...utronix.de,
seanjc@...gle.com, pbonzini@...hat.com, dan.j.williams@...el.com,
rafael.j.wysocki@...el.com, kirill.shutemov@...ux.intel.com,
ying.huang@...el.com, reinette.chatre@...el.com,
len.brown@...el.com, tony.luck@...el.com, ak@...ux.intel.com,
isaku.yamahata@...el.com, chao.gao@...el.com,
sathyanarayanan.kuppuswamy@...ux.intel.com, bagasdotme@...il.com,
sagis@...gle.com, imammedo@...hat.com
Subject: Re: [PATCH v8 10/16] x86/virt/tdx: Allocate and set up PAMTs for
TDMRs
Looks good so far.
> +/*
> + * Allocate PAMTs from the local NUMA node of some memory in @tmb_list
> + * within @tdmr, and set up PAMTs for @tdmr.
> + */
> +static int tdmr_set_up_pamt(struct tdmr_info *tdmr,
> +                            struct list_head *tmb_list,
> +                            u16 pamt_entry_size)
> +{
> +        unsigned long pamt_base[TDX_PS_1G + 1];
> +        unsigned long pamt_size[TDX_PS_1G + 1];
Nit: please define a TDX_PS_NR rather than open-coding this.
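Something like this is all I mean -- a sketch only, and TDX_PS_NR is a
name I'm suggesting rather than something already in the tree (this
assumes the TDX_PS_* constants stay consecutive values starting at 0):

        enum tdx_page_size {
                TDX_PS_4K,
                TDX_PS_2M,
                TDX_PS_1G,
                TDX_PS_NR,      /* number of TDX-supported page sizes */
        };

        unsigned long pamt_base[TDX_PS_NR];
        unsigned long pamt_size[TDX_PS_NR];

The "pgsz <= TDX_PS_1G" loop bounds below could then become
"pgsz < TDX_PS_NR" as well.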
> +        unsigned long tdmr_pamt_base;
> +        unsigned long tdmr_pamt_size;
> +        struct page *pamt;
> +        int pgsz, nid;
> +
> +        nid = tdmr_get_nid(tdmr, tmb_list);
> +
> +        /*
> +         * Calculate the PAMT size for each TDX supported page size
> +         * and the total PAMT size.
> +         */
> +        tdmr_pamt_size = 0;
> +        for (pgsz = TDX_PS_4K; pgsz <= TDX_PS_1G ; pgsz++) {
> +                pamt_size[pgsz] = tdmr_get_pamt_sz(tdmr, pgsz,
> +                                pamt_entry_size);
This alignment is wonky. Should be way over here:
> +                                                   pamt_entry_size);
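That is, with the continuation aligned to the opening parenthesis,
something like (columns approximate):

        for (pgsz = TDX_PS_4K; pgsz <= TDX_PS_1G ; pgsz++) {
                pamt_size[pgsz] = tdmr_get_pamt_sz(tdmr, pgsz,
                                                   pamt_entry_size);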
> +                tdmr_pamt_size += pamt_size[pgsz];
> +        }
> +
> +        /*
> +         * Allocate one chunk of physically contiguous memory for all
> +         * PAMTs. This helps minimize the PAMT's use of reserved areas
> +         * in overlapped TDMRs.
> +         */
> +        pamt = alloc_contig_pages(tdmr_pamt_size >> PAGE_SHIFT, GFP_KERNEL,
> +                                  nid, &node_online_map);
> +        if (!pamt)
> +                return -ENOMEM;
> +
> +        /*
> +         * Break the contiguous allocation back up into the
> +         * individual PAMTs for each page size.
> +         */
> +        tdmr_pamt_base = page_to_pfn(pamt) << PAGE_SHIFT;
> +        for (pgsz = TDX_PS_4K; pgsz <= TDX_PS_1G; pgsz++) {
> +                pamt_base[pgsz] = tdmr_pamt_base;
> +                tdmr_pamt_base += pamt_size[pgsz];
> +        }
> +
> +        tdmr->pamt_4k_base = pamt_base[TDX_PS_4K];
> +        tdmr->pamt_4k_size = pamt_size[TDX_PS_4K];
> +        tdmr->pamt_2m_base = pamt_base[TDX_PS_2M];
> +        tdmr->pamt_2m_size = pamt_size[TDX_PS_2M];
> +        tdmr->pamt_1g_base = pamt_base[TDX_PS_1G];
> +        tdmr->pamt_1g_size = pamt_size[TDX_PS_1G];
> +
> +        return 0;
> +}
> +
> +static void tdmr_get_pamt(struct tdmr_info *tdmr, unsigned long *pamt_pfn,
> +                          unsigned long *pamt_npages)
> +{
> +        unsigned long pamt_base, pamt_sz;
> +
> +        /*
> +         * The PAMT was allocated in one contiguous unit. The 4K PAMT
> +         * should always point to the beginning of that allocation.
> +         */
> +        pamt_base = tdmr->pamt_4k_base;
> +        pamt_sz = tdmr->pamt_4k_size + tdmr->pamt_2m_size + tdmr->pamt_1g_size;
> +
> +        *pamt_pfn = PHYS_PFN(pamt_base);
> +        *pamt_npages = pamt_sz >> PAGE_SHIFT;
> +}
> +
> +static void tdmr_free_pamt(struct tdmr_info *tdmr)
> +{
> +        unsigned long pamt_pfn, pamt_npages;
> +
> +        tdmr_get_pamt(tdmr, &pamt_pfn, &pamt_npages);
> +
> +        /* Do nothing if PAMT hasn't been allocated for this TDMR */
> +        if (!pamt_npages)
> +                return;
> +
> +        if (WARN_ON_ONCE(!pamt_pfn))
> +                return;
> +
> +        free_contig_range(pamt_pfn, pamt_npages);
> +}
> +
> +static void tdmrs_free_pamt_all(struct tdmr_info_list *tdmr_list)
> +{
> +        int i;
> +
> +        for (i = 0; i < tdmr_list->nr_tdmrs; i++)
> +                tdmr_free_pamt(tdmr_entry(tdmr_list, i));
> +}
> +
> +/* Allocate and set up PAMTs for all TDMRs */
> +static int tdmrs_set_up_pamt_all(struct tdmr_info_list *tdmr_list,
> +                                 struct list_head *tmb_list,
> +                                 u16 pamt_entry_size)
> +{
> +        int i, ret = 0;
> +
> +        for (i = 0; i < tdmr_list->nr_tdmrs; i++) {
> +                ret = tdmr_set_up_pamt(tdmr_entry(tdmr_list, i), tmb_list,
> +                                       pamt_entry_size);
> +                if (ret)
> +                        goto err;
> +        }
> +
> +        return 0;
> +err:
> +        tdmrs_free_pamt_all(tdmr_list);
> +        return ret;
> +}
> +
> +static unsigned long tdmrs_count_pamt_pages(struct tdmr_info_list *tdmr_list)
> +{
> +        unsigned long pamt_npages = 0;
> +        int i;
> +
> +        for (i = 0; i < tdmr_list->nr_tdmrs; i++) {
> +                unsigned long pfn, npages;
> +
> +                tdmr_get_pamt(tdmr_entry(tdmr_list, i), &pfn, &npages);
> +                pamt_npages += npages;
> +        }
> +
> +        return pamt_npages;
> +}
> +
>  /*
>   * Construct a list of TDMRs on the preallocated space in @tdmr_list
>   * to cover all TDX memory regions in @tmb_list based on the TDX module
> @@ -506,15 +702,19 @@ static int construct_tdmrs(struct list_head *tmb_list,
>          if (ret)
>                  goto err;
> 
> +        ret = tdmrs_set_up_pamt_all(tdmr_list, tmb_list,
> +                                    sysinfo->pamt_entry_size);
> +        if (ret)
> +                goto err;
>          /*
>           * TODO:
>           *
> -         * - Allocate and set up PAMTs for each TDMR.
>           * - Designate reserved areas for each TDMR.
>           *
>           * Return -EINVAL until constructing TDMRs is done
>           */
>          ret = -EINVAL;
> +        tdmrs_free_pamt_all(tdmr_list);
>  err:
>          return ret;
>  }
> @@ -574,6 +774,11 @@ static int init_tdx_module(void)
>           * Return error before all steps are done.
>           */
>          ret = -EINVAL;
> +        if (ret)
> +                tdmrs_free_pamt_all(&tdmr_list);
> +        else
> +                pr_info("%lu pages allocated for PAMT.\n",
> +                        tdmrs_count_pamt_pages(&tdmr_list));
>  out_free_tdmrs:
>          /*
>           * Free the space for the TDMRs no matter the initialization is
Other than the two nits:
Reviewed-by: Dave Hansen <dave.hansen@...ux.intel.com>