Message-Id: <e097477f93c17a42f977a75ac0f060ad68f54ec7.1646007267.git.kai.huang@intel.com>
Date: Mon, 28 Feb 2022 15:13:02 +1300
From: Kai Huang <kai.huang@...el.com>
To: x86@...nel.org
Cc: tglx@...utronix.de, mingo@...hat.com, bp@...en8.de,
dave.hansen@...el.com, luto@...nel.org, kvm@...r.kernel.org,
pbonzini@...hat.com, seanjc@...gle.com, hpa@...or.com,
peterz@...radead.org, kirill.shutemov@...ux.intel.com,
sathyanarayanan.kuppuswamy@...ux.intel.com, tony.luck@...el.com,
ak@...ux.intel.com, dan.j.williams@...el.com,
chang.seok.bae@...el.com, keescook@...omium.org,
hengqi.arch@...edance.com, laijs@...ux.alibaba.com,
metze@...ba.org, linux-kernel@...r.kernel.org, kai.huang@...el.com
Subject: [RFC PATCH 14/21] x86/virt/tdx: Set up reserved areas for all TDMRs
As the last step of constructing TDMRs, create reserved area information
for the memory holes in each TDMR. If any PAMT (or part of one) resides
within a particular TDMR, also mark it as reserved.
All reserved areas in each TDMR must be in ascending address order, as
required by the TDX architecture.
Signed-off-by: Kai Huang <kai.huang@...el.com>
---
arch/x86/virt/vmx/tdx.c | 148 +++++++++++++++++++++++++++++++++++++++-
1 file changed, 146 insertions(+), 2 deletions(-)
diff --git a/arch/x86/virt/vmx/tdx.c b/arch/x86/virt/vmx/tdx.c
index d29e7943f890..8dac98b91c77 100644
--- a/arch/x86/virt/vmx/tdx.c
+++ b/arch/x86/virt/vmx/tdx.c
@@ -14,6 +14,7 @@
#include <linux/smp.h>
#include <linux/atomic.h>
#include <linux/slab.h>
+#include <linux/sort.h>
#include <asm/msr-index.h>
#include <asm/msr.h>
#include <asm/cpufeature.h>
@@ -1031,6 +1032,145 @@ static int tdmrs_setup_pamt_all(struct tdmr_info **tdmr_array, int tdmr_num)
return -ENOMEM;
}
+static int tdmr_add_rsvd_area(struct tdmr_info *tdmr, int *p_idx,
+ u64 addr, u64 size)
+{
+ struct tdmr_reserved_area *rsvd_areas = tdmr->reserved_areas;
+ int idx = *p_idx;
+
+ /* Reserved area must be 4K aligned in offset and size */
+ if (WARN_ON(addr & ~PAGE_MASK || size & ~PAGE_MASK))
+ return -EINVAL;
+
+ /* Cannot exceed maximum reserved areas supported by TDX */
+ if (idx >= tdx_sysinfo.max_reserved_per_tdmr)
+ return -E2BIG;
+
+ rsvd_areas[idx].offset = addr - tdmr->base;
+ rsvd_areas[idx].size = size;
+
+ *p_idx = idx + 1;
+
+ return 0;
+}
+
+/* Compare function called by sort() for TDMR reserved areas */
+static int rsvd_area_cmp_func(const void *a, const void *b)
+{
+ struct tdmr_reserved_area *r1 = (struct tdmr_reserved_area *)a;
+ struct tdmr_reserved_area *r2 = (struct tdmr_reserved_area *)b;
+
+ if (r1->offset + r1->size <= r2->offset)
+ return -1;
+ if (r1->offset >= r2->offset + r2->size)
+ return 1;
+
+ /* Reserved areas cannot overlap. Caller should guarantee. */
+ WARN_ON(1);
+ return -1;
+}
+
+/* Set up reserved areas for a TDMR, including memory holes and PAMTs */
+static int tdmr_setup_rsvd_areas(struct tdmr_info *tdmr,
+ struct tdmr_info **tdmr_array,
+ int tdmr_num)
+{
+ u64 start, end, prev_end;
+ int rsvd_idx, i, ret = 0;
+
+ /* Mark holes between e820 RAM entries as reserved */
+ rsvd_idx = 0;
+ prev_end = TDMR_START(tdmr);
+ e820_for_each_mem(e820_table, i, start, end) {
+ /* Break if this entry is after the TDMR */
+ if (start >= TDMR_END(tdmr))
+ break;
+
+ /* Exclude entries before this TDMR */
+ if (end < TDMR_START(tdmr))
+ continue;
+
+ /*
+ * Skip if no hole exists before this entry. "<=" is
+ * used because one e820 entry might span two TDMRs.
+ * In that case the start address of this entry is
+ * smaller than the start address of the second TDMR.
+ */
+ if (start <= prev_end) {
+ prev_end = end;
+ continue;
+ }
+
+ /* Add the hole before this e820 entry */
+ ret = tdmr_add_rsvd_area(tdmr, &rsvd_idx, prev_end,
+ start - prev_end);
+ if (ret)
+ return ret;
+
+ prev_end = end;
+ }
+
+ /* Add the hole after the last RAM entry if it exists. */
+ if (prev_end < TDMR_END(tdmr)) {
+ ret = tdmr_add_rsvd_area(tdmr, &rsvd_idx, prev_end,
+ TDMR_END(tdmr) - prev_end);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Walk over all TDMRs to find out whether any PAMT falls into
+ * the given TDMR. If yes, mark it as reserved too.
+ */
+ for (i = 0; i < tdmr_num; i++) {
+ struct tdmr_info *tmp = tdmr_array[i];
+ u64 pamt_start, pamt_end;
+
+ pamt_start = tmp->pamt_4k_base;
+ pamt_end = pamt_start + tmp->pamt_4k_size +
+ tmp->pamt_2m_size + tmp->pamt_1g_size;
+
+ /* Skip PAMTs outside of the given TDMR */
+ if ((pamt_end <= TDMR_START(tdmr)) ||
+ (pamt_start >= TDMR_END(tdmr)))
+ continue;
+
+ /* Only mark the part within the TDMR as reserved */
+ if (pamt_start < TDMR_START(tdmr))
+ pamt_start = TDMR_START(tdmr);
+ if (pamt_end > TDMR_END(tdmr))
+ pamt_end = TDMR_END(tdmr);
+
+ ret = tdmr_add_rsvd_area(tdmr, &rsvd_idx, pamt_start,
+ pamt_end - pamt_start);
+ if (ret)
+ return ret;
+ }
+
+ /* TDX requires reserved areas listed in address ascending order */
+ sort(tdmr->reserved_areas, rsvd_idx, sizeof(struct tdmr_reserved_area),
+ rsvd_area_cmp_func, NULL);
+
+ return 0;
+}
+
+static int tdmrs_setup_rsvd_areas_all(struct tdmr_info **tdmr_array,
+ int tdmr_num)
+{
+ int i;
+
+ for (i = 0; i < tdmr_num; i++) {
+ int ret;
+
+ ret = tdmr_setup_rsvd_areas(tdmr_array[i], tdmr_array,
+ tdmr_num);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int construct_tdmrs(struct tdmr_info **tdmr_array, int *tdmr_num)
{
int ret;
@@ -1047,8 +1187,12 @@ static int construct_tdmrs(struct tdmr_info **tdmr_array, int *tdmr_num)
if (ret)
goto err_free_tdmrs;
- /* Return -EFAULT until constructing TDMRs is done */
- ret = -EFAULT;
+ ret = tdmrs_setup_rsvd_areas_all(tdmr_array, *tdmr_num);
+ if (ret)
+ goto err_free_pamts;
+
+ return 0;
+err_free_pamts:
tdmrs_free_pamt_all(tdmr_array, *tdmr_num);
err_free_tdmrs:
free_tdmrs(tdmr_array, *tdmr_num);
--
2.33.1