Message-Id: <20230811221851.10244-4-decui@microsoft.com>
Date: Fri, 11 Aug 2023 15:18:45 -0700
From: Dexuan Cui <decui@...rosoft.com>
To: ak@...ux.intel.com, arnd@...db.de, bp@...en8.de,
brijesh.singh@....com, dan.j.williams@...el.com,
dave.hansen@...el.com, dave.hansen@...ux.intel.com,
haiyangz@...rosoft.com, hpa@...or.com, jane.chu@...cle.com,
kirill.shutemov@...ux.intel.com, kys@...rosoft.com,
linux-hyperv@...r.kernel.org, luto@...nel.org, mingo@...hat.com,
peterz@...radead.org, rostedt@...dmis.org,
sathyanarayanan.kuppuswamy@...ux.intel.com, seanjc@...gle.com,
tglx@...utronix.de, tony.luck@...el.com, wei.liu@...nel.org,
Jason@...c4.com, nik.borisov@...e.com, mikelley@...rosoft.com
Cc: x86@...nel.org, linux-kernel@...r.kernel.org,
linux-arch@...r.kernel.org, Tianyu.Lan@...rosoft.com,
rick.p.edgecombe@...el.com, andavis@...hat.com, mheslin@...hat.com,
vkuznets@...hat.com, xiaoyao.li@...el.com,
Dexuan Cui <decui@...rosoft.com>
Subject: [PATCH 3/9] Drivers: hv: vmbus: Support fully enlightened TDX guests
Add Hyper-V specific code so that a fully enlightened TDX guest (i.e.
without the paravisor) can run on Hyper-V:
Don't use hv_vp_assist_page. Use GHCI instead.
Don't try to use the unsupported HV_REGISTER_CRASH_CTL.
Don't trust (use) Hyper-V's TLB-flushing hypercalls.
Don't use lazy EOI.
Share the SynIC Event/Message pages with the hypervisor.
Don't use the Hyper-V TSC page for now, because non-trivial work is
required to share the page with the hypervisor.
Reviewed-by: Michael Kelley <mikelley@...rosoft.com>
Signed-off-by: Dexuan Cui <decui@...rosoft.com>
---
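Note for reviewers: __send_ipi_mask()/__send_ipi_one() below can proceed
without hv_hypercall_pg because hv_do_hypercall() routes a fully
enlightened TDX guest to the GHCI path. A simplified sketch of that
dispatch (hv_tdx_hypercall() is added earlier in this series;
hv_pg_hypercall() is only a stand-in name for the existing
hypercall-page path, not the literal mshyperv.h code):

	static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
	{
		u64 input_address = input ? virt_to_phys(input) : 0;
		u64 output_address = output ? virt_to_phys(output) : 0;

		/*
		 * A fully enlightened TDX guest has no Hyper-V hypercall
		 * page; it issues hypercalls through the GHCI (TDG.VP.VMCALL).
		 */
		if (hv_isolation_type_tdx() && !ms_hyperv.paravisor_present)
			return hv_tdx_hypercall(control, input_address,
						output_address);

		/* Everyone else goes through the hypercall page. */
		return hv_pg_hypercall(control, input_address, output_address);
	}
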
arch/x86/hyperv/hv_apic.c | 15 ++++++++++++---
arch/x86/hyperv/hv_init.c | 19 +++++++++++++++----
arch/x86/kernel/cpu/mshyperv.c | 23 +++++++++++++++++++++++
drivers/hv/hv.c | 17 +++++++++++++++--
4 files changed, 65 insertions(+), 9 deletions(-)
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 1fbda2f94184e..cb7429046d18d 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -177,8 +177,11 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector,
(exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask)))
return true;
- if (!hv_hypercall_pg)
- return false;
+ /* A fully enlightened TDX VM uses GHCI rather than hv_hypercall_pg. */
+ if (!hv_hypercall_pg) {
+ if (ms_hyperv.paravisor_present || !hv_isolation_type_tdx())
+ return false;
+ }
if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
return false;
@@ -231,9 +234,15 @@ static bool __send_ipi_one(int cpu, int vector)
trace_hyperv_send_ipi_one(cpu, vector);
- if (!hv_hypercall_pg || (vp == VP_INVAL))
+ if (vp == VP_INVAL)
return false;
+ /* A fully enlightened TDX VM uses GHCI rather than hv_hypercall_pg. */
+ if (!hv_hypercall_pg) {
+ if (ms_hyperv.paravisor_present || !hv_isolation_type_tdx())
+ return false;
+ }
+
if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
return false;
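
The two hunks above apply the same rule; expressed as a hypothetical
helper (not part of this patch), the condition is:

	/* Hypothetical helper, for illustration only. */
	static bool hv_ipi_hypercall_possible(void)
	{
		if (hv_hypercall_pg)
			return true;

		/*
		 * No hypercall page: only a fully enlightened TDX guest can
		 * still send the IPI hypercall, via the GHCI.
		 */
		return !ms_hyperv.paravisor_present && hv_isolation_type_tdx();
	}
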
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index d8ea54663113c..4bcd0a6f94760 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -80,7 +80,7 @@ static int hyperv_init_ghcb(void)
static int hv_cpu_init(unsigned int cpu)
{
union hv_vp_assist_msr_contents msr = { 0 };
- struct hv_vp_assist_page **hvp = &hv_vp_assist_page[cpu];
+ struct hv_vp_assist_page **hvp;
int ret;
ret = hv_common_cpu_init(cpu);
@@ -90,6 +90,7 @@ static int hv_cpu_init(unsigned int cpu)
if (!hv_vp_assist_page)
return 0;
+ hvp = &hv_vp_assist_page[cpu];
if (hv_root_partition) {
/*
* For root partition we get the hypervisor provided VP assist
@@ -447,11 +448,21 @@ void __init hyperv_init(void)
if (hv_common_init())
return;
- hv_vp_assist_page = kcalloc(num_possible_cpus(),
- sizeof(*hv_vp_assist_page), GFP_KERNEL);
+ /*
+ * The VP assist page is useless to a TDX guest: the only use we
+ * would have for it is lazy EOI, which cannot be used with TDX.
+ */
+ if (hv_isolation_type_tdx())
+ hv_vp_assist_page = NULL;
+ else
+ hv_vp_assist_page = kcalloc(num_possible_cpus(),
+ sizeof(*hv_vp_assist_page),
+ GFP_KERNEL);
if (!hv_vp_assist_page) {
ms_hyperv.hints &= ~HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
- goto common_free;
+
+ if (!hv_isolation_type_tdx())
+ goto common_free;
}
if (hv_isolation_type_snp()) {
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index a50fd3650ea9b..507df0f64ae18 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -420,6 +420,29 @@ static void __init ms_hyperv_init_platform(void)
static_branch_enable(&isolation_type_snp);
} else if (hv_get_isolation_type() == HV_ISOLATION_TYPE_TDX) {
static_branch_enable(&isolation_type_tdx);
+
+ /* A TDX VM must use x2APIC and doesn't use lazy EOI. */
+ ms_hyperv.hints &= ~HV_X64_APIC_ACCESS_RECOMMENDED;
+
+ if (!ms_hyperv.paravisor_present) {
+ /*
+ * ms_hyperv.shared_gpa_boundary_active is 0 in a fully
+ * enlightened TDX VM, but the GPAs of the SynIC
+ * Event/Message pages and the VMBus Monitor pages in
+ * such a VM still need this offset added to them.
+ */
+ ms_hyperv.shared_gpa_boundary = cc_mkdec(0);
+
+ /* The reference TSC page is not supported yet: more work is required. */
+ ms_hyperv.features &= ~HV_MSR_REFERENCE_TSC_AVAILABLE;
+
+ /* HV_REGISTER_CRASH_CTL is unsupported. */
+ ms_hyperv.misc_features &= ~HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
+
+ /* Don't trust Hyper-V's TLB-flushing hypercalls. */
+ ms_hyperv.hints &= ~HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
+ }
}
}
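
cc_mkdec(0) above yields a value with only the TDX shared-GPA bit set
(the bit position is reported to the guest at boot), so a page shared
with the hypervisor is addressed by OR'ing that bit into its physical
address, which is what the hv.c changes below do. A hypothetical helper
showing the arithmetic:

	/* Hypothetical helper, for illustration only. */
	static u64 hv_tdx_shared_gpa(void *va)
	{
		/* Private GPA of the page plus the shared-GPA boundary bit. */
		return virt_to_phys(va) | ms_hyperv.shared_gpa_boundary;
	}
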
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index ec6e35a0d9bf6..28bbb354324d6 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -121,11 +121,15 @@ int hv_synic_alloc(void)
(void *)get_zeroed_page(GFP_ATOMIC);
if (hv_cpu->synic_event_page == NULL) {
pr_err("Unable to allocate SYNIC event page\n");
+
+ free_page((unsigned long)hv_cpu->synic_message_page);
+ hv_cpu->synic_message_page = NULL;
goto err;
}
}
- if (hv_isolation_type_en_snp()) {
+ if (!ms_hyperv.paravisor_present &&
+ (hv_isolation_type_en_snp() || hv_isolation_type_tdx())) {
ret = set_memory_decrypted((unsigned long)
hv_cpu->synic_message_page, 1);
if (ret) {
@@ -174,7 +178,8 @@ void hv_synic_free(void)
= per_cpu_ptr(hv_context.cpu_context, cpu);
/* It's better to leak the page if the encryption fails. */
- if (hv_isolation_type_en_snp()) {
+ if (!ms_hyperv.paravisor_present &&
+ (hv_isolation_type_en_snp() || hv_isolation_type_tdx())) {
if (hv_cpu->synic_message_page) {
ret = set_memory_encrypted((unsigned long)
hv_cpu->synic_message_page, 1);
@@ -232,6 +237,10 @@ void hv_synic_enable_regs(unsigned int cpu)
} else {
simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
>> HV_HYP_PAGE_SHIFT;
+
+ if (hv_isolation_type_tdx())
+ simp.base_simp_gpa |= ms_hyperv.shared_gpa_boundary
+ >> HV_HYP_PAGE_SHIFT;
}
hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
@@ -251,6 +260,10 @@ void hv_synic_enable_regs(unsigned int cpu)
} else {
siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
>> HV_HYP_PAGE_SHIFT;
+
+ if (hv_isolation_type_tdx())
+ siefp.base_siefp_gpa |= ms_hyperv.shared_gpa_boundary
+ >> HV_HYP_PAGE_SHIFT;
}
hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
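
For completeness, the lifetime rule for the SynIC pages in a fully
enlightened guest, condensed into hypothetical helpers (illustration
only, not the literal hv_synic_alloc()/hv_synic_free() code):

	/* Hypothetical helpers, for illustration only. */
	static void *hv_alloc_shared_page(void)
	{
		void *page = (void *)get_zeroed_page(GFP_KERNEL);

		if (!page)
			return NULL;

		if (!ms_hyperv.paravisor_present &&
		    (hv_isolation_type_en_snp() || hv_isolation_type_tdx())) {
			/* Make the page visible (decrypted) to the hypervisor. */
			if (set_memory_decrypted((unsigned long)page, 1)) {
				/* Don't free a page whose state is now unknown. */
				return NULL;
			}
			memset(page, 0, PAGE_SIZE);
		}
		return page;
	}

	static void hv_free_shared_page(void *page)
	{
		if (!page)
			return;

		if (!ms_hyperv.paravisor_present &&
		    (hv_isolation_type_en_snp() || hv_isolation_type_tdx())) {
			/* Re-encrypt first; on failure it is safer to leak the page. */
			if (set_memory_encrypted((unsigned long)page, 1))
				return;
		}
		free_page((unsigned long)page);
	}
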
--
2.25.1