Message-Id: <20230616044701.15888-3-decui@microsoft.com>
Date: Thu, 15 Jun 2023 21:47:01 -0700
From: Dexuan Cui <decui@...rosoft.com>
To: ak@...ux.intel.com, arnd@...db.de, bp@...en8.de,
brijesh.singh@....com, dan.j.williams@...el.com,
dave.hansen@...el.com, dave.hansen@...ux.intel.com,
haiyangz@...rosoft.com, hpa@...or.com, jane.chu@...cle.com,
kirill.shutemov@...ux.intel.com, kys@...rosoft.com,
linux-arch@...r.kernel.org, linux-hyperv@...r.kernel.org,
luto@...nel.org, mingo@...hat.com, peterz@...radead.org,
rostedt@...dmis.org, sathyanarayanan.kuppuswamy@...ux.intel.com,
seanjc@...gle.com, tglx@...utronix.de, tony.luck@...el.com,
wei.liu@...nel.org, x86@...nel.org, mikelley@...rosoft.com
Cc: linux-kernel@...r.kernel.org, Tianyu.Lan@...rosoft.com,
rick.p.edgecombe@...el.com, Dexuan Cui <decui@...rosoft.com>
Subject: [PATCH v7 2/2] x86/tdx: Support vmalloc() for tdx_enc_status_changed()

When a TDX guest runs on Hyper-V, the hv_netvsc driver's netvsc_init_buf()
allocates buffers using vzalloc(), and needs to share the buffers with the
host OS by calling set_memory_decrypted(), which does not yet work for
vmalloc(). Add vmalloc() support by handling the pages one by one.
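
For context: vmalloc() memory is virtually contiguous but physically
discontiguous, so the old single-range conversion based on __pa() cannot
cover it; each page has to be translated and converted on its own. A
minimal sketch of that per-page walk (illustrative only; convert_phys_range()
is a hypothetical stand-in for the patch's tdx_enc_status_changed_phys()):

	/*
	 * Illustrative sketch: vmalloc memory is not physically
	 * contiguous, so translate and convert each PAGE_SIZE chunk
	 * separately. convert_phys_range() is a hypothetical stand-in
	 * for tdx_enc_status_changed_phys() below.
	 */
	static bool convert_vmalloc_range(unsigned long vaddr, int numpages,
					  bool enc)
	{
		unsigned long addr = vaddr;
		unsigned long end = vaddr + numpages * PAGE_SIZE;

		while (addr < end) {
			phys_addr_t pa = slow_virt_to_phys((void *)addr);

			if (!convert_phys_range(pa, pa + PAGE_SIZE, enc))
				return false;
			addr += PAGE_SIZE;
		}
		return true;
	}

With this in place, a caller such as netvsc_init_buf() can pass a vzalloc()
buffer straight to set_memory_decrypted(), which returns 0 on success.
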
Co-developed-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
Reviewed-by: Michael Kelley <mikelley@...rosoft.com>
Signed-off-by: Dexuan Cui <decui@...rosoft.com>
---
arch/x86/coco/tdx/tdx.c | 76 ++++++++++++++++++++++++++++-------------
1 file changed, 52 insertions(+), 24 deletions(-)

Changes in v2:
Changed tdx_enc_status_changed() in place.

Changes in v3:
No change since v2.

Changes in v4:
Added Kirill's Co-developed-by since Kirill helped to improve the
code by adding tdx_enc_status_changed_phys().
Thanks Kirill for the clarification on load_unaligned_zeropad()!

Changes in v5:
Added Kirill's Signed-off-by.
Added Michael's Reviewed-by.

Changes in v6: None.
Changes in v7: None.

Note: there was a race between set_memory_encrypted() and
load_unaligned_zeropad(), which has been fixed by Kirill's three patches
in the x86/tdx branch of the tip tree.
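
For readers unfamiliar with that race: load_unaligned_zeropad()
intentionally reads a full machine word that may extend past the end of a
string into the following page, so a load that starts in one page can
touch a neighboring page while the neighbor is mid-conversion. A contrived
illustration (buf and its layout are hypothetical):

	/*
	 * Illustrative only: load_unaligned_zeropad() reads sizeof(long)
	 * bytes starting at 'p'. With 'p' at the last byte of a page,
	 * the read spills into the next page; if that page is being
	 * converted between private and shared at that moment, the
	 * stray access can fault.
	 */
	char *p = buf + PAGE_SIZE - 1;
	unsigned long v = load_unaligned_zeropad(p);
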
diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index 5b62a1f5bd79..8b2a2dcb2efd 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -7,6 +7,7 @@
#include <linux/cpufeature.h>
#include <linux/export.h>
#include <linux/io.h>
+#include <linux/mm.h>
#include <asm/coco.h>
#include <asm/tdx.h>
#include <asm/vmx.h>
@@ -778,6 +779,34 @@ static bool try_accept_one(phys_addr_t *start, unsigned long len,
return true;
}
+static bool try_accept_page(phys_addr_t start, phys_addr_t end)
+{
+ /*
+ * For shared->private conversion, accept the page using
+ * TDX_ACCEPT_PAGE TDX module call.
+ */
+ while (start < end) {
+ unsigned long len = end - start;
+
+ /*
+ * Try larger accepts first. It gives chance to VMM to keep
+ * 1G/2M SEPT entries where possible and speeds up process by
+ * cutting number of hypercalls (if successful).
+ */
+
+ if (try_accept_one(&start, len, PG_LEVEL_1G))
+ continue;
+
+ if (try_accept_one(&start, len, PG_LEVEL_2M))
+ continue;
+
+ if (!try_accept_one(&start, len, PG_LEVEL_4K))
+ return false;
+ }
+
+ return true;
+}
+
/*
* Notify the VMM about page mapping conversion. More info about ABI
* can be found in TDX Guest-Host-Communication Interface (GHCI),
@@ -828,6 +857,19 @@ static bool tdx_map_gpa(phys_addr_t start, phys_addr_t end, bool enc)
return false;
}
+static bool tdx_enc_status_changed_phys(phys_addr_t start, phys_addr_t end,
+ bool enc)
+{
+ if (!tdx_map_gpa(start, end, enc))
+ return false;
+
+ /* private->shared conversion requires only MapGPA call */
+ if (!enc)
+ return true;
+
+ return try_accept_page(start, end);
+}
+
/*
* Inform the VMM of the guest's intent for this physical page: shared with
* the VMM or private to the guest. The VMM is expected to change its mapping
@@ -835,37 +877,23 @@ static bool tdx_map_gpa(phys_addr_t start, phys_addr_t end, bool enc)
*/
static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
{
- phys_addr_t start = __pa(vaddr);
- phys_addr_t end = __pa(vaddr + numpages * PAGE_SIZE);
+ unsigned long start = vaddr;
+ unsigned long end = start + numpages * PAGE_SIZE;
- if (!tdx_map_gpa(start, end, enc))
+ if (offset_in_page(start) != 0)
return false;
- /* private->shared conversion requires only MapGPA call */
- if (!enc)
- return true;
+ if (!is_vmalloc_addr((void *)start))
+ return tdx_enc_status_changed_phys(__pa(start), __pa(end), enc);
- /*
- * For shared->private conversion, accept the page using
- * TDX_ACCEPT_PAGE TDX module call.
- */
while (start < end) {
- unsigned long len = end - start;
+ phys_addr_t start_pa = slow_virt_to_phys((void *)start);
+ phys_addr_t end_pa = start_pa + PAGE_SIZE;
- /*
- * Try larger accepts first. It gives chance to VMM to keep
- * 1G/2M SEPT entries where possible and speeds up process by
- * cutting number of hypercalls (if successful).
- */
-
- if (try_accept_one(&start, len, PG_LEVEL_1G))
- continue;
-
- if (try_accept_one(&start, len, PG_LEVEL_2M))
- continue;
-
- if (!try_accept_one(&start, len, PG_LEVEL_4K))
+ if (!tdx_enc_status_changed_phys(start_pa, end_pa, enc))
return false;
+
+ start += PAGE_SIZE;
}
return true;
--
2.25.1