Message-Id: <72ebe1575ff79bce7be38b8093aeb41032562a22.1612563142.git.sathyanarayanan.kuppuswamy@linux.intel.com>
Date: Fri, 5 Feb 2021 15:38:43 -0800
From: Kuppuswamy Sathyanarayanan
<sathyanarayanan.kuppuswamy@...ux.intel.com>
To: Peter Zijlstra <peterz@...radead.org>,
Andy Lutomirski <luto@...nel.org>,
Dave Hansen <dave.hansen@...el.com>
Cc: Andi Kleen <ak@...ux.intel.com>,
Kirill Shutemov <kirill.shutemov@...ux.intel.com>,
Kuppuswamy Sathyanarayanan <knsathya@...nel.org>,
Dan Williams <dan.j.williams@...el.com>,
Raj Ashok <ashok.raj@...el.com>,
Sean Christopherson <seanjc@...gle.com>,
linux-kernel@...r.kernel.org,
Kuppuswamy Sathyanarayanan
<sathyanarayanan.kuppuswamy@...ux.intel.com>
Subject: [RFC v1 26/26] x86/kvm: Use bounce buffers for TD guest
From: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
TDX doesn't allow DMA access to guest private memory. For DMA to work
properly in a TD guest, use SWIOTLB bounce buffers.
Move the AMD SEV initialization into common code and adapt it for TDX.
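For illustration only (not part of the applied diff), the resulting flow is
roughly the following sketch, assuming the is_tdx_guest() helper introduced
earlier in this series:

	/*
	 * Early in boot, tdx_early_init() forces all DMA through the
	 * SWIOTLB bounce buffers, since devices cannot reach TD guest
	 * private memory directly.
	 */
	swiotlb_force = SWIOTLB_FORCE;

	/*
	 * Later, the common mem_encrypt_init() converts the SWIOTLB pool
	 * to shared (decrypted) memory so that devices can access it.
	 */
	if (sme_me_mask || is_tdx_guest())
		swiotlb_update_mem_attributes();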
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
Reviewed-by: Andi Kleen <ak@...ux.intel.com>
Signed-off-by: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@...ux.intel.com>
---
 arch/x86/kernel/pci-swiotlb.c    |  2 +-
 arch/x86/kernel/tdx.c            |  3 +++
 arch/x86/mm/mem_encrypt.c        | 44 -------------------------------
 arch/x86/mm/mem_encrypt_common.c | 45 ++++++++++++++++++++++++++++++++
 4 files changed, 49 insertions(+), 45 deletions(-)
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index c2cfa5e7c152..020e13749758 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -49,7 +49,7 @@ int __init pci_swiotlb_detect_4gb(void)
* buffers are allocated and used for devices that do not support
* the addressing range required for the encryption mask.
*/
- if (sme_active())
+ if (sme_active() || is_tdx_guest())
swiotlb = 1;
return swiotlb;
diff --git a/arch/x86/kernel/tdx.c b/arch/x86/kernel/tdx.c
index f51a19168adc..ccb9401bd706 100644
--- a/arch/x86/kernel/tdx.c
+++ b/arch/x86/kernel/tdx.c
@@ -9,6 +9,7 @@
#include <asm/vmx.h>
#include <asm/insn.h>
#include <linux/sched/signal.h> /* force_sig_fault() */
+#include <linux/swiotlb.h>
#ifdef CONFIG_KVM_GUEST
#include "tdx-kvm.c"
@@ -472,6 +473,8 @@ void __init tdx_early_init(void)
legacy_pic = &null_legacy_pic;
+ swiotlb_force = SWIOTLB_FORCE;
+
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "tdx:cpu_hotplug",
NULL, tdx_cpu_offline_prepare);
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 11a6a7b3af7e..7fbbb2f3d426 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -408,47 +408,3 @@ void __init mem_encrypt_free_decrypted_mem(void)
free_init_pages("unused decrypted", vaddr, vaddr_end);
}
-
-static void print_mem_encrypt_feature_info(void)
-{
- pr_info("AMD Memory Encryption Features active:");
-
- /* Secure Memory Encryption */
- if (sme_active()) {
- /*
- * SME is mutually exclusive with any of the SEV
- * features below.
- */
- pr_cont(" SME\n");
- return;
- }
-
- /* Secure Encrypted Virtualization */
- if (sev_active())
- pr_cont(" SEV");
-
- /* Encrypted Register State */
- if (sev_es_active())
- pr_cont(" SEV-ES");
-
- pr_cont("\n");
-}
-
-/* Architecture __weak replacement functions */
-void __init mem_encrypt_init(void)
-{
- if (!sme_me_mask)
- return;
-
- /* Call into SWIOTLB to update the SWIOTLB DMA buffers */
- swiotlb_update_mem_attributes();
-
- /*
- * With SEV, we need to unroll the rep string I/O instructions.
- */
- if (sev_active())
- static_branch_enable(&sev_enable_key);
-
- print_mem_encrypt_feature_info();
-}
-
diff --git a/arch/x86/mm/mem_encrypt_common.c b/arch/x86/mm/mem_encrypt_common.c
index b6d93b0c5dcf..6f3d90d4d68e 100644
--- a/arch/x86/mm/mem_encrypt_common.c
+++ b/arch/x86/mm/mem_encrypt_common.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/mem_encrypt.h>
#include <linux/dma-mapping.h>
+#include <linux/swiotlb.h>
/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
@@ -36,3 +37,47 @@ bool force_dma_unencrypted(struct device *dev)
return false;
}
+
+static void print_mem_encrypt_feature_info(void)
+{
+ pr_info("AMD Memory Encryption Features active:");
+
+ /* Secure Memory Encryption */
+ if (sme_active()) {
+ /*
+ * SME is mutually exclusive with any of the SEV
+ * features below.
+ */
+ pr_cont(" SME\n");
+ return;
+ }
+
+ /* Secure Encrypted Virtualization */
+ if (sev_active())
+ pr_cont(" SEV");
+
+ /* Encrypted Register State */
+ if (sev_es_active())
+ pr_cont(" SEV-ES");
+
+ pr_cont("\n");
+}
+
+/* Architecture __weak replacement functions */
+void __init mem_encrypt_init(void)
+{
+ if (!sme_me_mask && !is_tdx_guest())
+ return;
+
+ /* Call into SWIOTLB to update the SWIOTLB DMA buffers */
+ swiotlb_update_mem_attributes();
+
+ /*
+ * With SEV, we need to unroll the rep string I/O instructions.
+ */
+ if (sev_active())
+ static_branch_enable(&sev_enable_key);
+
+ if (!is_tdx_guest())
+ print_mem_encrypt_feature_info();
+}
--
2.25.1