Message-ID: <147190836254.9523.17071309814378405604.stgit@brijesh-build-machine>
Date: Mon, 22 Aug 2016 19:26:02 -0400
From: Brijesh Singh <brijesh.singh@....com>
To: <simon.guinot@...uanux.org>, <linux-efi@...r.kernel.org>,
<brijesh.singh@....com>, <kvm@...r.kernel.org>,
<rkrcmar@...hat.com>, <matt@...eblueprint.co.uk>,
<linus.walleij@...aro.org>, <linux-mm@...ck.org>,
<paul.gortmaker@...driver.com>, <hpa@...or.com>,
<dan.j.williams@...el.com>, <aarcange@...hat.com>,
<sfr@...b.auug.org.au>, <andriy.shevchenko@...ux.intel.com>,
<herbert@...dor.apana.org.au>, <bhe@...hat.com>,
<xemul@...allels.com>, <joro@...tes.org>, <x86@...nel.org>,
<mingo@...hat.com>, <msalter@...hat.com>,
<ross.zwisler@...ux.intel.com>, <bp@...e.de>, <dyoung@...hat.com>,
<thomas.lendacky@....com>, <jroedel@...e.de>,
<keescook@...omium.org>, <toshi.kani@....com>,
<mathieu.desnoyers@...icios.com>, <devel@...uxdriverproject.org>,
<tglx@...utronix.de>, <mchehab@...nel.org>,
<iamjoonsoo.kim@....com>, <labbott@...oraproject.org>,
<tony.luck@...el.com>, <alexandre.bounine@....com>,
<kuleshovmail@...il.com>, <linux-kernel@...r.kernel.org>,
<mcgrof@...nel.org>, <linux-crypto@...r.kernel.org>,
<pbonzini@...hat.com>, <akpm@...ux-foundation.org>,
<davem@...emloft.net>
Subject: [RFC PATCH v1 12/28] x86: DMA support for SEV memory encryption
From: Tom Lendacky <thomas.lendacky@....com>
DMA access to memory mapped as encrypted while SEV is active cannot be
encrypted during a device write or decrypted during a device read: the
device has no access to the guest's encryption key, so it sees only
ciphertext in encrypted pages. For DMA to work properly when SEV is
active, the swiotlb bounce buffers must be used.
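With swiotlb_force set, that bouncing happens transparently: a driver's
ordinary streaming mapping of an encrypted kernel buffer is routed
through a shared, unencrypted swiotlb slot. A minimal sketch of that
path (hypothetical driver code, not part of this patch; mydev_do_dma
and its parameters are made-up names):

	#include <linux/dma-mapping.h>

	/*
	 * Hypothetical driver fragment: 'buf' lives in encrypted kernel
	 * memory, so with SEV active the swiotlb code copies it into a
	 * decrypted bounce buffer and hands the device that address.
	 */
	static int mydev_do_dma(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t dma;

		dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			return -ENOMEM;

		/* ... program the device with 'dma' and wait for it ... */

		/* unmap; for DMA_FROM_DEVICE this copies the data back */
		dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);

		return 0;
	}

The device only ever touches the bounce buffer, which both sides can
access, while the driver keeps working on its encrypted copy.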
Signed-off-by: Tom Lendacky <thomas.lendacky@....com>
---
arch/x86/mm/mem_encrypt.c | 48 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 48 insertions(+)
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 1154353..ce6e3ea 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -173,8 +173,52 @@ void __init sme_early_init(void)
 	/* Update the protection map with memory encryption mask */
 	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
 		protection_map[i] = __pgprot(pgprot_val(protection_map[i]) | sme_me_mask);
+
+	if (sev_active)
+		swiotlb_force = 1;
 }
 
+static void *sme_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		       gfp_t gfp, unsigned long attrs)
+{
+	void *vaddr;
+
+	vaddr = x86_swiotlb_alloc_coherent(dev, size, dma_handle, gfp, attrs);
+	if (!vaddr)
+		return NULL;
+
+	/* Clear the SME encryption bit for DMA use */
+	sme_set_mem_dec(vaddr, size);
+
+	/* Remove the encryption bit from the DMA address */
+	*dma_handle &= ~sme_me_mask;
+
+	return vaddr;
+}
+
+static void sme_free(struct device *dev, size_t size, void *vaddr,
+		     dma_addr_t dma_handle, unsigned long attrs)
+{
+	/* Set the SME encryption bit for re-use as encrypted */
+	sme_set_mem_enc(vaddr, size);
+
+	x86_swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
+}
+
+static struct dma_map_ops sme_dma_ops = {
+	.alloc			= sme_alloc,
+	.free			= sme_free,
+	.map_page		= swiotlb_map_page,
+	.unmap_page		= swiotlb_unmap_page,
+	.map_sg			= swiotlb_map_sg_attrs,
+	.unmap_sg		= swiotlb_unmap_sg_attrs,
+	.sync_single_for_cpu	= swiotlb_sync_single_for_cpu,
+	.sync_single_for_device	= swiotlb_sync_single_for_device,
+	.sync_sg_for_cpu	= swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device	= swiotlb_sync_sg_for_device,
+	.mapping_error		= swiotlb_dma_mapping_error,
+};
+
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_init(void)
 {
@@ -184,6 +228,10 @@ void __init mem_encrypt_init(void)
 	/* Make SWIOTLB use an unencrypted DMA area */
 	swiotlb_clear_encryption();
 
+	/* Use SEV DMA operations if SEV is active */
+	if (sev_active)
+		dma_ops = &sme_dma_ops;
+
 	pr_info("memory encryption active\n");
 }
 
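Once dma_ops points at sme_dma_ops, the coherent allocation path is
covered as well: dma_alloc_coherent() lands in sme_alloc() above, which
clears the encryption bit from the allocation's page mappings and strips
it from the returned DMA address, so CPU and device see the same
unencrypted data. A minimal usage sketch (hypothetical driver code;
mydev_setup_ring/mydev_teardown_ring and their names are made up):

	/*
	 * Hypothetical driver fragment: with SEV active, these calls are
	 * routed through sme_alloc()/sme_free() above, so 'ring' is mapped
	 * decrypted and '*ring_dma' carries no encryption mask bit.
	 */
	static void *mydev_setup_ring(struct device *dev, size_t size,
				      dma_addr_t *ring_dma)
	{
		return dma_alloc_coherent(dev, size, ring_dma, GFP_KERNEL);
	}

	static void mydev_teardown_ring(struct device *dev, size_t size,
					void *ring, dma_addr_t ring_dma)
	{
		/* re-encrypts the range (sme_set_mem_enc()) before freeing */
		dma_free_coherent(dev, size, ring, ring_dma);
	}

No driver changes are required; substituting the ops in
mem_encrypt_init() is what keeps existing drivers working under SEV.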