Message-ID: <alpine.DEB.2.21.2004081420060.19661@chino.kir.corp.google.com>
Date: Wed, 8 Apr 2020 14:21:08 -0700 (PDT)
From: David Rientjes <rientjes@...gle.com>
To: Christoph Hellwig <hch@....de>,
Tom Lendacky <thomas.lendacky@....com>
cc: "Singh, Brijesh" <brijesh.singh@....com>,
"Grimm, Jon" <jon.grimm@....com>, Joerg Roedel <joro@...tes.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"iommu@...ts.linux-foundation.org" <iommu@...ts.linux-foundation.org>
Subject: [rfc v2 4/6] dma-direct: atomic allocations must come from atomic
coherent pools

When a device requires unencrypted memory and the context does not allow
blocking, memory must be returned from the atomic coherent pools.
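
For illustration only (not part of this patch), a driver path that hits
this case could look like the sketch below; struct foo_dev, foo_irq(),
and the buffer size are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/sizes.h>

/* Hypothetical device state, for illustration only. */
struct foo_dev {
	struct device *dev;
	void *buf;
	dma_addr_t buf_dma;
};

static irqreturn_t foo_irq(int irq, void *data)
{
	struct foo_dev *foo = data;

	/*
	 * Interrupt context: blocking is not allowed.  On a device where
	 * force_dma_unencrypted() is true (e.g. in an SEV guest), this
	 * allocation can only be satisfied from a pre-decrypted atomic
	 * coherent pool.
	 */
	foo->buf = dma_alloc_coherent(foo->dev, SZ_4K, &foo->buf_dma,
				      GFP_ATOMIC);
	if (!foo->buf)
		return IRQ_NONE;

	return IRQ_HANDLED;
}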

Allocating from the pool also avoids the remap when
CONFIG_DMA_DIRECT_REMAP is not enabled: the pools then only require
CONFIG_DMA_COHERENT_POOL and can use the linear map address of their
pages directly. This will be used for CONFIG_AMD_MEM_ENCRYPT in a
subsequent patch.

Keep all memory in these pools unencrypted; the pools never shrink, so
the memory never needs to be re-encrypted on free.
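
The gating condition this patch adds to dma_direct_alloc_pages() can be
restated as a small helper, shown for illustration only (no such helper
is added by the patch itself):

#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/gfp.h>

/*
 * Illustration only: unencrypted memory plus a non-blocking gfp mask
 * forces the allocation into the atomic coherent pools.
 */
static bool must_use_atomic_pool(struct device *dev, gfp_t gfp)
{
	return IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	       force_dma_unencrypted(dev) &&
	       !gfpflags_allow_blocking(gfp);
}
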
Signed-off-by: David Rientjes <rientjes@...gle.com>
---
kernel/dma/direct.c | 16 ++++++++++++++++
kernel/dma/pool.c | 15 +++++++++++++--
2 files changed, 29 insertions(+), 2 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 70800ca64f13..44165263c185 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -124,6 +124,18 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
struct page *page;
void *ret;
+ /*
+ * Unencrypted memory must come directly from DMA atomic pools if
+ * blocking is not allowed.
+ */
+ if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
+ force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp)) {
+ ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
+ if (!ret)
+ return NULL;
+ goto done;
+ }
+
if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
dma_alloc_need_uncached(dev, attrs) &&
!gfpflags_allow_blocking(gfp)) {
@@ -203,6 +215,10 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
{
unsigned int page_order = get_order(size);
+ if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
+ dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
+ return;
+
if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
!force_dma_unencrypted(dev)) {
/* cpu_addr is a struct page cookie, not a kernel address */
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index e14c5a2da734..6685ab89cfa7 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -9,6 +9,7 @@
#include <linux/dma-contiguous.h>
#include <linux/init.h>
#include <linux/genalloc.h>
+#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
@@ -55,12 +56,20 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
arch_dma_prep_coherent(page, pool_size);
+#ifdef CONFIG_DMA_DIRECT_REMAP
addr = dma_common_contiguous_remap(page, pool_size,
pgprot_dmacoherent(PAGE_KERNEL),
__builtin_return_address(0));
if (!addr)
goto free_page;
-
+#else
+ addr = page_to_virt(page);
+#endif
+ /*
+ * Memory in the atomic DMA pools must be unencrypted, the pools do not
+ * shrink so no re-encryption occurs in dma_direct_free_pages().
+ */
+ set_memory_decrypted((unsigned long)page_to_virt(page), 1 << order);
ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
pool_size, NUMA_NO_NODE);
if (ret)
@@ -69,8 +78,10 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
return 0;
remove_mapping:
+#ifdef CONFIG_DMA_DIRECT_REMAP
dma_common_free_remap(addr, pool_size);
-free_page:
+#endif
+free_page: __maybe_unused
if (!dma_release_from_contiguous(NULL, page, 1 << order))
__free_pages(page, order);
out:
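
For reviewers unfamiliar with genalloc, the sketch below shows roughly
the pattern atomic_pool_expand() follows when it hands a page range to
the pool; demo_pool and demo_pool_init() are made-up names and the
error unwinding is trimmed:

#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/numa.h>

static struct gen_pool *demo_pool;

static int demo_pool_init(void)
{
	struct page *page;

	/* One-page minimum allocation granularity, like the atomic pools. */
	demo_pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
	if (!demo_pool)
		return -ENOMEM;

	page = alloc_pages(GFP_KERNEL, 0);
	if (!page)
		return -ENOMEM;

	/* Register the VA range together with its backing physical address. */
	return gen_pool_add_virt(demo_pool, (unsigned long)page_to_virt(page),
				 page_to_phys(page), PAGE_SIZE, NUMA_NO_NODE);
}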