Message-Id: <20210422081508.3942748-15-tientzu@chromium.org>
Date: Thu, 22 Apr 2021 16:15:06 +0800
From: Claire Chang <tientzu@...omium.org>
To: Joerg Roedel <joro@...tes.org>, Will Deacon <will@...nel.org>,
Frank Rowand <frowand.list@...il.com>,
Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>,
boris.ostrovsky@...cle.com, jgross@...e.com,
Christoph Hellwig <hch@....de>,
Marek Szyprowski <m.szyprowski@...sung.com>
Cc: benh@...nel.crashing.org, paulus@...ba.org,
"list@....net:IOMMU DRIVERS" <iommu@...ts.linux-foundation.org>,
sstabellini@...nel.org, Robin Murphy <robin.murphy@....com>,
grant.likely@....com, xypron.glpk@....de,
Thierry Reding <treding@...dia.com>, mingo@...nel.org,
bauerman@...ux.ibm.com, peterz@...radead.org,
Greg KH <gregkh@...uxfoundation.org>,
Saravana Kannan <saravanak@...gle.com>,
"Rafael J . Wysocki" <rafael.j.wysocki@...el.com>,
heikki.krogerus@...ux.intel.com,
Andy Shevchenko <andriy.shevchenko@...ux.intel.com>,
Randy Dunlap <rdunlap@...radead.org>,
Dan Williams <dan.j.williams@...el.com>,
Bartosz Golaszewski <bgolaszewski@...libre.com>,
linux-devicetree <devicetree@...r.kernel.org>,
lkml <linux-kernel@...r.kernel.org>,
linuxppc-dev@...ts.ozlabs.org, xen-devel@...ts.xenproject.org,
Nicolas Boichat <drinkcat@...omium.org>,
Jim Quinlan <james.quinlan@...adcom.com>, tfiga@...omium.org,
bskeggs@...hat.com, bhelgaas@...gle.com, chris@...is-wilson.co.uk,
tientzu@...omium.org, daniel@...ll.ch, airlied@...ux.ie,
dri-devel@...ts.freedesktop.org, intel-gfx@...ts.freedesktop.org,
jani.nikula@...ux.intel.com, jxgao@...gle.com,
joonas.lahtinen@...ux.intel.com, linux-pci@...r.kernel.org,
maarten.lankhorst@...ux.intel.com, matthew.auld@...el.com,
nouveau@...ts.freedesktop.org, rodrigo.vivi@...el.com,
thomas.hellstrom@...ux.intel.com
Subject: [PATCH v5 14/16] dma-direct: Allocate memory from restricted DMA pool if available

The restricted DMA pool is preferred if available.

The restricted DMA pool provides a basic level of protection against DMA
overwriting buffer contents at unexpected times. However, to protect
against general data leakage and system memory corruption, the system
still needs to provide a way to lock down the memory access, e.g., an MPU.

Signed-off-by: Claire Chang <tientzu@...omium.org>
---
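A note on context: the is_dev_swiotlb_force() checks added below rely on
a helper introduced earlier in this series and not shown in this patch.
As a rough sketch of the shape such a helper can take (the
dev->dma_io_tlb_mem test is an illustrative assumption, not the actual
code from the series):

	static inline bool is_dev_swiotlb_force(struct device *dev)
	{
	#ifdef CONFIG_DMA_RESTRICTED_POOL
		/* Assumed: set at probe time when a restricted pool
		 * is attached to the device. */
		if (dev->dma_io_tlb_mem)
			return true;
	#endif
		return false;
	}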
kernel/dma/direct.c | 35 ++++++++++++++++++++++++++---------
1 file changed, 26 insertions(+), 9 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 7a27f0510fcc..29523d2a9845 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -78,6 +78,10 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
 		size_t size)
 {
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+	if (swiotlb_free(dev, page, size))
+		return;
+#endif
 	dma_free_contiguous(dev, page, size);
 }
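
Note: swiotlb_free() is expected here to report whether it handled the
page, i.e. (assumed prototype, from the earlier swiotlb patches in this
series):

	bool swiotlb_free(struct device *dev, struct page *page, size_t size);

returning true only when the page came from the device's restricted
pool, so dma_free_contiguous() keeps handling every other allocation.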
@@ -92,7 +96,17 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 					   &phys_limit);
-	page = dma_alloc_contiguous(dev, size, gfp);
+
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+	page = swiotlb_alloc(dev, size);
+	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+		__dma_direct_free_pages(dev, page, size);
+		page = NULL;
+	}
+#endif
+
+	if (!page)
+		page = dma_alloc_contiguous(dev, size, gfp);
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 		dma_free_contiguous(dev, page, size);
 		page = NULL;
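
Note: swiotlb_alloc() (assumed prototype, from earlier in this series):

	struct page *swiotlb_alloc(struct device *dev, size_t size);

returns NULL when the device has no restricted pool, in which case the
original dma_alloc_contiguous() path runs unchanged. Because the
restricted pool sits at a fixed physical address and cannot honor the
gfp/phys_limit constraints, a pool page that fails dma_coherent_ok() is
released via __dma_direct_free_pages(), which routes it back to the pool
through swiotlb_free() above.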
@@ -148,7 +162,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	gfp |= __GFP_NOWARN;
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev)) {
+	    !force_dma_unencrypted(dev) && !is_dev_swiotlb_force(dev)) {
 		page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
 		if (!page)
 			return NULL;
@@ -161,8 +175,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	}
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    !dev_is_dma_coherent(dev))
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) &&
+	    !is_dev_swiotlb_force(dev))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 
 	/*
@@ -172,7 +186,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
 	    !gfpflags_allow_blocking(gfp) &&
 	    (force_dma_unencrypted(dev) ||
-	    (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev))))
+	    (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	     !dev_is_dma_coherent(dev))) &&
+	    !is_dev_swiotlb_force(dev))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	/* we always manually zero the memory once we are done */
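
Note: taken together, the is_dev_swiotlb_force() checks above keep a
restricted-pool device out of arch_dma_alloc() and the atomic coherent
pool, both of which would hand back regular system memory that the
device is not supposed to DMA into; such allocations fall through to
__dma_direct_alloc_pages() instead, where swiotlb_alloc() is tried
first.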
@@ -253,15 +269,15 @@ void dma_direct_free(struct device *dev, size_t size,
 	unsigned int page_order = get_order(size);
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-	    !force_dma_unencrypted(dev)) {
+	    !force_dma_unencrypted(dev) && !is_dev_swiotlb_force(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
 		dma_free_contiguous(dev, cpu_addr, size);
 		return;
 	}
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    !dev_is_dma_coherent(dev)) {
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev) &&
+	    !is_dev_swiotlb_force(dev)) {
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
 		return;
 	}
@@ -289,7 +305,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	void *ret;
 
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp))
+	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
+	    !is_dev_swiotlb_force(dev))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	page = __dma_direct_alloc_pages(dev, size, gfp);
--
2.31.1.368.gbe11c130af-goog