Message-Id: <20210619034043.199220-11-tientzu@chromium.org>
Date: Sat, 19 Jun 2021 11:40:41 +0800
From: Claire Chang <tientzu@...omium.org>
To: Rob Herring <robh+dt@...nel.org>, mpe@...erman.id.au,
Joerg Roedel <joro@...tes.org>, Will Deacon <will@...nel.org>,
Frank Rowand <frowand.list@...il.com>,
Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>,
boris.ostrovsky@...cle.com, jgross@...e.com,
Christoph Hellwig <hch@....de>,
Marek Szyprowski <m.szyprowski@...sung.com>
Cc: benh@...nel.crashing.org, paulus@...ba.org,
"list@....net:IOMMU DRIVERS" <iommu@...ts.linux-foundation.org>,
sstabellini@...nel.org, Robin Murphy <robin.murphy@....com>,
grant.likely@....com, xypron.glpk@....de,
Thierry Reding <treding@...dia.com>, mingo@...nel.org,
bauerman@...ux.ibm.com, peterz@...radead.org,
Greg KH <gregkh@...uxfoundation.org>,
Saravana Kannan <saravanak@...gle.com>,
"Rafael J . Wysocki" <rafael.j.wysocki@...el.com>,
heikki.krogerus@...ux.intel.com,
Andy Shevchenko <andriy.shevchenko@...ux.intel.com>,
Randy Dunlap <rdunlap@...radead.org>,
Dan Williams <dan.j.williams@...el.com>,
Bartosz Golaszewski <bgolaszewski@...libre.com>,
linux-devicetree <devicetree@...r.kernel.org>,
lkml <linux-kernel@...r.kernel.org>,
linuxppc-dev@...ts.ozlabs.org, xen-devel@...ts.xenproject.org,
Nicolas Boichat <drinkcat@...omium.org>,
Jim Quinlan <james.quinlan@...adcom.com>, tfiga@...omium.org,
bskeggs@...hat.com, bhelgaas@...gle.com, chris@...is-wilson.co.uk,
tientzu@...omium.org, daniel@...ll.ch, airlied@...ux.ie,
dri-devel@...ts.freedesktop.org, intel-gfx@...ts.freedesktop.org,
jani.nikula@...ux.intel.com, jxgao@...gle.com,
joonas.lahtinen@...ux.intel.com, linux-pci@...r.kernel.org,
maarten.lankhorst@...ux.intel.com, matthew.auld@...el.com,
rodrigo.vivi@...el.com, thomas.hellstrom@...ux.intel.com,
thomas.lendacky@....com
Subject: [PATCH v14 10/12] swiotlb: Add restricted DMA pool initialization

Add the initialization function to create restricted DMA pools from
matching reserved-memory nodes.

Regardless of the global swiotlb setting, the restricted DMA pool is
preferred if one is available for the device.

The restricted DMA pools provide a basic level of protection against
DMA overwriting buffer contents at unexpected times. However, to protect
against general data leakage and system memory corruption, the system
still needs to provide a way to lock down memory access, e.g., with an
MPU.
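
As an illustration only (not part of this patch), below is a minimal
sketch of how a consumer could end up using such a pool: a device whose
DT node carries a memory-region phandle to a "restricted-dma-pool"
reserved-memory node can attach to it through the generic
reserved-memory helpers, which invoke the rmem_swiotlb_device_init()
callback added below, after which streaming DMA for that device is
bounced through the pool. The "demo" driver and compatible names are
made up, and the rest of this series wires the attachment into the
common DT DMA-configuration path, so a real driver normally would not
do this by hand.

/* Hypothetical illustration; the "demo" names are not a real driver. */
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

static int demo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	dma_addr_t dma;
	void *buf;
	int ret;

	/*
	 * Attach the device to its memory-region. For a region matched by
	 * RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", ...) this calls
	 * rmem_swiotlb_device_init(), which points dev->dma_io_tlb_mem at
	 * the shared restricted pool.
	 */
	ret = of_reserved_mem_device_init(dev);
	if (ret)
		return ret;

	buf = kzalloc(SZ_4K, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * The pool sets force_bounce, so this streaming mapping is bounced
	 * through the restricted pool instead of exposing the original
	 * kernel buffer to the device.
	 */
	dma = dma_map_single(dev, buf, SZ_4K, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		kfree(buf);
		return -ENOMEM;
	}

	/* ... program the device with "dma", wait for completion ... */

	dma_unmap_single(dev, dma, SZ_4K, DMA_TO_DEVICE);
	kfree(buf);
	return 0;
}

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "demo,restricted-dma-user" },	/* made-up binding */
	{ /* sentinel */ }
};

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.driver = {
		.name = "demo-restricted-dma",
		.of_match_table = demo_of_match,
	},
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");

Without an IOMMU, this confines what the device can touch through
streaming DMA to the reserved region backing the pool.
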
Signed-off-by: Claire Chang <tientzu@...omium.org>
Reviewed-by: Christoph Hellwig <hch@....de>
Tested-by: Stefano Stabellini <sstabellini@...nel.org>
Tested-by: Will Deacon <will@...nel.org>
---
include/linux/swiotlb.h | 3 +-
kernel/dma/Kconfig | 14 ++++++++
kernel/dma/swiotlb.c | 76 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 92 insertions(+), 1 deletion(-)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index a73fad460162..175b6c113ed8 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -73,7 +73,8 @@ extern enum swiotlb_force swiotlb_force;
* range check to see if the memory was in fact allocated by this
* API.
* @nslabs: The number of IO TLB blocks (in groups of 64) between @start and
- * @end. This is command line adjustable via setup_io_tlb_npages.
+ * @end. For default swiotlb, this is command line adjustable via
+ * setup_io_tlb_npages.
* @used: The number of used IO TLB block.
* @list: The free list describing the number of free entries available
* from each index.
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 77b405508743..3e961dc39634 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -80,6 +80,20 @@ config SWIOTLB
bool
select NEED_DMA_MAP_STATE
+config DMA_RESTRICTED_POOL
+ bool "DMA Restricted Pool"
+ depends on OF && OF_RESERVED_MEM
+ select SWIOTLB
+ help
+ This enables support for restricted DMA pools which provide a level of
+ DMA memory protection on systems with limited hardware protection
+ capabilities, such as those lacking an IOMMU.
+
+ For more information see
+ <Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt>
+ and <kernel/dma/swiotlb.c>.
+ If unsure, say "n".
+
#
# Should be selected if we can mmap non-coherent mappings to userspace.
# The only thing that is really required is a way to set an uncached bit
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 273b21090ee8..1aef294c82b5 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -39,6 +39,13 @@
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/slab.h>
+#endif
#include <asm/io.h>
#include <asm/dma.h>
@@ -736,4 +743,73 @@ bool swiotlb_free(struct device *dev, struct page *page, size_t size)
return true;
}
+static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
+ struct device *dev)
+{
+ struct io_tlb_mem *mem = rmem->priv;
+ unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;
+
+ /*
+ * Since multiple devices can share the same pool, the private data,
+ * the io_tlb_mem struct, is initialized by the first device attached
+ * to the pool.
+ */
+ if (!mem) {
+ mem = kzalloc(struct_size(mem, slots, nslabs), GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
+ rmem->size >> PAGE_SHIFT);
+ swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
+ mem->force_bounce = true;
+ mem->for_alloc = true;
+
+ rmem->priv = mem;
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ mem->debugfs =
+ debugfs_create_dir(rmem->name, debugfs_dir);
+ swiotlb_create_debugfs_files(mem);
+ }
+ }
+
+ dev->dma_io_tlb_mem = mem;
+
+ return 0;
+}
+
+static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
+ struct device *dev)
+{
+ dev->dma_io_tlb_mem = io_tlb_default_mem;
+}
+
+static const struct reserved_mem_ops rmem_swiotlb_ops = {
+ .device_init = rmem_swiotlb_device_init,
+ .device_release = rmem_swiotlb_device_release,
+};
+
+static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
+{
+ unsigned long node = rmem->fdt_node;
+
+ if (of_get_flat_dt_prop(node, "reusable", NULL) ||
+ of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
+ of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
+ of_get_flat_dt_prop(node, "no-map", NULL))
+ return -EINVAL;
+
+ if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
+ pr_err("Restricted DMA pool must be accessible within the linear mapping.");
+ return -EINVAL;
+ }
+
+ rmem->ops = &rmem_swiotlb_ops;
+ pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
+ &rmem->base, (unsigned long)rmem->size / SZ_1M);
+ return 0;
+}
+
+RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
#endif /* CONFIG_DMA_RESTRICTED_POOL */
--
2.32.0.288.g62a8d224e6-goog