Message-Id: <20200713091211.2183368-3-tientzu@chromium.org>
Date: Mon, 13 Jul 2020 17:12:09 +0800
From: Claire Chang <tientzu@...omium.org>
To: robh+dt@...nel.org, frowand.list@...il.com, hch@....de,
m.szyprowski@...sung.com, robin.murphy@....com
Cc: treding@...dia.com, gregkh@...uxfoundation.org,
saravanak@...gle.com, suzuki.poulose@....com,
dan.j.williams@...el.com, heikki.krogerus@...ux.intel.com,
bgolaszewski@...libre.com, devicetree@...r.kernel.org,
linux-kernel@...r.kernel.org, iommu@...ts.linux-foundation.org,
tfiga@...omium.org, drinkcat@...omium.org,
Claire Chang <tientzu@...omium.org>
Subject: [PATCH 2/4] dma-mapping: Add bounced DMA pool

Add the initialization function to create bounce buffer pools from
matching reserved-memory nodes in the device tree.

The bounce buffer pools provide a basic level of protection against the
DMA overwriting buffer contents at unexpected times. However, to
protect against general data leakage and system memory corruption, the
system needs to provide a way to restrict the DMA to a predefined
memory region.
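
For example, a matching reserved-memory node might look like the sketch
below. The "bounced-dma-pool" compatible string is the one declared at
the bottom of this patch; the node name, unit address, and size are
made-up placeholders. Note that dma_bounced_setup() rejects nodes
carrying "reusable", "no-map", "linux,cma-default", or
"linux,dma-default", so the region has to be a plain static
reservation:

	reserved-memory {
		#address-cells = <2>;
		#size-cells = <2>;
		ranges;

		bounced_dma: bounced-dma@50000000 {
			compatible = "bounced-dma-pool";
			reg = <0x0 0x50000000 0x0 0x400000>;
		};
	};

A device node would then typically claim the pool with a memory-region
phandle (memory-region = <&bounced_dma>;), assuming the standard
of_reserved_mem device-init path, which ends up calling the
device_init hook added below.
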
Signed-off-by: Claire Chang <tientzu@...omium.org>
---
 kernel/dma/bounced.c | 89 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 89 insertions(+)

diff --git a/kernel/dma/bounced.c b/kernel/dma/bounced.c
index fcaabb5eccf2..0bfd6cf90aee 100644
--- a/kernel/dma/bounced.c
+++ b/kernel/dma/bounced.c
@@ -12,6 +12,9 @@
 #include <linux/dma-noncoherent.h>
 #include <linux/io.h>
 #include <linux/genalloc.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
 #include <linux/slab.h>
 
 struct dma_bounced_mem {
@@ -213,3 +216,89 @@ const struct dma_map_ops dma_bounced_ops = {
 	.max_mapping_size = dma_bounced_max_mapping_size,
 	.get_merge_boundary = NULL,
 };
+
+static int dma_bounced_device_init(struct reserved_mem *rmem,
+				   struct device *dev)
+{
+	struct dma_bounced_mem *mem;
+	int ret;
+
+	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+
+	mem->virt_base =
+		devm_memremap(dev, rmem->base, rmem->size, MEMREMAP_WB);
+	if (IS_ERR(mem->virt_base)) {
+		ret = PTR_ERR(mem->virt_base);
+		goto error;
+	}
+
+	mem->size = rmem->size;
+	mem->device_base = phys_to_dma(dev, rmem->base);
+	mem->device_end = mem->device_base + rmem->size;
+
+	mem->orig_addr = kcalloc(mem->size >> PAGE_SHIFT,
+				 sizeof(*mem->orig_addr), GFP_KERNEL);
+	if (!mem->orig_addr) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	mem->pool = devm_gen_pool_create(dev, PAGE_SHIFT, NUMA_NO_NODE,
+					 "bounced DMA");
+	if (!mem->pool) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ret = gen_pool_add_virt(mem->pool, (unsigned long)mem->virt_base,
+				rmem->base, rmem->size, NUMA_NO_NODE);
+	if (ret)
+		goto error;
+
+	dev->dma_bounced_mem = mem;
+	set_dma_ops(dev, &dma_bounced_ops);
+
+	return 0;
+
+error:
+	kfree(mem->orig_addr);
+	kfree(mem);
+	return ret;
+}
+
+static void dma_bounced_device_release(struct reserved_mem *rmem,
+				       struct device *dev)
+{
+	struct dma_bounced_mem *mem = dev->dma_bounced_mem;
+
+	set_dma_ops(dev, NULL);
+	dev->dma_bounced_mem = NULL;
+
+	kfree(mem->orig_addr);
+	kfree(mem);
+}
+
+static const struct reserved_mem_ops rmem_dma_bounced_ops = {
+	.device_init = dma_bounced_device_init,
+	.device_release = dma_bounced_device_release,
+};
+
+static int __init dma_bounced_setup(struct reserved_mem *rmem)
+{
+	unsigned long node = rmem->fdt_node;
+
+	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
+	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
+	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
+	    of_get_flat_dt_prop(node, "no-map", NULL))
+		return -EINVAL;
+
+	rmem->ops = &rmem_dma_bounced_ops;
+	pr_info("Reserved memory: created DMA bounced memory pool at %pa, size %lu MiB\n",
+		&rmem->base, (unsigned long)rmem->size / SZ_1M);
+	return 0;
+}
+
+RESERVEDMEM_OF_DECLARE(dma, "bounced-dma-pool", dma_bounced_setup);
--
2.27.0.383.g050319c2ae-goog