Message-Id: <20210723214031.3251801-4-atish.patra@wdc.com>
Date: Fri, 23 Jul 2021 14:40:29 -0700
From: Atish Patra <atish.patra@....com>
To: linux-kernel@...r.kernel.org
Cc: Atish Patra <atish.patra@....com>,
Albert Ou <aou@...s.berkeley.edu>,
Christoph Hellwig <hch@....de>, devicetree@...r.kernel.org,
Dmitry Vyukov <dvyukov@...gle.com>,
Frank Rowand <frowand.list@...il.com>,
Guo Ren <guoren@...ux.alibaba.com>,
iommu@...ts.linux-foundation.org, linux-riscv@...ts.infradead.org,
Marek Szyprowski <m.szyprowski@...sung.com>,
Palmer Dabbelt <palmer@...belt.com>,
Paul Walmsley <paul.walmsley@...ive.com>,
Rob Herring <robh+dt@...nel.org>,
Robin Murphy <robin.murphy@....com>,
Tobias Klauser <tklauser@...tanz.ch>
Subject: [RFC 3/5] dma-mapping: Enable global non-coherent pool support for RISC-V
Currently, the "linux,dma-default" property is used to reserve a global
non-coherent pool from which memory for DMA operations is allocated.
This can be useful for RISC-V as well, because the ISA specification
does not yet define a way to modify PMA attributes or page table
entries to mark a memory region as non-cacheable.

A non-cacheable memory window is an alternative way for vendors to
support non-coherent devices. The "dma-ranges" property must be used in
conjunction with the "linux,dma-default" property to define one or more
mappings between device and CPU-accessible memory regions (see the
illustrative fragment below).

This allows RISC-V to use the global pool on non-coherent platforms
that rely on an uncached memory region outside of system RAM.
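
For example, a platform could describe such a window roughly as in the
fragment below. This is only a sketch: the node name, addresses and
sizes are made up, and the exact placement of the "dma-ranges" property
depends on the platform topology.

    reserved-memory {
            #address-cells = <2>;
            #size-cells = <2>;
            ranges;

            dma_pool: dma-pool@80000000 {
                    compatible = "shared-dma-pool";
                    /* device-visible base and size of the pool */
                    reg = <0x0 0x80000000 0x0 0x8000000>;
                    /* map the pool base to an uncached CPU alias */
                    dma-ranges = <0x0 0x80000000 0x10 0x0 0x0 0x8000000>;
                    no-map;
                    linux,dma-default;
            };
    };
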
Signed-off-by: Atish Patra <atish.patra@....com>
---
kernel/dma/coherent.c | 49 ++++++++++++++++++++++++++++++++++++-------
1 file changed, 41 insertions(+), 8 deletions(-)
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 97677df5408b..d0b33b1a76f0 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -9,6 +9,8 @@
#include <linux/module.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
+#include <linux/of_address.h>
+#include <linux/libfdt.h>
struct dma_coherent_mem {
void *virt_base;
@@ -302,19 +304,27 @@ int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
vaddr, size, ret);
}
-int dma_init_global_coherent(phys_addr_t phys_addr, size_t size)
+static int __dma_init_global_coherent(phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
struct dma_coherent_mem *mem;
- mem = dma_init_coherent_memory(phys_addr, phys_addr, size, true);
+ if (phys_addr == device_addr)
+ mem = dma_init_coherent_memory(phys_addr, device_addr, size, true);
+ else
+ mem = dma_init_coherent_memory(phys_addr, device_addr, size, false);
+
if (IS_ERR(mem))
return PTR_ERR(mem);
dma_coherent_default_memory = mem;
pr_info("DMA: default coherent area is set\n");
return 0;
}
-#endif /* CONFIG_DMA_GLOBAL_POOL */
+int dma_init_global_coherent(phys_addr_t phys_addr, size_t size)
+{
+ return __dma_init_global_coherent(phys_addr, phys_addr, size);
+}
+#endif /* CONFIG_DMA_GLOBAL_POOL */
/*
* Support for reserved memory regions defined in device tree
*/
@@ -329,8 +339,8 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
if (!rmem->priv) {
struct dma_coherent_mem *mem;
- mem = dma_init_coherent_memory(rmem->base, rmem->base,
- rmem->size, true);
+ mem = dma_init_coherent_memory(rmem->base, rmem->base, rmem->size, true);
+
if (IS_ERR(mem))
return PTR_ERR(mem);
rmem->priv = mem;
@@ -358,7 +368,7 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem)
if (of_get_flat_dt_prop(node, "reusable", NULL))
return -EINVAL;
-#ifdef CONFIG_ARM
+#if defined(CONFIG_ARM) || defined(CONFIG_RISCV)
if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
pr_err("Reserved memory: regions without no-map are not yet supported\n");
return -EINVAL;
@@ -382,10 +392,33 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem)
#ifdef CONFIG_DMA_GLOBAL_POOL
static int __init dma_init_reserved_memory(void)
{
+ struct device_node *np;
+ const struct bus_dma_region *map = NULL;
+ int ret;
+ int64_t uc_offset = 0;
+
if (!dma_reserved_default_memory)
return -ENOMEM;
- return dma_init_global_coherent(dma_reserved_default_memory->base,
- dma_reserved_default_memory->size);
+
+ /* dma-ranges is only valid for global pool i.e. dma-default is set */
+ np = of_find_node_with_property(NULL, "linux,dma-default");
+ if (!np)
+ goto global_init;
+ of_node_put(np);
+
+ ret = of_dma_get_range(np, &map);
+ if (ret < 0)
+ goto global_init;
+
+ /* Sanity check for the non-coherent global pool from uncached region */
+ if (map->dma_start == dma_reserved_default_memory->base &&
+ map->size == dma_reserved_default_memory->size)
+ uc_offset = map->offset;
+
+global_init:
+ return __dma_init_global_coherent(dma_reserved_default_memory->base + uc_offset,
+ dma_reserved_default_memory->base,
+ dma_reserved_default_memory->size);
}
core_initcall(dma_init_reserved_memory);
#endif /* CONFIG_DMA_GLOBAL_POOL */
--
2.31.1