Date:	Tue,  8 Mar 2016 16:22:17 +0700
From:	Stephen Boyd <stephen.boyd@...aro.org>
To:	linux-kernel@...r.kernel.org
Cc:	Laura Abbott <lauraa@...eaurora.org>,
	linux-arm@...ts.infradead.org,
	Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
	Mimi Zohar <zohar@...ux.vnet.ibm.com>,
	Robin Murphy <robin.murphy@....com>,
	Laura Abbott <labbott@...hat.com>,
	Arnd Bergmann <arnd@...db.de>,
	Marek Szyprowski <m.szyprowski@...sung.com>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Mark Brown <broonie@...nel.org>,
	Catalin Marinas <catalin.marinas@....com>,
	Will Deacon <will.deacon@....com>
Subject: [RFC/PATCH 2/4] dma-mapping: Add dma_remap() APIs

From: Laura Abbott <lauraa@...eaurora.org>

Some systems are memory constrained, but they need to load very
large firmware images. The firmware subsystem allows drivers to
request that this firmware be loaded from the filesystem, but it
requires the entire firmware to be loaded into kernel memory
before it's provided to the driver. This can lead to a situation
where we map the firmware twice, once to load it into kernel
memory and once to copy it into its final resting place.

This design creates needless memory pressure and delays loading
because we have to copy from kernel memory to somewhere else.
Let's add a couple of DMA APIs that allow us to map DMA buffers
into the CPU's address space in arbitrarily sized chunks. With
this API, we can allocate a DMA buffer with
DMA_ATTR_NO_KERNEL_MAPPING and move a small mapping window across
our large DMA buffer to load the firmware directly into that
buffer.

Cc: Robin Murphy <robin.murphy@....com>
Cc: Arnd Bergmann <arnd@...db.de>
Cc: Marek Szyprowski <m.szyprowski@...sung.com>
Cc: Catalin Marinas <catalin.marinas@....com>
Cc: Will Deacon <will.deacon@....com>
Signed-off-by: Laura Abbott <lauraa@...eaurora.org>
[stephen.boyd@...aro.org: Add dma_attrs and offset to API, use
dma_common_contiguous_remap() instead of ioremap_page_range(),
support dma_remap() even when DMA_ATTR_NO_KERNEL_MAPPING isn't
specified, rewrite commit text]
Signed-off-by: Stephen Boyd <stephen.boyd@...aro.org>
---

TODO: Split off arm64 part into own patch?
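
For reference, a rough usage sketch of the new API (illustrative only,
not part of this series): it copies a large blob into a buffer allocated
with DMA_ATTR_NO_KERNEL_MAPPING by sliding a small CPU mapping window
across it with dma_remap()/dma_unremap(). The helper name load_blob(),
the WINDOW_SIZE value, and the source pointer are made up for the
example.

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/gfp.h>

#define WINDOW_SIZE	(1UL << 20)	/* size of the temporary CPU mapping */

static int load_blob(struct device *dev, const void *src, size_t blob_size)
{
	struct dma_attrs attrs;
	dma_addr_t dma_handle;
	void *cpu_addr, *window;
	size_t off, chunk;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	/* Large buffer, never mapped into the kernel as a whole */
	cpu_addr = dma_alloc_attrs(dev, blob_size, &dma_handle,
				   GFP_KERNEL, &attrs);
	if (!cpu_addr)
		return -ENOMEM;

	for (off = 0; off < blob_size; off += chunk) {
		chunk = min_t(size_t, WINDOW_SIZE, blob_size - off);

		/* Map only the current window into the CPU's address space */
		window = dma_remap(dev, cpu_addr, dma_handle, chunk, off,
				   &attrs);
		if (!window) {
			dma_free_attrs(dev, blob_size, cpu_addr, dma_handle,
				       &attrs);
			return -ENOMEM;
		}

		memcpy(window, src + off, chunk);

		/* Tear the window down before moving it forward */
		dma_unremap(dev, window, chunk, off, &attrs);
	}

	/* The buffer stays allocated here for the device to consume */
	return 0;
}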

 arch/arm64/mm/dma-mapping.c | 39 +++++++++++++++++++++++++++++++++++++++
 include/linux/dma-mapping.h | 35 +++++++++++++++++++++++++++++++++++
 2 files changed, 74 insertions(+)

diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 06a593653f23..92a313a7309b 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -345,6 +345,43 @@ static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
 	return ret;
 }
 
+static void *arm64_dma_remap(struct device *dev, void *cpu_addr,
+			     dma_addr_t handle, size_t size,
+			     unsigned long offset, struct dma_attrs *attrs)
+{
+	struct page *page = phys_to_page(dma_to_phys(dev, handle) + offset);
+	bool coherent = is_device_dma_coherent(dev);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+	void *ptr;
+
+	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
+		offset &= ~PAGE_MASK;
+		size = PAGE_ALIGN(size + offset);
+
+		ptr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot,
+						  NULL);
+	} else {
+		ptr = cpu_addr;
+	}
+	if (!ptr)
+		return NULL;
+
+	return ptr + offset;
+}
+
+static void arm64_dma_unremap(struct device *dev, void *cpu_addr,
+			      size_t size, unsigned long offset,
+			      struct dma_attrs *attrs)
+{
+	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+		return;
+
+	offset &= ~PAGE_MASK;
+	cpu_addr -= offset;
+
+	vunmap(cpu_addr);
+}
+
 static struct dma_map_ops swiotlb_dma_ops = {
 	.alloc = __dma_alloc,
 	.free = __dma_free,
@@ -360,6 +397,8 @@ static struct dma_map_ops swiotlb_dma_ops = {
 	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
 	.dma_supported = swiotlb_dma_supported,
 	.mapping_error = swiotlb_dma_mapping_error,
+	.remap = arm64_dma_remap,
+	.unremap = arm64_dma_unremap,
 };
 
 static int __init atomic_pool_init(void)
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 75857cda38e9..d4ae45746d61 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -64,6 +64,12 @@ struct dma_map_ops {
 	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
 	int (*dma_supported)(struct device *dev, u64 mask);
 	int (*set_dma_mask)(struct device *dev, u64 mask);
+	void *(*remap)(struct device *dev, void *cpu_addr, dma_addr_t handle,
+			size_t size, unsigned long offset,
+			struct dma_attrs *attrs);
+	void (*unremap)(struct device *dev, void *cpu_addr,
+			size_t size, unsigned long offset,
+			struct dma_attrs *attrs);
 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
 	u64 (*get_required_mask)(struct device *dev);
 #endif
@@ -465,6 +471,35 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 }
 #endif
 
+static inline void *dma_remap(struct device *dev, void *cpu_addr,
+		dma_addr_t dma_handle, size_t size, unsigned long offset,
+		struct dma_attrs *attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (!ops)
+		return NULL;
+	if (!ops->remap)
+		return NULL;
+
+	return ops->remap(dev, cpu_addr, dma_handle, size, offset, attrs);
+}
+
+
+static inline void dma_unremap(struct device *dev, void *remapped_addr,
+				size_t size, unsigned long offset,
+				struct dma_attrs *attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (!ops)
+		return;
+	if (!ops->unremap)
+		return;
+
+	return ops->unremap(dev, remapped_addr, size, offset, attrs);
+}
+
 static inline u64 dma_get_mask(struct device *dev)
 {
 	if (dev && dev->dma_mask && *dev->dma_mask)
-- 
2.7.0.25.gfc10eb5
