Message-ID: <20191113122407.1171-2-laurentiu.tudor@nxp.com>
Date:   Wed, 13 Nov 2019 12:24:19 +0000
From:   Laurentiu Tudor <laurentiu.tudor@....com>
To:     "hch@....de" <hch@....de>,
        "robin.murphy@....com" <robin.murphy@....com>,
        "joro@...tes.org" <joro@...tes.org>,
        Ioana Ciocoi Radulescu <ruxandra.radulescu@....com>,
        "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
        "iommu@...ts.linux-foundation.org" <iommu@...ts.linux-foundation.org>,
        "netdev@...r.kernel.org" <netdev@...r.kernel.org>,
        Ioana Ciornei <ioana.ciornei@....com>
CC:     Leo Li <leoyang.li@....com>,
        Diana Madalina Craciun <diana.craciun@....com>,
        "davem@...emloft.net" <davem@...emloft.net>,
        Madalin Bucur <madalin.bucur@....com>,
        Camelia Alexandra Groza <camelia.groza@....com>,
        Laurentiu Tudor <laurentiu.tudor@....com>
Subject: [PATCH v3 1/4] dma-mapping: introduce new dma unmap and sync api
 variants

From: Laurentiu Tudor <laurentiu.tudor@....com>

Introduce a few new DMA unmap and sync variants that, in addition to
what the original variants do, also return the virtual address
corresponding to the input DMA address (the page-based unmap variant
returns the corresponding struct page instead). Additionally, provide
an API that can be used to check at runtime whether these variants are
actually available.
In order to implement them, a new DMA map op is added and used:
    void *get_virt_addr(dev, dma_handle);
It performs the actual conversion of an input DMA address to the
corresponding virtual address.
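
As a rough illustration (not part of this patch), a driver Rx cleanup
path could use the new API along these lines; example_rx_cleanup() and
process_buffer() are hypothetical names:

    /* Hypothetical Rx-cleanup sketch: recover the buffer virtual address
     * directly from the DMA address handed back by hardware, instead of
     * keeping a separate dma_addr -> vaddr lookup table in the driver.
     */
    static void example_rx_cleanup(struct device *dev, dma_addr_t busaddr,
                                   size_t len)
    {
            void *vaddr;

            /* Only usable when the backend can translate dma -> virt. */
            if (!dma_can_unmap_by_dma_addr(dev))
                    return;

            /* Unmap and get back the buffer CPU address in one call. */
            vaddr = dma_unmap_single_desc(dev, busaddr, len, DMA_FROM_DEVICE);
            if (vaddr)
                    process_buffer(vaddr, len);
    }

On the backend side, a dma_map_ops implementation sitting behind an
IOMMU could provide the new op roughly as below (again only a sketch,
assuming the mapped buffers live in the kernel linear mapping):

    /* Hypothetical ->get_virt_addr() for an IOMMU-backed dma_map_ops. */
    static void *example_get_virt_addr(struct device *dev, dma_addr_t handle)
    {
            struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
            phys_addr_t phys;

            if (!domain)
                    return NULL;

            phys = iommu_iova_to_phys(domain, handle);
            if (!phys)
                    return NULL;

            return phys_to_virt(phys);
    }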

Signed-off-by: Laurentiu Tudor <laurentiu.tudor@....com>
---
 include/linux/dma-mapping.h | 45 +++++++++++++++++++++++++++++++
 kernel/dma/mapping.c        | 53 +++++++++++++++++++++++++++++++++++++
 2 files changed, 98 insertions(+)

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 4a1c4fca475a..0940bd75df8e 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -132,6 +132,7 @@ struct dma_map_ops {
 	u64 (*get_required_mask)(struct device *dev);
 	size_t (*max_mapping_size)(struct device *dev);
 	unsigned long (*get_merge_boundary)(struct device *dev);
+	void *(*get_virt_addr)(struct device *dev, dma_addr_t dma_handle);
 };
 
 #define DMA_MAPPING_ERROR		(~(dma_addr_t)0)
@@ -442,6 +443,13 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	return 0;
 }
 
+static inline bool dma_can_unmap_by_dma_addr(struct device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	return dma_is_direct(ops) || (ops && ops->get_virt_addr);
+}
+
 void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t flag, unsigned long attrs);
 void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
@@ -458,6 +466,14 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs);
+void *dma_unmap_single_attrs_desc(struct device *dev, dma_addr_t addr,
+				  size_t size, enum dma_data_direction dir,
+				  unsigned long attrs);
+struct page *
+dma_unmap_page_attrs_desc(struct device *dev, dma_addr_t addr, size_t size,
+			  enum dma_data_direction dir, unsigned long attrs);
+void *dma_sync_single_for_cpu_desc(struct device *dev, dma_addr_t addr,
+				   size_t size, enum dma_data_direction dir);
 bool dma_can_mmap(struct device *dev);
 int dma_supported(struct device *dev, u64 mask);
 int dma_set_mask(struct device *dev, u64 mask);
@@ -534,6 +550,27 @@ static inline void dmam_free_coherent(struct device *dev, size_t size,
 		void *vaddr, dma_addr_t dma_handle)
 {
 }
+
+static inline void *
+dma_unmap_single_attrs_desc(struct device *dev, dma_addr_t addr, size_t size,
+			    enum dma_data_direction dir, unsigned long attrs)
+{
+	return NULL;
+}
+
+static inline struct page *
+dma_unmap_page_attrs_desc(struct device *dev, dma_addr_t addr, size_t size,
+			  enum dma_data_direction dir, unsigned long attrs)
+{
+	return NULL;
+}
+
+static inline void *
+dma_sync_single_for_cpu_desc(struct device *dev, dma_addr_t addr, size_t size,
+			     enum dma_data_direction dir)
+{
+	return NULL;
+}
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction dir)
 {
@@ -578,6 +615,11 @@ static inline unsigned long dma_get_merge_boundary(struct device *dev)
 {
 	return 0;
 }
+
+static inline bool dma_can_unmap_by_dma_addr(struct device *dev)
+{
+	return false;
+}
 #endif /* CONFIG_HAS_DMA */
 
 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
@@ -610,10 +652,13 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 
 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
+#define dma_unmap_single_desc(d, a, s, r) \
+		dma_unmap_single_attrs_desc(d, a, s, r, 0)
 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
 #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
+#define dma_unmap_page_desc(d, a, s, r) dma_unmap_page_attrs_desc(d, a, s, r, 0)
 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
 
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index d9334f31a5af..2b6f245c9bb1 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -345,6 +345,59 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 }
 EXPORT_SYMBOL(dma_free_attrs);
 
+struct page *
+dma_unmap_page_attrs_desc(struct device *dev, dma_addr_t addr, size_t size,
+			  enum dma_data_direction dir, unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	void *ptr = NULL;
+
+	if (dma_is_direct(ops))
+		ptr = phys_to_virt(dma_to_phys(dev, addr));
+	else if (ops && ops->get_virt_addr)
+		ptr = ops->get_virt_addr(dev, addr);
+
+	dma_unmap_page_attrs(dev, addr, size, dir, attrs);
+
+	return ptr ? virt_to_page(ptr) : NULL;
+}
+EXPORT_SYMBOL_GPL(dma_unmap_page_attrs_desc);
+
+void *dma_unmap_single_attrs_desc(struct device *dev, dma_addr_t addr,
+				  size_t size, enum dma_data_direction dir,
+				  unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	void *ptr = NULL;
+
+	if (dma_is_direct(ops))
+		ptr = phys_to_virt(dma_to_phys(dev, addr));
+	else if (ops && ops->get_virt_addr)
+		ptr = ops->get_virt_addr(dev, addr);
+
+	dma_unmap_single_attrs(dev, addr, size, dir, attrs);
+
+	return ptr;
+}
+EXPORT_SYMBOL_GPL(dma_unmap_single_attrs_desc);
+
+void *dma_sync_single_for_cpu_desc(struct device *dev, dma_addr_t addr,
+				   size_t size, enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	void *ptr = NULL;
+
+	if (dma_is_direct(ops))
+		ptr = phys_to_virt(dma_to_phys(dev, addr));
+	else if (ops && ops->get_virt_addr)
+		ptr = ops->get_virt_addr(dev, addr);
+
+	dma_sync_single_for_cpu(dev, addr, size, dir);
+
+	return ptr;
+}
+EXPORT_SYMBOL_GPL(dma_sync_single_for_cpu_desc);
+
 int dma_supported(struct device *dev, u64 mask)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
-- 
2.17.1
