Message-Id: <20260128-dma_ll_comlib-v1-10-1b1fa2c671f9@nxp.com>
Date: Wed, 28 Jan 2026 13:05:29 -0500
From: Frank Li <Frank.Li@....com>
To: Vinod Koul <vkoul@...nel.org>
Cc: linux-kernel@...r.kernel.org, dmaengine@...r.kernel.org, 
 imx@...ts.linux.dev, joy.zou@....com, Frank Li <Frank.Li@....com>
Subject: [PATCH RFC 10/12] dmaengine: move fsl-edma dma_[un]map_resource()
 to linked list library

Move the fsl-edma dma_[un]map_resource() handling of the slave device
address into the common linked list library. These helpers do not touch
any controller-specific hardware, so they can be reused by other DMA
engine drivers.
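
A rough usage sketch for another vchan-based driver (illustration
only: foo_prep_slave_sg() is hypothetical and the hardware link-list
setup is elided; the vchan_dma_ll_*() helpers, __vchan_tx_prep() and
the per-channel config in struct dma_chan come from this series):

static struct dma_async_tx_descriptor *
foo_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		  unsigned int sg_len, enum dma_transfer_direction dir,
		  unsigned long flags, void *context)
{
	struct dma_slave_config *cfg = &chan->config;
	struct virt_dma_chan *vchan = to_virt_chan(chan);
	struct dma_ll_desc *desc;

	if (!is_slave_direction(dir))
		return NULL;

	desc = vchan_dma_ll_alloc_desc(chan, sg_len, flags);
	if (!desc)
		return NULL;
	desc->dir = dir;

	/* Map the slave FIFO address; vchan_dma_ll_free_desc() unmaps it */
	if (vchan_dma_ll_map_slave_addr(chan, desc, dir, cfg)) {
		vchan_dma_ll_free_desc(&desc->vdesc);
		return NULL;
	}

	/* ... fill link-list items using desc->src.addr / desc->dst.addr ... */

	return __vchan_tx_prep(vchan, &desc->vdesc);
}

Because the mapping now lives in the descriptor, it is released
together with the descriptor in vchan_dma_ll_free_desc() instead of
being tracked as per-channel state.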

Signed-off-by: Frank Li <Frank.Li@....com>
---
 drivers/dma/fsl-edma-common.c | 85 +++++++++----------------------------------
 drivers/dma/fsl-edma-common.h |  2 -
 drivers/dma/ll-dma.c          | 64 ++++++++++++++++++++++++++++++++
 drivers/dma/virt-dma.h        |  9 +++++
 4 files changed, 91 insertions(+), 69 deletions(-)

diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index a8f29830e0172b7e818d209f20145121631743c3..ff1ef067cfcffef876eefd30c62d630c77ac537a 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -264,65 +264,9 @@ int fsl_edma_resume(struct dma_chan *chan)
 	return 0;
 }
 
-static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
-{
-	if (fsl_chan->dma_dir != DMA_NONE)
-		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
-				   fsl_chan->dma_dev_addr,
-				   fsl_chan->dma_dev_size,
-				   fsl_chan->dma_dir, 0);
-	fsl_chan->dma_dir = DMA_NONE;
-}
-
-static enum dma_data_direction
-fsl_dma_dir_trans_to_data(enum dma_transfer_direction dir)
-{
-	if (dir == DMA_MEM_TO_DEV)
-		return DMA_FROM_DEVICE;
-
-	if (dir ==  DMA_DEV_TO_MEM)
-		return DMA_TO_DEVICE;
-
-	return DMA_NONE;
-}
-
-static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
-				    enum dma_transfer_direction dir)
-{
-	struct dma_slave_config *cfg = &fsl_chan->vchan.chan.config;
-	struct dma_slave_cfg *c = dma_slave_get_cfg(cfg, dir);
-	struct device *dev = fsl_chan->vchan.chan.device->dev;
-	enum dma_data_direction dma_dir;
-	phys_addr_t addr = 0;
-	u32 size = 0;
-
-	dma_dir = fsl_dma_dir_trans_to_data(dir);
-
-	addr = c->addr;
-	size = c->maxburst;
-
-	/* Already mapped for this config? */
-	if (fsl_chan->dma_dir == dma_dir)
-		return true;
-
-	fsl_edma_unprep_slave_dma(fsl_chan);
-
-	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
-	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
-		return false;
-	fsl_chan->dma_dev_size = size;
-	fsl_chan->dma_dir = dma_dir;
-
-	return true;
-}
-
 int fsl_edma_slave_config(struct dma_chan *chan,
 				 struct dma_slave_config *cfg)
 {
-	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-
-	fsl_edma_unprep_slave_dma(fsl_chan);
-
 	return 0;
 }
 
@@ -611,9 +555,6 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
 	if (!is_slave_direction(direction))
 		return NULL;
 
-	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
-		return NULL;
-
 	sg_len = buf_len / period_len;
 	fsl_desc = vchan_dma_ll_alloc_desc(chan, sg_len, flags);
 	if (!fsl_desc)
@@ -621,6 +562,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
 	fsl_desc->iscyclic = true;
 	fsl_desc->dir = direction;
 
+	if (vchan_dma_ll_map_slave_addr(chan, fsl_desc, direction, cfg))
+		goto err;
+
 	dma_buf_next = dma_addr;
 	if (direction == DMA_MEM_TO_DEV) {
 		if (!cfg->src_addr_width)
@@ -649,13 +593,13 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
 
 		if (direction == DMA_MEM_TO_DEV) {
 			src_addr = dma_buf_next;
-			dst_addr = fsl_chan->dma_dev_addr;
+			dst_addr = fsl_desc->dst.addr;
 			soff = cfg->dst_addr_width;
 			doff = fsl_chan->is_multi_fifo ? 4 : 0;
 			if (cfg->dst_port_window_size)
 				doff = cfg->dst_addr_width;
 		} else if (direction == DMA_DEV_TO_MEM) {
-			src_addr = fsl_chan->dma_dev_addr;
+			src_addr = fsl_desc->src.addr;
 			dst_addr = dma_buf_next;
 			soff = fsl_chan->is_multi_fifo ? 4 : 0;
 			doff = cfg->src_addr_width;
@@ -676,6 +620,10 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
 	}
 
 	return __vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc);
+
+err:
+	vchan_dma_ll_free_desc(&fsl_desc->vdesc);
+	return NULL;
 }
 
 struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
@@ -695,15 +643,15 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
 	if (!is_slave_direction(direction))
 		return NULL;
 
-	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
-		return NULL;
-
 	fsl_desc = vchan_dma_ll_alloc_desc(chan, sg_len, flags);
 	if (!fsl_desc)
 		return NULL;
 	fsl_desc->iscyclic = false;
 	fsl_desc->dir = direction;
 
+	if (vchan_dma_ll_map_slave_addr(chan, fsl_desc, direction, cfg))
+		goto err;
+
 	if (direction == DMA_MEM_TO_DEV) {
 		if (!cfg->src_addr_width)
 			cfg->src_addr_width = cfg->dst_addr_width;
@@ -725,11 +673,11 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
 	for_each_sg(sgl, sg, sg_len, i) {
 		if (direction == DMA_MEM_TO_DEV) {
 			src_addr = sg_dma_address(sg);
-			dst_addr = fsl_chan->dma_dev_addr;
+			dst_addr = fsl_desc->dst.addr;
 			soff = cfg->dst_addr_width;
 			doff = 0;
 		} else if (direction == DMA_DEV_TO_MEM) {
-			src_addr = fsl_chan->dma_dev_addr;
+			src_addr = fsl_desc->src.addr;
 			dst_addr = sg_dma_address(sg);
 			soff = 0;
 			doff = cfg->src_addr_width;
@@ -780,6 +728,10 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
 	}
 
 	return __vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc);
+
+err:
+	vchan_dma_ll_free_desc(&fsl_desc->vdesc);
+	return NULL;
 }
 
 void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
@@ -887,7 +839,6 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
 		fsl_edma_chan_mux(fsl_chan, 0, false);
 	fsl_chan->edesc = NULL;
 	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
-	fsl_edma_unprep_slave_dma(fsl_chan);
 	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
 
 	if (fsl_chan->txirq)
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index f2c346cb84f5f15d333cf8547963ea7a717f4d5f..7cba3bc0d39537e675167b42dda644647bf63819 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -164,8 +164,6 @@ struct fsl_edma_chan {
 	u32				attr;
 	bool                            is_sw;
 	struct dma_pool			*tcd_pool;
-	dma_addr_t			dma_dev_addr;
-	u32				dma_dev_size;
 	enum dma_data_direction		dma_dir;
 	char				chan_name[32];
 	char				errirq_name[36];
diff --git a/drivers/dma/ll-dma.c b/drivers/dma/ll-dma.c
index 313ca274df945081fc569ddb6a172298c25bc11c..66e4222ac528f871c75a508c68895078fa38cf7b 100644
--- a/drivers/dma/ll-dma.c
+++ b/drivers/dma/ll-dma.c
@@ -99,12 +99,35 @@ struct dma_ll_desc *vchan_dma_ll_alloc_desc(struct dma_chan *chan, u32 n,
 }
 EXPORT_SYMBOL_GPL(vchan_dma_ll_alloc_desc);
 
+static void
+vchan_dma_ll_unmap_slave_addr_one(struct device *dev,
+				  struct dma_slave_map_addr *map,
+				  enum dma_data_direction dir)
+{
+	if (!dma_mapping_error(dev, map->addr) && map->size)
+		dma_unmap_resource(dev, map->addr, map->size, dir, 0);
+}
+
+static void
+vchan_dma_ll_unmap_slave_addr(struct dma_chan *chan, struct dma_ll_desc *desc)
+{
+	struct device *dev = chan->device->dev;
+
+	if (desc->dir == DMA_MEM_TO_DEV || desc->dir == DMA_DEV_TO_DEV)
+		vchan_dma_ll_unmap_slave_addr_one(dev, &desc->dst, DMA_TO_DEVICE);
+
+	if (desc->dir == DMA_DEV_TO_MEM || desc->dir == DMA_DEV_TO_DEV)
+		vchan_dma_ll_unmap_slave_addr_one(dev, &desc->src, DMA_FROM_DEVICE);
+}
+
 void vchan_dma_ll_free_desc(struct virt_dma_desc *vdesc)
 {
 	struct dma_ll_desc *desc = to_dma_ll_desc(vdesc);
 	struct virt_dma_chan *vchan = to_virt_chan(vdesc->tx.chan);
 	int i;
 
+	vchan_dma_ll_unmap_slave_addr(&vchan->chan, desc);
+
 	for (i = 0; i < desc->n_its; i++)
 		dma_pool_free(vchan->ll.pool, desc->its[i].vaddr,
 			      desc->its[i].paddr);
@@ -112,6 +135,47 @@ void vchan_dma_ll_free_desc(struct virt_dma_desc *vdesc)
 }
 EXPORT_SYMBOL_GPL(vchan_dma_ll_free_desc);
 
+static int
+vchan_dma_ll_map_slave_addr_one(struct device *dev,
+				struct dma_slave_map_addr *map,
+				enum dma_transfer_direction tran_dir,
+				enum dma_data_direction data_dir,
+				struct dma_slave_cfg *cfg)
+{
+	map->addr = dma_map_resource(dev, cfg->addr, cfg->maxburst, data_dir, 0);
+	if (dma_mapping_error(dev, map->addr))
+		return -ENOMEM;
+
+	map->size = cfg->maxburst;
+	return 0;
+}
+
+int vchan_dma_ll_map_slave_addr(struct dma_chan *chan, struct dma_ll_desc *desc,
+				enum dma_transfer_direction dir,
+				struct dma_slave_config *cfg)
+{
+	struct device *dev = chan->device->dev;
+
+	if (dir == DMA_MEM_TO_DEV || dir == DMA_DEV_TO_DEV) {
+		if (vchan_dma_ll_map_slave_addr_one(dev, &desc->dst, dir,
+						    DMA_TO_DEVICE, &cfg->dst))
+			goto err;
+	}
+
+	if (dir == DMA_DEV_TO_MEM || dir == DMA_DEV_TO_DEV) {
+		if (vchan_dma_ll_map_slave_addr_one(dev, &desc->src, dir,
+						    DMA_FROM_DEVICE, &cfg->src))
+			goto err;
+	}
+
+	return 0;
+
+err:
+	vchan_dma_ll_unmap_slave_addr(chan, desc);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(vchan_dma_ll_map_slave_addr);
+
 struct dma_async_tx_descriptor *
 vchan_dma_ll_prep_memcpy(struct dma_chan *chan,
 			 dma_addr_t dma_dst, dma_addr_t dma_src, size_t len,
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index f4aec6eb3c3900a5473c8feedc16b06e29751deb..0a18663dc95f323f7a9bab76f2d730701277371a 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -24,6 +24,10 @@ struct dma_linklist_item {
 	void *vaddr;
 };
 
+struct dma_slave_map_addr {
+	dma_addr_t addr;
+	size_t size;
+};
 /*
  * Must put to last one if need extend it
  *   struct vendor_dma_ll_desc {
@@ -35,6 +39,8 @@ struct dma_ll_desc {
 	struct virt_dma_desc vdesc;
 	bool iscyclic;
 	enum dma_transfer_direction dir;
+	struct dma_slave_map_addr src;
+	struct dma_slave_map_addr dst;
 	u32 n_its;
 	struct dma_linklist_item its[];
 };
@@ -304,6 +310,9 @@ vchan_dma_ll_prep_memcpy(struct dma_chan *chan,
 			 unsigned long flags);
 void vchan_dma_ll_free_desc(struct virt_dma_desc *vdesc);
 int vchan_dma_ll_terminate_all(struct dma_chan *chan);
+int vchan_dma_ll_map_slave_addr(struct dma_chan *chan, struct dma_ll_desc *desc,
+				enum dma_transfer_direction dir,
+				struct dma_slave_config *cfg);
 #endif
 
 #endif

-- 
2.34.1

