Message-Id: <20260128-dma_ll_comlib-v1-12-1b1fa2c671f9@nxp.com>
Date: Wed, 28 Jan 2026 13:05:31 -0500
From: Frank Li <Frank.Li@....com>
To: Vinod Koul <vkoul@...nel.org>
Cc: linux-kernel@...r.kernel.org, dmaengine@...r.kernel.org, 
 imx@...ts.linux.dev, joy.zou@....com, Frank Li <Frank.Li@....com>
Subject: [PATCH RFC 12/12] dmaengine: add
 vchan_dma_ll_prep_slave_{sg,cyclic} API

Add a common vchan_dma_ll_prep_slave_{sg,cyclic} API to the DMA
linked-list library, based on the existing fsl-edma implementation. The
helpers walk the scatterlist or cyclic periods and call back into the
driver's set_lli()/set_ll_next() ops to program each link-list item.

Convert the fsl-edma and mcf-edma drivers to use the common API instead
of maintaining their own copy.

Signed-off-by: Frank Li <Frank.Li@....com>
---
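For review context, a minimal sketch of the per-item contract the new
helpers expect from a controller driver. The set_lli()/set_ll_next()
call sites match ll-dma.c below; the callback names, parameter names
and exact types here are inferred for illustration only and are not
part of this series.

static int foo_dma_set_lli(struct dma_ll_desc *desc, u32 idx,
			   dma_addr_t src, dma_addr_t dst, size_t len,
			   bool irq, struct dma_slave_config *config)
{
	/*
	 * Program link-list item 'idx' of 'desc' to move 'len' bytes from
	 * 'src' to 'dst' using the widths/bursts in 'config'. For slave_sg
	 * the library sets the interrupt flag only on the last item; for
	 * cyclic it is set on every period.
	 */
	return 0;
}

static int foo_dma_set_ll_next(struct dma_ll_desc *desc, u32 idx,
			       dma_addr_t next_paddr)
{
	/*
	 * Chain item 'idx' to the item at 'next_paddr'; the cyclic helper
	 * uses this to link the last period back to desc->its[0].paddr.
	 */
	return 0;
}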
 drivers/dma/fsl-edma-common.c | 226 ++++++++----------------------------------
 drivers/dma/fsl-edma-common.h |   8 --
 drivers/dma/fsl-edma-main.c   |   4 +-
 drivers/dma/ll-dma.c          | 112 +++++++++++++++++++++
 drivers/dma/mcf-edma-main.c   |   4 +-
 drivers/dma/virt-dma.h        |   9 ++
 6 files changed, 169 insertions(+), 194 deletions(-)
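The conversion is transparent to dmaengine clients: with
device_prep_slave_sg and device_prep_dma_cyclic now pointing at the
library helpers, an ordinary cyclic setup such as the illustrative
fragment below (placeholder names, error handling omitted) ends up in
vchan_dma_ll_prep_slave_cyclic().

	struct dma_slave_config cfg = {
		.src_addr	= fifo_phys,	/* placeholder */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *tx;

	/* 'chan' was returned by dma_request_chan() for an eDMA channel. */
	dmaengine_slave_config(chan, &cfg);
	tx = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	tx->callback = period_done;		/* placeholder */
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);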

diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index fdac0518316914d59df592ad26f6000d2034bcb9..643e8bd30b88a2cf66eebf024505428365b8f0ae 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -534,185 +534,55 @@ static int fsl_edma_set_lli(struct dma_ll_desc *desc, u32 idx,
 		iter = 1;
 		disable_req = true;
 		nbytes = len;
-	}
-
-	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_MEM_REMOTE)
-		fsl_chan->is_remote = true;
-
-	/* To match with copy_align and max_seg_size so 1 tcd is enough */
-	__fsl_edma_fill_tcd(fsl_chan, tcd, src, dst,
-			    fsl_edma_get_tcd_attr(src_bus_width, dst_bus_width),
-			    soff, nbytes, 0, iter, iter, doff, irq, disable_req);
-
-	return 0;
-}
-
-struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
-		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
-		size_t period_len, enum dma_transfer_direction direction,
-		unsigned long flags)
-{
-	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-	struct dma_slave_config *cfg = &chan->config;
-	struct dma_ll_desc *fsl_desc;
-	dma_addr_t dma_buf_next;
-	bool major_int = true;
-	int sg_len, i;
-	dma_addr_t src_addr, dst_addr, last_sg;
-	u16 soff, doff, iter;
-	u32 nbytes;
-
-	if (!is_slave_direction(direction))
-		return NULL;
-
-	sg_len = buf_len / period_len;
-	fsl_desc = vchan_dma_ll_alloc_desc(chan, sg_len, flags);
-	if (!fsl_desc)
-		return NULL;
-	fsl_desc->iscyclic = true;
-	fsl_desc->dir = direction;
-
-	if (vchan_dma_ll_map_slave_addr(chan, fsl_desc, direction, cfg))
-		goto err;
-
-	dma_buf_next = dma_addr;
-	if (direction == DMA_MEM_TO_DEV) {
-		if (!cfg->src_addr_width)
-			cfg->src_addr_width = cfg->dst_addr_width;
-		fsl_chan->attr =
-			fsl_edma_get_tcd_attr(cfg->src_addr_width,
-					      cfg->dst_addr_width);
-		nbytes = cfg->dst_addr_width * cfg->dst_maxburst;
 	} else {
-		if (!cfg->dst_addr_width)
-			cfg->dst_addr_width = cfg->src_addr_width;
-		fsl_chan->attr =
-			fsl_edma_get_tcd_attr(cfg->src_addr_width,
-					      cfg->dst_addr_width);
-		nbytes = cfg->src_addr_width * cfg->src_maxburst;
-	}
+		enum dma_transfer_direction dir = config->direction;
 
-	iter = period_len / nbytes;
+		if (!desc->iscyclic && idx == desc->n_its - 1)
+			disable_req = true;
+		else
+			disable_req = false;
 
-	for (i = 0; i < sg_len; i++) {
-		if (dma_buf_next >= dma_addr + buf_len)
-			dma_buf_next = dma_addr;
+		fsl_chan->is_sw = false;
 
-		/* get next sg's physical address */
-		last_sg = fsl_desc->its[(i + 1) % sg_len].paddr;
+		if (dir == DMA_MEM_TO_DEV) {
+			dst_bus_width = config->dst_addr_width;
+			if (!config->src_addr_width)
+				src_bus_width = config->dst_addr_width;
+			nbytes = config->dst_addr_width * config->dst_maxburst;
 
-		if (direction == DMA_MEM_TO_DEV) {
-			src_addr = dma_buf_next;
-			dst_addr = fsl_desc->dst.addr;
-			soff = cfg->dst_addr_width;
+			soff = config->dst_addr_width;
 			doff = fsl_chan->is_multi_fifo ? 4 : 0;
-			if (cfg->dst_port_window_size)
-				doff = cfg->dst_addr_width;
-		} else if (direction == DMA_DEV_TO_MEM) {
-			src_addr = fsl_desc->src.addr;
-			dst_addr = dma_buf_next;
+			if (config->dst_port_window_size)
+				doff = config->dst_addr_width;
+		} else if (dir == DMA_DEV_TO_MEM) {
+			src_bus_width = config->src_addr_width;
+			if (!config->dst_addr_width)
+				dst_bus_width = config->src_addr_width;
+			nbytes = config->src_addr_width * config->src_maxburst;
 			soff = fsl_chan->is_multi_fifo ? 4 : 0;
-			doff = cfg->src_addr_width;
-			if (cfg->src_port_window_size)
-				soff = cfg->src_addr_width;
+			doff = config->src_addr_width;
+			if (config->src_port_window_size)
+				soff = config->src_addr_width;
 		} else {
 			/* DMA_DEV_TO_DEV */
-			src_addr = cfg->src_addr;
-			dst_addr = cfg->dst_addr;
 			soff = doff = 0;
-			major_int = false;
-		}
-
-		fsl_edma_fill_tcd(fsl_chan, fsl_desc->its[i].vaddr, src_addr, dst_addr,
-				  fsl_chan->attr, soff, nbytes, 0, iter,
-				  iter, doff, last_sg, major_int, false, true);
-		dma_buf_next += period_len;
-	}
-
-	return __vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc);
-
-err:
-	vchan_dma_ll_free_desc(&fsl_desc->vdesc);
-	return NULL;
-}
-
-struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
-		struct dma_chan *chan, struct scatterlist *sgl,
-		unsigned int sg_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context)
-{
-	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-	struct dma_slave_config *cfg = &chan->config;
-	struct dma_ll_desc *fsl_desc;
-	struct scatterlist *sg;
-	dma_addr_t src_addr, dst_addr, last_sg;
-	u16 soff, doff, iter;
-	u32 nbytes;
-	int i;
-
-	if (!is_slave_direction(direction))
-		return NULL;
-
-	fsl_desc = vchan_dma_ll_alloc_desc(chan, sg_len, flags);
-	if (!fsl_desc)
-		return NULL;
-	fsl_desc->iscyclic = false;
-	fsl_desc->dir = direction;
-
-	if (vchan_dma_ll_map_slave_addr(chan, fsl_desc, direction, cfg))
-		goto err;
-
-	if (direction == DMA_MEM_TO_DEV) {
-		if (!cfg->src_addr_width)
-			cfg->src_addr_width = cfg->dst_addr_width;
-		fsl_chan->attr =
-			fsl_edma_get_tcd_attr(cfg->src_addr_width,
-					      cfg->dst_addr_width);
-		nbytes = cfg->dst_addr_width *
-			cfg->dst_maxburst;
-	} else {
-		if (!cfg->dst_addr_width)
-			cfg->dst_addr_width = cfg->src_addr_width;
-		fsl_chan->attr =
-			fsl_edma_get_tcd_attr(cfg->src_addr_width,
-					      cfg->dst_addr_width);
-		nbytes = cfg->src_addr_width *
-			cfg->src_maxburst;
-	}
-
-	for_each_sg(sgl, sg, sg_len, i) {
-		if (direction == DMA_MEM_TO_DEV) {
-			src_addr = sg_dma_address(sg);
-			dst_addr = fsl_desc->dst.addr;
-			soff = cfg->dst_addr_width;
-			doff = 0;
-		} else if (direction == DMA_DEV_TO_MEM) {
-			src_addr = fsl_desc->src.addr;
-			dst_addr = sg_dma_address(sg);
-			soff = 0;
-			doff = cfg->src_addr_width;
-		} else {
-			/* DMA_DEV_TO_DEV */
-			src_addr = cfg->src_addr;
-			dst_addr = cfg->dst_addr;
-			soff = 0;
-			doff = 0;
+			irq = false;
 		}
 
-		/*
-		 * Choose the suitable burst length if sg_dma_len is not
-		 * multiple of burst length so that the whole transfer length is
-		 * multiple of minor loop(burst length).
-		 */
-		if (sg_dma_len(sg) % nbytes) {
-			u32 width = (direction == DMA_DEV_TO_MEM) ? doff : soff;
-			u32 burst = (direction == DMA_DEV_TO_MEM) ?
-						cfg->src_maxburst :
-						cfg->dst_maxburst;
+		/*
+		 * Choose the suitable burst length if sg_dma_len is not
+		 * multiple of burst length so that the whole transfer length is
+		 * multiple of minor loop(burst length).
+		 */
+		if (len % nbytes) {
+			u32 width = (dir == DMA_DEV_TO_MEM) ? doff : soff;
+			u32 burst = (dir == DMA_DEV_TO_MEM) ?
+					config->src_maxburst :
+					config->dst_maxburst;
 			int j;
 
 			for (j = burst; j > 1; j--) {
-				if (!(sg_dma_len(sg) % (j * width))) {
+				if (!(len % (j * width))) {
 					nbytes = j * width;
 					break;
 				}
@@ -721,27 +591,19 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
 			if (j == 1)
 				nbytes = width;
 		}
-		iter = sg_dma_len(sg) / nbytes;
-		if (i < sg_len - 1) {
-			last_sg = fsl_desc->its[(i + 1)].paddr;
-			fsl_edma_fill_tcd(fsl_chan, fsl_desc->its[i].vaddr, src_addr,
-					  dst_addr, fsl_chan->attr, soff,
-					  nbytes, 0, iter, iter, doff, last_sg,
-					  false, false, true);
-		} else {
-			last_sg = 0;
-			fsl_edma_fill_tcd(fsl_chan, fsl_desc->its[i].vaddr, src_addr,
-					  dst_addr, fsl_chan->attr, soff,
-					  nbytes, 0, iter, iter, doff, last_sg,
-					  true, true, false);
-		}
+
+		iter = len / nbytes;
 	}
 
-	return __vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc);
+	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_MEM_REMOTE)
+		fsl_chan->is_remote = true;
+
+	/* To match with copy_align and max_seg_size so 1 tcd is enough */
+	__fsl_edma_fill_tcd(fsl_chan, tcd, src, dst,
+			    fsl_edma_get_tcd_attr(src_bus_width, dst_bus_width),
+			    soff, nbytes, 0, iter, iter, doff, irq, disable_req);
 
-err:
-	vchan_dma_ll_free_desc(&fsl_desc->vdesc);
-	return NULL;
+	return 0;
 }
 
 void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 7cba3bc0d39537e675167b42dda644647bf63819..b5bfd3162237bb9dd585bbf91e6f9f73f0376112 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -469,14 +469,6 @@ int fsl_edma_slave_config(struct dma_chan *chan,
 				 struct dma_slave_config *cfg);
 enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
 		dma_cookie_t cookie, struct dma_tx_state *txstate);
-struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
-		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
-		size_t period_len, enum dma_transfer_direction direction,
-		unsigned long flags);
-struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
-		struct dma_chan *chan, struct scatterlist *sgl,
-		unsigned int sg_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context);
 void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan);
 void fsl_edma_issue_pending(struct dma_chan *chan);
 int fsl_edma_alloc_chan_resources(struct dma_chan *chan);
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 1724a2d1449fe1850d460cefae5899a5ab828afd..e405aa96e625702673b5fc64e1102b50d18eb894 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -848,8 +848,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	fsl_edma->dma_dev.device_free_chan_resources
 		= fsl_edma_free_chan_resources;
 	fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
-	fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
-	fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
+	fsl_edma->dma_dev.device_prep_slave_sg = vchan_dma_ll_prep_slave_sg;
+	fsl_edma->dma_dev.device_prep_dma_cyclic = vchan_dma_ll_prep_slave_cyclic;
 	fsl_edma->dma_dev.device_prep_dma_memcpy = vchan_dma_ll_prep_memcpy;
 	fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
 	fsl_edma->dma_dev.device_pause = fsl_edma_pause;
diff --git a/drivers/dma/ll-dma.c b/drivers/dma/ll-dma.c
index 66e4222ac528f871c75a508c68895078fa38cf7b..de289e10468b9c0e6ab6c15b1bd49ab2b627e59d 100644
--- a/drivers/dma/ll-dma.c
+++ b/drivers/dma/ll-dma.c
@@ -216,6 +216,118 @@ vchan_dma_ll_prep_memcpy(struct dma_chan *chan,
 }
 EXPORT_SYMBOL_GPL(vchan_dma_ll_prep_memcpy);
 
+static dma_addr_t
+vchan_dma_get_src_addr(struct dma_ll_desc *desc, dma_addr_t addr,
+		       enum dma_transfer_direction dir)
+{
+	if (dir == DMA_DEV_TO_MEM || dir == DMA_DEV_TO_DEV)
+		return desc->src.addr;
+
+	return addr;
+}
+
+static dma_addr_t
+vchan_dma_get_dst_addr(struct dma_ll_desc *desc, dma_addr_t addr,
+		       enum dma_transfer_direction dir)
+{
+	if (dir == DMA_MEM_TO_DEV || dir == DMA_DEV_TO_DEV)
+		return desc->dst.addr;
+
+	return addr;
+}
+
+struct dma_async_tx_descriptor *
+vchan_dma_ll_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+			   unsigned int sg_len, enum dma_transfer_direction dir,
+			   unsigned long flags, void *context)
+{
+	struct virt_dma_chan *vchan = to_virt_chan(chan);
+	struct dma_slave_config *config = &chan->config;
+	struct dma_ll_desc *desc;
+	struct scatterlist *sg;
+	int i, ret;
+
+	if (!is_slave_direction(dir))
+		return NULL;
+
+	desc = vchan_dma_ll_alloc_desc(chan, sg_len, flags);
+	if (!desc)
+		return NULL;
+	desc->iscyclic = false;
+	desc->dir = dir;
+
+	if (vchan_dma_ll_map_slave_addr(chan, desc, dir, config))
+		goto err;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		dma_addr_t addr = sg_dma_address(sg);
+
+		ret = vchan->ll.ops->set_lli(desc, i,
+					     vchan_dma_get_src_addr(desc, addr, dir),
+					     vchan_dma_get_dst_addr(desc, addr, dir),
+					     sg_dma_len(sg),
+					     i == sg_len - 1, config);
+		if (ret)
+			goto err;
+	}
+
+	return __vchan_tx_prep(vchan, &desc->vdesc);
+
+err:
+	vchan_dma_ll_free_desc(&desc->vdesc);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(vchan_dma_ll_prep_slave_sg);
+
+struct dma_async_tx_descriptor *
+vchan_dma_ll_prep_slave_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
+			       size_t buf_len, size_t period_len,
+			       enum dma_transfer_direction dir,
+			       unsigned long flags)
+{
+	struct virt_dma_chan *vchan = to_virt_chan(chan);
+	struct dma_slave_config *config = &chan->config;
+	dma_addr_t addr = dma_addr;
+	struct dma_ll_desc *desc;
+	int n_items;
+	int i, ret;
+
+	if (!is_slave_direction(dir))
+		return NULL;
+
+	n_items = buf_len / period_len;
+	desc = vchan_dma_ll_alloc_desc(chan, n_items, flags);
+	if (!desc)
+		return NULL;
+	desc->iscyclic = true;
+	desc->dir = dir;
+
+	if (vchan_dma_ll_map_slave_addr(chan, desc, dir, config))
+		goto err;
+
+	for (i = 0; i < n_items; i++) {
+		ret = vchan->ll.ops->set_lli(desc, i,
+					     vchan_dma_get_src_addr(desc, addr, dir),
+					     vchan_dma_get_dst_addr(desc, addr, dir),
+					     period_len, true, config);
+		if (ret)
+			goto err;
+
+		addr += period_len;
+	}
+
+	ret = vchan->ll.ops->set_ll_next(desc, n_items - 1, desc->its[0].paddr);
+	if (ret)
+		goto err;
+
+	return __vchan_tx_prep(vchan, &desc->vdesc);
+
+err:
+	vchan_dma_ll_free_desc(&desc->vdesc);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(vchan_dma_ll_prep_slave_cyclic);
+
 int vchan_dma_ll_terminate_all(struct dma_chan *chan)
 {
 	struct virt_dma_chan *vchan = to_virt_chan(chan);
diff --git a/drivers/dma/mcf-edma-main.c b/drivers/dma/mcf-edma-main.c
index 60c5b928ade74d36c8f4206777921544787f6cd8..6d68dfd97b47c88d2499540b10564b964820b807 100644
--- a/drivers/dma/mcf-edma-main.c
+++ b/drivers/dma/mcf-edma-main.c
@@ -221,8 +221,8 @@ static int mcf_edma_probe(struct platform_device *pdev)
 			fsl_edma_free_chan_resources;
 	mcf_edma->dma_dev.device_config = fsl_edma_slave_config;
 	mcf_edma->dma_dev.device_prep_dma_cyclic =
-			fsl_edma_prep_dma_cyclic;
-	mcf_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
+			vchan_dma_ll_prep_slave_cyclic;
+	mcf_edma->dma_dev.device_prep_slave_sg = vchan_dma_ll_prep_slave_sg;
 	mcf_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
 	mcf_edma->dma_dev.device_pause = fsl_edma_pause;
 	mcf_edma->dma_dev.device_resume = fsl_edma_resume;
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 0a18663dc95f323f7a9bab76f2d730701277371a..d1bb130f0fd798f8ec78cc8f88da3f8d1ae74625 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -308,6 +308,15 @@ struct dma_async_tx_descriptor *
 vchan_dma_ll_prep_memcpy(struct dma_chan *chan,
 			 dma_addr_t dma_dst, dma_addr_t dma_src, size_t len,
 			 unsigned long flags);
+struct dma_async_tx_descriptor *
+vchan_dma_ll_prep_slave_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
+			       size_t buf_len, size_t period_len,
+			       enum dma_transfer_direction dir,
+			       unsigned long flags);
+struct dma_async_tx_descriptor *
+vchan_dma_ll_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+			   unsigned int sg_len, enum dma_transfer_direction dir,
+			   unsigned long flags, void *context);
 void vchan_dma_ll_free_desc(struct virt_dma_desc *vdesc);
 int vchan_dma_ll_terminate_all(struct dma_chan *chan);
 int vchan_dma_ll_map_slave_addr(struct dma_chan *chan, struct dma_ll_desc *desc,

-- 
2.34.1

