[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20260128-dma_ll_comlib-v1-8-1b1fa2c671f9@nxp.com>
Date: Wed, 28 Jan 2026 13:05:27 -0500
From: Frank Li <Frank.Li@....com>
To: Vinod Koul <vkoul@...nel.org>
Cc: linux-kernel@...r.kernel.org, dmaengine@...r.kernel.org,
imx@...ts.linux.dev, joy.zou@....com, Frank Li <Frank.Li@....com>
Subject: [PATCH RFC 08/12] dmaengine: Factor out fsl-edma prep_memcpy into
common vchan helper
Create a common vchan_dma_ll_prep_memcpy() helper, factored out of
fsl_edma_prep_memcpy(), and switch fsl-edma over to it.
Add .set_ll_next() and .set_lli() callbacks to abstract DMA descriptor
format differences between controllers, allowing DMA engine drivers to
focus solely on hardware-specific descriptor programming.
Signed-off-by: Frank Li <Frank.Li@....com>
---
drivers/dma/fsl-edma-common.c | 123 ++++++++++++++++++++++++++----------------
drivers/dma/fsl-edma-common.h | 3 --
drivers/dma/fsl-edma-main.c | 2 +-
drivers/dma/ll-dma.c | 39 +++++++++++++-
drivers/dma/virt-dma.h | 12 ++++-
5 files changed, 127 insertions(+), 52 deletions(-)
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 20b954221c2e9b3b3a6849c1f0d4ca68efecb32e..a8f29830e0172b7e818d209f20145121631743c3 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -461,17 +461,17 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, void *tcd)
edma_cp_tcd_to_reg(fsl_chan, tcd, csr);
}
-static inline
-void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
- struct fsl_edma_hw_tcd *tcd, dma_addr_t src, dma_addr_t dst,
- u16 attr, u16 soff, u32 nbytes, dma_addr_t slast, u16 citer,
- u16 biter, u16 doff, dma_addr_t dlast_sga, bool major_int,
- bool disable_req, bool enable_sg)
+static inline void
+__fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
+ struct fsl_edma_hw_tcd *tcd, dma_addr_t src, dma_addr_t dst,
+ u16 attr, u16 soff, u32 nbytes, dma_addr_t slast, u16 citer,
+ u16 biter, u16 doff, bool major_int,
+ bool disable_req)
{
struct dma_slave_config *cfg = &fsl_chan->vchan.chan.config;
struct dma_slave_cfg *c = dma_slave_get_cfg(cfg, cfg->direction);
+ u16 csr = fsl_edma_get_tcd_to_cpu(fsl_chan, tcd, csr);
u32 burst = 0;
- u16 csr = 0;
/*
* eDMA hardware SGs require the TCDs to be stored in little
@@ -509,8 +509,6 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_CITER_CITER(citer), citer);
fsl_edma_set_tcd_to_le(fsl_chan, tcd, doff, doff);
- fsl_edma_set_tcd_to_le(fsl_chan, tcd, dlast_sga, dlast_sga);
-
fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_BITER_BITER(biter), biter);
if (major_int)
@@ -519,9 +517,6 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
if (disable_req)
csr |= EDMA_TCD_CSR_D_REQ;
- if (enable_sg)
- csr |= EDMA_TCD_CSR_E_SG;
-
if (fsl_chan->is_rxchan)
csr |= EDMA_TCD_CSR_ACTIVE;
@@ -533,6 +528,71 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
trace_edma_fill_tcd(fsl_chan, tcd);
}
+static void
+__fsl_edma_set_ll_next(struct fsl_edma_chan *fsl_chan, void *tcd, dma_addr_t next)
+{
+ u32 csr = fsl_edma_get_tcd_to_cpu(fsl_chan, tcd, csr);
+
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, next, dlast_sga);
+
+ csr |= EDMA_TCD_CSR_E_SG;
+ fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
+}
+
+static int
+fsl_edma_set_ll_next(struct dma_ll_desc *desc, u32 idx, dma_addr_t next)
+{
+ struct dma_chan *chan = desc->vdesc.tx.chan;
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ void *tcd = desc->its[idx].vaddr;
+
+ __fsl_edma_set_ll_next(fsl_chan, tcd, next);
+
+ return 0;
+}
+
+static inline
+void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
+ struct fsl_edma_hw_tcd *tcd, dma_addr_t src, dma_addr_t dst,
+ u16 attr, u16 soff, u32 nbytes, dma_addr_t slast, u16 citer,
+ u16 biter, u16 doff, dma_addr_t dlast_sga, bool major_int,
+ bool disable_req, bool enable_sg)
+{
+ __fsl_edma_fill_tcd(fsl_chan, tcd, src, dst, attr, soff, nbytes, slast,
+ citer, biter, doff, major_int, disable_req);
+
+ if (enable_sg)
+ __fsl_edma_set_ll_next(fsl_chan, tcd, dlast_sga);
+}
+
+static int fsl_edma_set_lli(struct dma_ll_desc *desc, u32 idx,
+ dma_addr_t src, dma_addr_t dst, size_t len, bool irq,
+ struct dma_slave_config *config)
+{
+ struct dma_chan *chan = desc->vdesc.tx.chan;
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ void *tcd = desc->its[idx].vaddr;
+ u32 src_bus_width, dst_bus_width;
+
+ /* Memory to memory */
+ if (!config) {
+ src_bus_width = min_t(u32, DMA_SLAVE_BUSWIDTH_32_BYTES, 1 << (ffs(src) - 1));
+ dst_bus_width = min_t(u32, DMA_SLAVE_BUSWIDTH_32_BYTES, 1 << (ffs(dst) - 1));
+
+ fsl_chan->is_sw = true;
+ }
+
+ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_MEM_REMOTE)
+ fsl_chan->is_remote = true;
+
+ /* To match with copy_align and max_seg_size so 1 tcd is enough */
+ __fsl_edma_fill_tcd(fsl_chan, tcd, src, dst,
+ fsl_edma_get_tcd_attr(src_bus_width, dst_bus_width),
+ src_bus_width, len, 0, 1, 1, dst_bus_width, irq, true);
+
+ return 0;
+}
+
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
@@ -555,7 +615,7 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
return NULL;
sg_len = buf_len / period_len;
- fsl_desc = vchan_dma_ll_alloc_desc(chan, sg_len);
+ fsl_desc = vchan_dma_ll_alloc_desc(chan, sg_len, flags);
if (!fsl_desc)
return NULL;
fsl_desc->iscyclic = true;
@@ -615,7 +675,7 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
dma_buf_next += period_len;
}
- return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+ return __vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc);
}
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
@@ -638,7 +698,7 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
return NULL;
- fsl_desc = vchan_dma_ll_alloc_desc(chan, sg_len);
+ fsl_desc = vchan_dma_ll_alloc_desc(chan, sg_len, flags);
if (!fsl_desc)
return NULL;
fsl_desc->iscyclic = false;
@@ -719,36 +779,7 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
}
}
- return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
-}
-
-struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
- dma_addr_t dma_dst, dma_addr_t dma_src,
- size_t len, unsigned long flags)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- struct dma_ll_desc *fsl_desc;
- u32 src_bus_width, dst_bus_width;
-
- src_bus_width = min_t(u32, DMA_SLAVE_BUSWIDTH_32_BYTES, 1 << (ffs(dma_src) - 1));
- dst_bus_width = min_t(u32, DMA_SLAVE_BUSWIDTH_32_BYTES, 1 << (ffs(dma_dst) - 1));
-
- fsl_desc = vchan_dma_ll_alloc_desc(chan, 1);
- if (!fsl_desc)
- return NULL;
- fsl_desc->iscyclic = false;
-
- fsl_chan->is_sw = true;
- if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_MEM_REMOTE)
- fsl_chan->is_remote = true;
-
- /* To match with copy_align and max_seg_size so 1 tcd is enough */
- fsl_edma_fill_tcd(fsl_chan, fsl_desc->its[0].vaddr, dma_src, dma_dst,
- fsl_edma_get_tcd_attr(src_bus_width, dst_bus_width),
- src_bus_width, len, 0, 1, 1, dst_bus_width, 0, true,
- true, false);
-
- return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+ return __vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc);
}
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
@@ -797,6 +828,8 @@ static int fsl_edma_ll_stop(struct dma_chan *chan)
}
static const struct dma_linklist_ops fsl_edma_ll_ops = {
+ .set_ll_next = fsl_edma_set_ll_next,
+ .set_lli = fsl_edma_set_lli,
.stop = fsl_edma_ll_stop,
};
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 654d05f06b2c1817e68e7afaf9de3439285d2978..f2c346cb84f5f15d333cf8547963ea7a717f4d5f 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -479,9 +479,6 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags, void *context);
-struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(
- struct dma_chan *chan, dma_addr_t dma_dst, dma_addr_t dma_src,
- size_t len, unsigned long flags);
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan);
void fsl_edma_issue_pending(struct dma_chan *chan);
int fsl_edma_alloc_chan_resources(struct dma_chan *chan);
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 354e4ac5e46c920dd66ec1479a64c75a609c186d..1724a2d1449fe1850d460cefae5899a5ab828afd 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -850,7 +850,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
- fsl_edma->dma_dev.device_prep_dma_memcpy = fsl_edma_prep_memcpy;
+ fsl_edma->dma_dev.device_prep_dma_memcpy = vchan_dma_ll_prep_memcpy;
fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
fsl_edma->dma_dev.device_pause = fsl_edma_pause;
fsl_edma->dma_dev.device_resume = fsl_edma_resume;
diff --git a/drivers/dma/ll-dma.c b/drivers/dma/ll-dma.c
index ff9eac43886255c18550c978184c0801456fefe9..da13ba4dcdfe403af0ad3678bf4c0ff60f715a63 100644
--- a/drivers/dma/ll-dma.c
+++ b/drivers/dma/ll-dma.c
@@ -28,10 +28,11 @@ int vchan_dma_ll_init(struct virt_dma_chan *vc,
const struct dma_linklist_ops *ops, size_t size,
size_t align, size_t boundary)
{
- if (!ops || !ops->stop)
+ if (!ops || !ops->stop || !ops->set_ll_next || !ops->set_lli)
return -EINVAL;
vc->ll.ops = ops;
+ vc->ll.size = size;
vc->ll.pool = dma_pool_create(dev_name(vc->chan.device->dev),
vc->chan.device->dev, size, align,
@@ -53,7 +54,8 @@ void vchan_dma_ll_free(struct virt_dma_chan *vc)
}
EXPORT_SYMBOL_GPL(vchan_dma_ll_free);
-struct dma_ll_desc *vchan_dma_ll_alloc_desc(struct dma_chan *chan, u32 n)
+struct dma_ll_desc *vchan_dma_ll_alloc_desc(struct dma_chan *chan, u32 n,
+ unsigned long flags)
{
struct virt_dma_chan *vchan = to_virt_chan(chan);
struct dma_ll_desc *desc;
@@ -65,11 +67,15 @@ struct dma_ll_desc *vchan_dma_ll_alloc_desc(struct dma_chan *chan, u32 n)
desc->n_its = n;
+ vchan_init_dma_async_tx(vchan, &desc->vdesc, flags);
+
for (i = 0; i < n; i++) {
desc->its[i].vaddr = dma_pool_alloc(vchan->ll.pool, GFP_NOWAIT,
&desc->its[i].paddr);
if (!desc->its[i].vaddr)
goto err;
+
+ memset(desc->its[i].vaddr, 0, vchan->ll.size);
}
return desc;
@@ -96,6 +102,35 @@ void vchan_dma_ll_free_desc(struct virt_dma_desc *vdesc)
}
EXPORT_SYMBOL_GPL(vchan_dma_ll_free_desc);
+struct dma_async_tx_descriptor *
+vchan_dma_ll_prep_memcpy(struct dma_chan *chan,
+ dma_addr_t dma_dst, dma_addr_t dma_src, size_t len,
+ unsigned long flags)
+{
+ struct virt_dma_chan *vchan = to_virt_chan(chan);
+ struct dma_ll_desc *desc;
+ int ret;
+
+ desc = vchan_dma_ll_alloc_desc(chan, 1, flags);
+ if (!desc)
+ return NULL;
+
+ desc->iscyclic = false;
+
+ ret = vchan->ll.ops->set_lli(desc, 0, dma_src, dma_dst,
+ len, true, NULL);
+
+ if (ret)
+ goto err;
+
+ return __vchan_tx_prep(vchan, &desc->vdesc);
+
+err:
+ vchan_dma_ll_free_desc(&desc->vdesc);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(vchan_dma_ll_prep_memcpy);
+
int vchan_dma_ll_terminate_all(struct dma_chan *chan)
{
struct virt_dma_chan *vchan = to_virt_chan(chan);
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index ad5ce489cf8e52aa02a0129bc5657fadd6070da2..f4aec6eb3c3900a5473c8feedc16b06e29751deb 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -40,11 +40,16 @@ struct dma_ll_desc {
};
struct dma_linklist_ops {
+ int (*set_ll_next)(struct dma_ll_desc *desc, u32 idx, dma_addr_t next);
+ int (*set_lli)(struct dma_ll_desc *desc, u32 idx,
+ dma_addr_t src, dma_addr_t dst, size_t size,
+ bool irq, struct dma_slave_config *config);
int (*stop)(struct dma_chan *chan);
};
struct dma_linklist {
struct dma_pool *pool;
+ size_t size;
const struct dma_linklist_ops *ops;
};
@@ -291,7 +296,12 @@ int vchan_dma_ll_init(struct virt_dma_chan *vc,
const struct dma_linklist_ops *ops, size_t size,
size_t align, size_t boundary);
void vchan_dma_ll_free(struct virt_dma_chan *vc);
-struct dma_ll_desc *vchan_dma_ll_alloc_desc(struct dma_chan *chan, u32 n);
+struct dma_ll_desc *vchan_dma_ll_alloc_desc(struct dma_chan *chan, u32 n,
+ unsigned long flags);
+struct dma_async_tx_descriptor *
+vchan_dma_ll_prep_memcpy(struct dma_chan *chan,
+ dma_addr_t dma_dst, dma_addr_t dma_src, size_t len,
+ unsigned long flags);
void vchan_dma_ll_free_desc(struct virt_dma_desc *vdesc);
int vchan_dma_ll_terminate_all(struct dma_chan *chan);
#endif
--
2.34.1
Powered by blists - more mailing lists