[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260126103155.2644586-5-claudiu.beznea.uj@bp.renesas.com>
Date: Mon, 26 Jan 2026 12:31:52 +0200
From: Claudiu <claudiu.beznea@...on.dev>
To: vkoul@...nel.org,
biju.das.jz@...renesas.com,
prabhakar.mahadev-lad.rj@...renesas.com,
lgirdwood@...il.com,
broonie@...nel.org,
perex@...ex.cz,
tiwai@...e.com,
p.zabel@...gutronix.de,
geert+renesas@...der.be,
fabrizio.castro.jz@...esas.com
Cc: claudiu.beznea@...on.dev,
dmaengine@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-sound@...r.kernel.org,
linux-renesas-soc@...r.kernel.org,
Claudiu Beznea <claudiu.beznea.uj@...renesas.com>
Subject: [PATCH 4/7] dmaengine: sh: rz-dmac: Add cyclic DMA support
From: Claudiu Beznea <claudiu.beznea.uj@...renesas.com>
Add cyclic DMA support to the RZ DMAC driver. A per-channel status bit is
introduced to mark cyclic channels; it is set during the DMA prepare
callback. The IRQ handler checks this status bit and calls
vchan_cyclic_callback() for cyclic channels instead of completing the
cookie. The residue calculation is adapted accordingly: for cyclic
channels it walks the link descriptor ring up to the start descriptor.
Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@...renesas.com>
---
drivers/dma/sh/rz-dmac.c | 137 +++++++++++++++++++++++++++++++++++++--
1 file changed, 133 insertions(+), 4 deletions(-)
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index 4bc7ea9566fd..ab5f49a0b9f2 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -35,6 +35,7 @@
enum rz_dmac_prep_type {
RZ_DMAC_DESC_MEMCPY,
RZ_DMAC_DESC_SLAVE_SG,
+ RZ_DMAC_DESC_CYCLIC,
};
struct rz_lmdesc {
@@ -59,6 +60,7 @@ struct rz_dmac_desc {
/* For slave sg */
struct scatterlist *sg;
unsigned int sgcount;
+ u32 start_lmdesc;
};
#define to_rz_dmac_desc(d) container_of(d, struct rz_dmac_desc, vd)
@@ -67,10 +69,12 @@ struct rz_dmac_desc {
* enum rz_dmac_chan_status: RZ DMAC channel status
* @RZ_DMAC_CHAN_STATUS_ENABLED: Channel is enabled
* @RZ_DMAC_CHAN_STATUS_PAUSED: Channel is paused though DMA engine callbacks
+ * @RZ_DMAC_CHAN_STATUS_CYCLIC: Channel is cyclic
*/
enum rz_dmac_chan_status {
RZ_DMAC_CHAN_STATUS_ENABLED,
RZ_DMAC_CHAN_STATUS_PAUSED,
+ RZ_DMAC_CHAN_STATUS_CYCLIC,
};
struct rz_dmac_chan {
@@ -194,6 +198,7 @@ struct rz_dmac {
/* LINK MODE DESCRIPTOR */
#define HEADER_LV BIT(0)
+#define HEADER_WBD BIT(2)
#define RZ_DMAC_MAX_CHAN_DESCRIPTORS 16
#define RZ_DMAC_MAX_CHANNELS 16
@@ -426,6 +431,60 @@ static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
rz_dmac_set_dma_req_no(dmac, channel->index, channel->mid_rid);
}
+static void rz_dmac_prepare_descs_for_cyclic(struct rz_dmac_chan *channel)
+{
+ struct dma_chan *chan = &channel->vc.chan;
+ struct rz_dmac *dmac = to_rz_dmac(chan->device);
+ struct rz_dmac_desc *d = channel->desc;
+ size_t period_len = d->sgcount;
+ struct rz_lmdesc *lmdesc;
+ size_t buf_len = d->len;
+ size_t periods = buf_len / period_len;
+ u32 start_lmdesc;
+
+ lockdep_assert_held(&channel->vc.lock);
+
+ channel->chcfg |= CHCFG_SEL(channel->index) | CHCFG_DMS;
+
+ if (d->direction == DMA_DEV_TO_MEM) {
+ channel->chcfg |= CHCFG_SAD;
+ channel->chcfg &= ~CHCFG_REQD;
+ } else {
+ channel->chcfg |= CHCFG_DAD | CHCFG_REQD;
+ }
+
+ lmdesc = channel->lmdesc.tail;
+ start_lmdesc = channel->lmdesc.base_dma +
+ (sizeof(struct rz_lmdesc) * (lmdesc - channel->lmdesc.base));
+ d->start_lmdesc = start_lmdesc;
+
+ for (size_t i = 0; i < periods; i++) {
+ if (d->direction == DMA_DEV_TO_MEM) {
+ lmdesc->sa = d->src;
+ lmdesc->da = d->dest + (i * period_len);
+ } else {
+ lmdesc->sa = d->src + (i * period_len);
+ lmdesc->da = d->dest;
+ }
+
+ lmdesc->tb = period_len;
+ lmdesc->chitvl = 0;
+ lmdesc->chext = 0;
+ lmdesc->chcfg = channel->chcfg;
+ lmdesc->header = HEADER_LV | HEADER_WBD;
+
+ if (i == periods - 1)
+ lmdesc->nxla = start_lmdesc;
+
+ if (++lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
+ lmdesc = channel->lmdesc.base;
+ }
+
+ channel->lmdesc.tail = lmdesc;
+
+ rz_dmac_set_dma_req_no(dmac, channel->index, channel->mid_rid);
+}
+
static int rz_dmac_xfer_desc(struct rz_dmac_chan *chan)
{
struct rz_dmac_desc *d = chan->desc;
@@ -446,6 +505,10 @@ static int rz_dmac_xfer_desc(struct rz_dmac_chan *chan)
rz_dmac_prepare_descs_for_slave_sg(chan);
break;
+ case RZ_DMAC_DESC_CYCLIC:
+ rz_dmac_prepare_descs_for_cyclic(chan);
+ break;
+
default:
return -EINVAL;
}
@@ -580,6 +643,52 @@ rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return vchan_tx_prep(&channel->vc, &desc->vd, flags);
}
+static struct dma_async_tx_descriptor *
+rz_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ struct rz_dmac_desc *desc;
+ size_t periods;
+
+ if (!is_slave_direction(direction) || !period_len || buf_len % period_len)
+ return NULL;
+ periods = buf_len / period_len;
+ if (!periods || periods > DMAC_NR_LMDESC)
+ return NULL;
+
+ scoped_guard(spinlock_irqsave, &channel->vc.lock) {
+ if (list_empty(&channel->ld_free))
+ return NULL;
+
+ if (channel->status & BIT(RZ_DMAC_CHAN_STATUS_CYCLIC))
+ return NULL;
+
+ channel->status |= BIT(RZ_DMAC_CHAN_STATUS_CYCLIC);
+
+ desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);
+
+ desc->type = RZ_DMAC_DESC_CYCLIC;
+ desc->sgcount = period_len;
+ desc->len = buf_len;
+ desc->direction = direction;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ desc->src = channel->src_per_address;
+ desc->dest = buf_addr;
+ } else {
+ desc->src = buf_addr;
+ desc->dest = channel->dst_per_address;
+ }
+
+ list_move_tail(channel->ld_free.next, &channel->ld_queue);
+ }
+
+ return vchan_tx_prep(&channel->vc, &desc->vd, flags);
+}
+
static int rz_dmac_terminate_all(struct dma_chan *chan)
{
struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
@@ -731,9 +840,18 @@ static u32 rz_dmac_calculate_residue_bytes_in_vd(struct rz_dmac_chan *channel)
}
/* Calculate residue from next lmdesc to end of virtual desc */
- while (lmdesc->chcfg & CHCFG_DEM) {
- residue += lmdesc->tb;
- lmdesc = rz_dmac_get_next_lmdesc(channel->lmdesc.base, lmdesc);
+ if (channel->status & BIT(RZ_DMAC_CHAN_STATUS_CYCLIC)) {
+ struct rz_dmac_desc *desc = channel->desc;
+
+ while (lmdesc->nxla != desc->start_lmdesc) {
+ residue += lmdesc->tb;
+ lmdesc = rz_dmac_get_next_lmdesc(channel->lmdesc.base, lmdesc);
+ }
+ } else {
+ while (lmdesc->chcfg & CHCFG_DEM) {
+ residue += lmdesc->tb;
+ lmdesc = rz_dmac_get_next_lmdesc(channel->lmdesc.base, lmdesc);
+ }
}
dev_dbg(dmac->dev, "%s: VD residue is %u\n", __func__, residue);
@@ -972,7 +1090,15 @@ static irqreturn_t rz_dmac_irq_handler_thread(int irq, void *dev_id)
}
desc = list_first_entry(&channel->ld_active, struct rz_dmac_desc, node);
- vchan_cookie_complete(&desc->vd);
+
+ if (channel->status & BIT(RZ_DMAC_CHAN_STATUS_CYCLIC)) {
+ desc = channel->desc;
+ vchan_cyclic_callback(&desc->vd);
+ goto out;
+ }
+
+ vchan_cookie_complete(&desc->vd);
+
list_move_tail(channel->ld_active.next, &channel->ld_free);
if (!list_empty(&channel->ld_queue)) {
desc = list_first_entry(&channel->ld_queue, struct rz_dmac_desc,
@@ -1239,6 +1365,8 @@ static int rz_dmac_probe(struct platform_device *pdev)
engine = &dmac->engine;
dma_cap_set(DMA_SLAVE, engine->cap_mask);
dma_cap_set(DMA_MEMCPY, engine->cap_mask);
+ dma_cap_set(DMA_CYCLIC, engine->cap_mask);
+ engine->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL);
rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL);
@@ -1250,6 +1378,7 @@ static int rz_dmac_probe(struct platform_device *pdev)
engine->device_tx_status = rz_dmac_tx_status;
engine->device_prep_slave_sg = rz_dmac_prep_slave_sg;
engine->device_prep_dma_memcpy = rz_dmac_prep_dma_memcpy;
+ engine->device_prep_dma_cyclic = rz_dmac_prep_dma_cyclic;
engine->device_config = rz_dmac_config;
engine->device_terminate_all = rz_dmac_terminate_all;
engine->device_issue_pending = rz_dmac_issue_pending;
--
2.43.0
Powered by blists - more mailing lists