Message-Id: <1371416058-22047-9-git-send-email-tomasz.figa@gmail.com>
Date: Sun, 16 Jun 2013 22:54:15 +0200
From: Tomasz Figa <tomasz.figa@...il.com>
To: linux-samsung-soc@...r.kernel.org
Cc: linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-spi@...r.kernel.org, alsa-devel@...a-project.org,
Kukjin Kim <kgene.kim@...sung.com>,
Vinod Koul <vinod.koul@...el.com>, Dan Williams <djbw@...com>,
Linus Walleij <linus.walleij@...aro.org>,
Alessandro Rubini <rubini@...dd.com>,
Giancarlo Asnaghi <giancarlo.asnaghi@...com>,
Mark Brown <broonie@...nel.org>,
Grant Likely <grant.likely@...aro.org>,
Sangbeom Kim <sbkim73@...sung.com>,
Liam Girdwood <lgirdwood@...il.com>,
Jaroslav Kysela <perex@...ex.cz>, Takashi Iwai <tiwai@...e.de>,
Padmavathi Venna <padma.v@...sung.com>,
Thomas Abraham <thomas.abraham@...aro.org>,
Arnd Bergmann <arnd@...db.de>, Olof Johansson <olof@...om.net>,
Heiko Stübner <heiko@...ech.de>,
Sylwester Nawrocki <sylvester.nawrocki@...il.com>,
Russell King - ARM Linux <linux@....linux.org.uk>,
Alban Bedel <alban.bedel@...onic-design.de>,
Tomasz Figa <tomasz.figa@...il.com>
Subject: [RFC PATCH 08/11] dmaengine: PL08x: Add cyclic transfer support
From: Alban Bedel <alban.bedel@...onic-design.de>
Many audio interface drivers, such as the Samsung ASoC DMA driver,
require support for cyclic transfers to work correctly. This patch adds
cyclic transfer support to the amba-pl08x driver.
Signed-off-by: Alban Bedel <alban.bedel@...onic-design.de>
Signed-off-by: Tomasz Figa <tomasz.figa@...il.com>
---
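Note for reviewers (below the fold, not intended for the commit log):
with this patch the LLI chain of a cyclic descriptor is closed into a
ring, so the hardware keeps looping over the periods without CPU
intervention, raising a terminal-count interrupt as each LLI completes:

	/*
	 * Linear:  LLI[0] -> LLI[1] -> ... -> LLI[n-1] -> 0  (stop + IRQ)
	 * Cyclic:  LLI[0] -> LLI[1] -> ... -> LLI[n-1]       (IRQ per LLI)
	 *             ^______________________________|
	 */

Because a ring has no zero terminator, pl08x_getbytes_chan() now ends
its walk at the first link that does not point forward; since the LLIs
of one descriptor are laid out consecutively in memory, this covers
both the old 0 terminator and the new wrap-around link.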
drivers/dma/amba-pl08x.c | 181 +++++++++++++++++++++++++++++++++++------------
1 file changed, 136 insertions(+), 45 deletions(-)
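For reference, here is a minimal sketch of how a client driver (e.g. an
audio DMA driver) would exercise this code path through the generic
dmaengine API. The FIFO address, bus width, burst size and the
period_elapsed() callback are illustrative placeholders, and the
dmaengine_prep_dma_cyclic() helper is shown with its current signature,
which has varied between kernel versions:

	#include <linux/dmaengine.h>

	/* Hypothetical per-period callback; a real audio driver would
	 * call snd_pcm_period_elapsed() here. */
	static void period_elapsed(void *arg)
	{
	}

	static int start_cyclic_tx(struct dma_chan *chan, dma_addr_t buf,
				   size_t buf_len, size_t period_len,
				   dma_addr_t fifo_addr)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_MEM_TO_DEV,
			.dst_addr	= fifo_addr,
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst	= 4,
		};
		struct dma_async_tx_descriptor *desc;
		int ret;

		ret = dmaengine_slave_config(chan, &cfg);
		if (ret)
			return ret;

		/* Each period becomes a segment of the cyclic LLI chain. */
		desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len,
						 period_len, DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
		if (!desc)
			return -EBUSY;

		desc->callback = period_elapsed;
		desc->callback_param = NULL;

		dmaengine_submit(desc);
		dma_async_issue_pending(chan);

		return 0;
	}

The transfer then runs until the client calls dmaengine_terminate_all()
(e.g. when the PCM stream is stopped).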
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index bb3b36b..210a893 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -184,6 +184,7 @@ struct pl08x_sg {
* @ccfg: config reg values for current txd
* @done: this marks completed descriptors, which should not have their
* mux released.
+ * @cyclic: indicate cyclic transfers
*/
struct pl08x_txd {
struct virt_dma_desc vd;
@@ -198,6 +199,8 @@ struct pl08x_txd {
*/
u32 ccfg;
bool done;
+
+ bool cyclic;
};
/**
@@ -561,9 +564,9 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
bytes += get_bytes_in_cctl(llis_va[index].cctl);
/*
- * A LLI pointer of 0 terminates the LLI list
+ * A LLI pointer going backward terminates the LLI list
*/
- if (!llis_va[index].lli)
+ if (llis_va[index].lli <= clli)
break;
}
@@ -1075,10 +1078,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
}
llis_va = txd->llis_va;
- /* The final LLI terminates the LLI. */
- llis_va[num_llis - 1].lli = 0;
- /* The final LLI element shall also fire an interrupt. */
- llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
+ if (txd->cyclic) {
+ /* Link back to the first LLI. */
+ llis_va[num_llis - 1].lli = txd->llis_bus | bd.lli_bus;
+ } else {
+ /* The final LLI terminates the LLI. */
+ llis_va[num_llis - 1].lli = 0;
+ llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
+ }
#ifdef VERBOSE_DEBUG
{
@@ -1470,25 +1477,19 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
-static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+static struct pl08x_txd *pl08x_init_txd(
+ struct dma_chan *chan,
+ enum dma_transfer_direction direction,
+ dma_addr_t *slave_addr)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_txd *txd;
- struct pl08x_sg *dsg;
- struct scatterlist *sg;
enum dma_slave_buswidth addr_width;
- dma_addr_t slave_addr;
int ret, tmp;
u8 src_buses, dst_buses;
u32 maxburst, cctl;
- dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
- __func__, sg_dma_len(sgl), plchan->name);
-
txd = pl08x_get_txd(plchan);
if (!txd) {
dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
@@ -1502,14 +1503,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
*/
if (direction == DMA_MEM_TO_DEV) {
cctl = PL080_CONTROL_SRC_INCR;
- slave_addr = plchan->cfg.dst_addr;
+ *slave_addr = plchan->cfg.dst_addr;
addr_width = plchan->cfg.dst_addr_width;
maxburst = plchan->cfg.dst_maxburst;
src_buses = pl08x->mem_buses;
dst_buses = plchan->cd->periph_buses;
} else if (direction == DMA_DEV_TO_MEM) {
cctl = PL080_CONTROL_DST_INCR;
- slave_addr = plchan->cfg.src_addr;
+ *slave_addr = plchan->cfg.src_addr;
addr_width = plchan->cfg.src_addr_width;
maxburst = plchan->cfg.src_maxburst;
src_buses = plchan->cd->periph_buses;
@@ -1558,24 +1559,107 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
else
txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+ return txd;
+}
+
+static int pl08x_tx_add_sg(struct pl08x_txd *txd,
+ enum dma_transfer_direction direction,
+ dma_addr_t slave_addr,
+ dma_addr_t buf_addr,
+ unsigned int len)
+{
+ struct pl08x_sg *dsg;
+
+ dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+ if (!dsg)
+ return -ENOMEM;
+
+ list_add_tail(&dsg->node, &txd->dsg_list);
+
+ dsg->len = len;
+ if (direction == DMA_MEM_TO_DEV) {
+ dsg->src_addr = buf_addr;
+ dsg->dst_addr = slave_addr;
+ } else {
+ dsg->src_addr = slave_addr;
+ dsg->dst_addr = buf_addr;
+ }
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_txd *txd;
+ struct scatterlist *sg;
+ int ret, tmp;
+ dma_addr_t slave_addr;
+
+ dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
+ __func__, sg_dma_len(sgl), plchan->name);
+
+ txd = pl08x_init_txd(chan, direction, &slave_addr);
+ if (!txd)
+ return NULL;
+
for_each_sg(sgl, sg, sg_len, tmp) {
- dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
- if (!dsg) {
+ ret = pl08x_tx_add_sg(txd, direction, slave_addr,
+ sg_dma_address(sg),
+ sg_dma_len(sg));
+ if (ret) {
pl08x_release_mux(plchan);
pl08x_free_txd(pl08x, txd);
dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
__func__);
return NULL;
}
- list_add_tail(&dsg->node, &txd->dsg_list);
+ }
- dsg->len = sg_dma_len(sg);
- if (direction == DMA_MEM_TO_DEV) {
- dsg->src_addr = sg_dma_address(sg);
- dsg->dst_addr = slave_addr;
- } else {
- dsg->src_addr = slave_addr;
- dsg->dst_addr = sg_dma_address(sg);
+ ret = pl08x_fill_llis_for_desc(plchan->host, txd);
+ if (!ret) {
+ pl08x_release_mux(plchan);
+ pl08x_free_txd(pl08x, txd);
+ return NULL;
+ }
+
+ return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_txd *txd;
+ int ret, tmp;
+ dma_addr_t slave_addr;
+
+ dev_dbg(&pl08x->adev->dev,
+ "%s prepare cyclic transaction of %d/%d bytes %s %s\n",
+ __func__, period_len, buf_len,
+ direction == DMA_MEM_TO_DEV ? "to" : "from",
+ plchan->name);
+
+ txd = pl08x_init_txd(chan, direction, &slave_addr);
+ if (!txd)
+ return NULL;
+
+ txd->cyclic = true;
+ txd->cctl |= PL080_CONTROL_TC_IRQ_EN;
+ for (tmp = 0 ; tmp < buf_len ; tmp += period_len) {
+ ret = pl08x_tx_add_sg(txd, direction, slave_addr,
+ buf_addr + tmp, period_len);
+ if (ret) {
+ pl08x_release_mux(plchan);
+ pl08x_free_txd(pl08x, txd);
+ return NULL;
}
}
@@ -1719,23 +1803,28 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
spin_lock(&plchan->vc.lock);
tx = plchan->at;
if (tx) {
- plchan->at = NULL;
- /*
- * This descriptor is done, release its mux
- * reservation.
- */
- pl08x_release_mux(plchan);
- tx->done = true;
- vchan_cookie_complete(&tx->vd);
-
- /*
- * And start the next descriptor (if any),
- * otherwise free this channel.
- */
- if (vchan_next_desc(&plchan->vc))
- pl08x_start_next_txd(plchan);
- else
- pl08x_phy_free(plchan);
+ if (tx->cyclic) {
+ vchan_cyclic_callback(&tx->vd);
+ } else {
+ plchan->at = NULL;
+ /*
+ * This descriptor is done, release
+ * its mux reservation.
+ */
+ pl08x_release_mux(plchan);
+ tx->done = true;
+ vchan_cookie_complete(&tx->vd);
+
+ /*
+ * And start the next descriptor
+ * (if any), otherwise free this
+ * channel.
+ */
+ if (vchan_next_desc(&plchan->vc))
+ pl08x_start_next_txd(plchan);
+ else
+ pl08x_phy_free(plchan);
+ }
}
spin_unlock(&plchan->vc.lock);
@@ -1939,6 +2028,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
/* Initialize slave engine */
dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
pl08x->slave.dev = &adev->dev;
pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
@@ -1946,6 +2036,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->slave.device_tx_status = pl08x_dma_tx_status;
pl08x->slave.device_issue_pending = pl08x_issue_pending;
pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
+ pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
pl08x->slave.device_control = pl08x_control;
/* Get the platform data */
--
1.8.2.1