Message-ID: <20150211002318.GB21387@intel.com>
Date: Tue, 10 Feb 2015 16:23:18 -0800
From: Vinod Koul <vinod.koul@...el.com>
To: Robert Baldyga <r.baldyga@...sung.com>
Cc: dan.j.williams@...el.com, lars@...afoo.de,
dmaengine@...r.kernel.org, linux-kernel@...r.kernel.org,
m.szyprowski@...sung.com, k.kozlowski@...sung.com,
kyungmin.park@...sung.com, l.czerwinski@...sung.com,
padma.kvr@...il.com
Subject: Re: [PATCH v3 1/2] dma: pl330: improve pl330_tx_status() function
On Wed, Dec 10, 2014 at 11:55:17AM +0100, Robert Baldyga wrote:
> This patch adds the possibility to read the residue of a DMA transfer. It's
> useful when we want to know how many bytes have been transferred before we
> terminate the channel. This can happen, for example, on a timeout interrupt.
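
(For reference, the residue added here is what a dmaengine client reads back
through the generic API. A minimal sketch of such a caller, relying only on
<linux/dmaengine.h>; chan, cookie and total_len stand in for whatever the
client originally set up and submitted:)

	/* sketch only: called e.g. from the client's timeout handler */
	static void check_progress(struct dma_chan *chan, dma_cookie_t cookie,
				   unsigned int total_len)
	{
		struct dma_tx_state state;
		enum dma_status status;

		status = dmaengine_tx_status(chan, cookie, &state);
		if (status != DMA_COMPLETE)
			pr_info("got %u of %u bytes\n",
				total_len - state.residue, total_len);

		/* tear the channel down once we have the numbers */
		dmaengine_terminate_all(chan);
	}
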
>
> Signed-off-by: Lukasz Czerwinski <l.czerwinski@...sung.com>
> Signed-off-by: Robert Baldyga <r.baldyga@...sung.com>
> ---
> drivers/dma/pl330.c | 68 +++++++++++++++++++++++++++++++++++++++++++++++++++--
> 1 file changed, 66 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
> index bdf40b5..2f4d561 100644
> --- a/drivers/dma/pl330.c
> +++ b/drivers/dma/pl330.c
> @@ -504,6 +504,9 @@ struct dma_pl330_desc {
>
> enum desc_status status;
>
> + int bytes_requested;
> + bool last;
> +
> /* The channel which currently holds this desc */
> struct dma_pl330_chan *pchan;
>
> @@ -2182,11 +2185,68 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
> pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
> }
>
> +int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
> + struct dma_pl330_desc *desc)
> +{
> + struct pl330_thread *thrd = pch->thread;
> + struct pl330_dmac *pl330 = pch->dmac;
> + void __iomem *regs = thrd->dmac->base;
> + u32 val, addr;
> +
> + pm_runtime_get_sync(pl330->ddma.dev);
> + val = addr = 0;
> + if (desc->rqcfg.src_inc) {
> + val = readl(regs + SA(thrd->id));
> + addr = desc->px.src_addr;
> + } else {
> + val = readl(regs + DA(thrd->id));
> + addr = desc->px.dst_addr;
> + }
> + pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
> + pm_runtime_put_autosuspend(pl330->ddma.dev);
> + return val - addr;
> +}
> +
> static enum dma_status
> pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
> struct dma_tx_state *txstate)
> {
> - return dma_cookie_status(chan, cookie, txstate);
> + enum dma_status ret;
> + unsigned long flags;
> + struct dma_pl330_desc *desc, *running = NULL;
> + struct dma_pl330_chan *pch = to_pchan(chan);
> + unsigned int transferred, residual = 0;
> +
> + spin_lock_irqsave(&pch->lock, flags);
You want to check dma_cookie_status() here first, then go into the residue
calculation based on that status, and only when txstate is non-NULL.
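
Roughly what I have in mind, only a sketch of the ordering (reusing the names
from your patch, not compile-tested):

	static enum dma_status
	pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
			struct dma_tx_state *txstate)
	{
		struct dma_pl330_chan *pch = to_pchan(chan);
		enum dma_status ret;
		unsigned long flags;

		/* cheap check first; covers the already-complete case */
		ret = dma_cookie_status(chan, cookie, txstate);
		if (ret == DMA_COMPLETE || !txstate)
			return ret;

		spin_lock_irqsave(&pch->lock, flags);
		/* ... walk pch->work_list and dma_set_residue() as in your patch ... */
		spin_unlock_irqrestore(&pch->lock, flags);

		return ret;
	}
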
--
~Vinod
> +
> + if (pch->thread->req_running != -1)
> + running = pch->thread->req[pch->thread->req_running].desc;
> +
> + /* Check in pending list */
> + list_for_each_entry(desc, &pch->work_list, node) {
> + if (desc->status == DONE)
> + transferred = desc->bytes_requested;
> + else if (running && desc == running)
> + transferred =
> + pl330_get_current_xferred_count(pch, desc);
> + else
> + transferred = 0;
> + residual += desc->bytes_requested - transferred;
> + if (desc->txd.cookie == cookie) {
> + dma_set_residue(txstate, residual);
> + ret = desc->status;
> + spin_unlock_irqrestore(&pch->lock, flags);
> + return ret;
> + }
> + if (desc->last)
> + residual = 0;
> + }
> + spin_unlock_irqrestore(&pch->lock, flags);
> +
> + ret = dma_cookie_status(chan, cookie, txstate);
> + dma_set_residue(txstate, 0);
> +
> + return ret;
> }
>
> static void pl330_issue_pending(struct dma_chan *chan)
> @@ -2231,12 +2291,14 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
> desc->txd.callback = last->txd.callback;
> desc->txd.callback_param = last->txd.callback_param;
> }
> + last->last = false;
>
> dma_cookie_assign(&desc->txd);
>
> list_move_tail(&desc->node, &pch->submitted_list);
> }
>
> + last->last = true;
> cookie = dma_cookie_assign(&last->txd);
> list_add_tail(&last->node, &pch->submitted_list);
> spin_unlock_irqrestore(&pch->lock, flags);
> @@ -2459,6 +2521,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
> desc->rqtype = direction;
> desc->rqcfg.brst_size = pch->burst_sz;
> desc->rqcfg.brst_len = 1;
> + desc->bytes_requested = period_len;
> fill_px(&desc->px, dst, src, period_len);
>
> if (!first)
> @@ -2601,6 +2664,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
> desc->rqcfg.brst_size = pch->burst_sz;
> desc->rqcfg.brst_len = 1;
> desc->rqtype = direction;
> + desc->bytes_requested = sg_dma_len(sg);
> }
>
> /* Return the last desc in the chain */
> @@ -2631,7 +2695,7 @@ static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
> caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
> caps->cmd_pause = false;
> caps->cmd_terminate = true;
> - caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
> + caps->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
>
> return 0;
> }
> --
> 1.9.1
>