Message-ID: <CACDNBm1ZU=7xmFQkE9dxWJKmwMMMZSGOR1EsWjj_Cie1k9JvSA@mail.gmail.com>
Date: Tue, 18 Jun 2013 16:57:18 +0800
From: Xiang Wang <wangxfdu@...il.com>
To: Vinod Koul <vinod.koul@...el.com>
Cc: Dan Williams <djbw@...com>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
cxie4@...vell.com, Xiang Wang <wangx@...vell.com>
Subject: Re: [PATCH] dma: mmp_pdma: support for getting residual bytes
2013/6/17 Vinod Koul <vinod.koul@...el.com>:
> On Fri, May 31, 2013 at 04:21:25PM +0800, Xiang Wang wrote:
>> From: Xiang Wang <wangx@...vell.com>
>>
>> In some of our drivers (e.g. UART) we may stop a running DMA
>> before it finishes. So we need to know how many bytes have
>> been transferred.
>>
>> Signed-off-by: Xiang Wang <wangx@...vell.com>
>> ---
>> drivers/dma/mmp_pdma.c | 77 +++++++++++++++++++++++++++++++++++++++++++-----
>> 1 files changed, 69 insertions(+), 8 deletions(-)
>>
>> diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
>> index 76a8dcf..0c623cc 100644
>> --- a/drivers/dma/mmp_pdma.c
>> +++ b/drivers/dma/mmp_pdma.c
>> @@ -104,7 +104,8 @@ struct mmp_pdma_chan {
>> spinlock_t desc_lock; /* Descriptor list lock */
>> struct list_head chain_pending; /* Link descriptors queue for pending */
>> struct list_head chain_running; /* Link descriptors queue for running */
>> - bool idle; /* channel statue machine */
>> + enum dma_status status; /* channel status machine */
>> + u32 bytes_residue;
>>
>> struct dma_pool *desc_pool; /* Descriptors pool */
>> };
>> @@ -270,7 +271,7 @@ static void start_pending_queue(struct mmp_pdma_chan *chan)
>> struct mmp_pdma_desc_sw *desc;
>>
>> /* still in running, irq will start the pending list */
>> - if (!chan->idle) {
>> + if (chan->status == DMA_IN_PROGRESS) {
>> dev_dbg(chan->dev, "DMA controller still busy\n");
> why are you not reading the residue and filling it here as well
In patch V2, the user can query the residue bytes on the fly. We only
need to read and save the value at the key points where the phy channel
is released (after that, its registers are no longer accessible).
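For example, a minimal sketch of that idea (illustrative only; the exact
release points and code in V2 may differ) would be:

	/*
	 * Hypothetical release point: snapshot the residue while the phy
	 * registers still belong to this channel, then give the phy back.
	 */
	if (chan->phy) {
		chan->bytes_residue = mmp_pdma_get_bytes_residue(chan);
		chan->phy->vchan = NULL;
		chan->phy = NULL;
	}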
>
>> return;
>> }
>> @@ -307,7 +308,55 @@ static void start_pending_queue(struct mmp_pdma_chan *chan)
>> */
>> set_desc(chan->phy, desc->async_tx.phys);
>> enable_chan(chan->phy);
>> - chan->idle = false;
>> + chan->status = DMA_IN_PROGRESS;
>> + chan->bytes_residue = 0;
>> +}
>> +
>> +/*
>> + * Get the number of bytes untransferred by DMA.
> ^^^^^^^^^^^^^
> The word which might fit better is pending
Updated in patch V2. Thanks!
>> + */
>> +static u32 mmp_pdma_get_bytes_residue(struct mmp_pdma_chan *chan)
>> +{
>> + u32 reg, orig_pos, cur_pos, residue = 0;
>> + struct mmp_pdma_desc_sw *desc_sw;
>> + struct list_head *list_chain = NULL;
>> +
>> + /*
>> + * When a phy channel is unavailable, maybe its destroied, return last
> typo ^^^^^^^^^
Updated in patch V2. Thanks!
>> + * stored value for safe.
>> + */
>> + if (!chan || !chan->phy)
>> + return chan->bytes_residue;
>> +
>> + if (!list_empty(&chan->chain_running))
>> + list_chain = &chan->chain_running;
>> + else
>> + return 0;
>> +
>> + desc_sw = list_first_entry(list_chain, struct mmp_pdma_desc_sw, node);
>> +
>> + switch (chan->dir) {
>> + case DMA_DEV_TO_MEM:
>> + reg = (chan->phy->idx << 4) + DTADR;
>> + cur_pos = readl_relaxed(chan->phy->base + reg);
>> + orig_pos = desc_sw->desc.dtadr;
>> + break;
> empty line here and below pls
Updated in patch V2. Thanks!
>
>> + case DMA_MEM_TO_DEV:
>> + reg = (chan->phy->idx << 4) + DSADR;
>> + cur_pos = readl_relaxed(chan->phy->base + reg);
>> + orig_pos = desc_sw->desc.dsadr;
>> + break;
>> + case DMA_MEM_TO_MEM:
>> + reg = (chan->phy->idx << 4) + DTADR;
>> + cur_pos = readl_relaxed(chan->phy->base + reg);
>> + orig_pos = desc_sw->desc.dtadr;
>> + break;
>> + default:
>> + return 0;
>> + }
>> +
>> + residue = (u32)(desc_sw->desc.dcmd & DCMD_LENGTH) + orig_pos - cur_pos;
>> + return residue;
>> }
>>
>>
>> @@ -381,7 +430,7 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
>> chan->phy->vchan = NULL;
>> chan->phy = NULL;
>> }
>> - chan->idle = true;
>> + chan->status = DMA_SUCCESS;
>> chan->dev_addr = 0;
>> return 1;
>> }
>> @@ -409,7 +458,7 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
>>
>> dma_pool_destroy(chan->desc_pool);
>> chan->desc_pool = NULL;
>> - chan->idle = true;
>> + chan->status = DMA_SUCCESS;
>> chan->dev_addr = 0;
>> if (chan->phy) {
>> chan->phy->vchan = NULL;
>> @@ -589,7 +638,13 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
>> mmp_pdma_free_desc_list(chan, &chan->chain_pending);
>> mmp_pdma_free_desc_list(chan, &chan->chain_running);
>> spin_unlock_irqrestore(&chan->desc_lock, flags);
>> - chan->idle = true;
>> + chan->status = DMA_SUCCESS;
>> + chan->bytes_residue = 0;
>> + break;
> ditto
Also updated
>> + case DMA_PAUSE:
>> + disable_chan(chan->phy);
>> + chan->status = DMA_PAUSED;
>> + chan->bytes_residue = mmp_pdma_get_bytes_residue(chan);
>> break;
>> case DMA_SLAVE_CONFIG:
>> if (cfg->direction == DMA_DEV_TO_MEM) {
>> @@ -637,7 +692,8 @@ static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
>> unsigned long flags;
>>
>> spin_lock_irqsave(&chan->desc_lock, flags);
>> - ret = dma_cookie_status(dchan, cookie, txstate);
>> + ret = chan->status;
>> + dma_set_residue(txstate, chan->bytes_residue);
>> spin_unlock_irqrestore(&chan->desc_lock, flags);
>>
>> return ret;
>> @@ -684,6 +740,8 @@ static void dma_do_tasklet(unsigned long data)
>> dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
>> }
>>
>> + /* update bytes_residue so that the user can query later on */
>> + chan->bytes_residue = mmp_pdma_get_bytes_residue(chan);
>> /*
>> * move the descriptors to a temporary list so we can drop the lock
>> * during the entire cleanup operation
>> @@ -691,7 +749,7 @@ static void dma_do_tasklet(unsigned long data)
>> list_splice_tail_init(&chan->chain_running, &chain_cleanup);
>>
>> /* the hardware is now idle and ready for more */
>> - chan->idle = true;
>> + chan->status = DMA_SUCCESS;
>>
>> /* Start any pending transactions automatically */
>> start_pending_queue(chan);
>> @@ -750,6 +808,9 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
>> INIT_LIST_HEAD(&chan->chain_pending);
>> INIT_LIST_HEAD(&chan->chain_running);
>>
>> + chan->status = DMA_SUCCESS;
>> + chan->bytes_residue = 0;
>> +
>> /* register virt channel to dma engine */
>> list_add_tail(&chan->chan.device_node,
>> &pdev->device.channels);
> Okay, I went through the thread briefly. I understand why you are returning the
> residue, and it is a nice usage model you have here.
> What I don't understand is the reason for limiting the dma driver to a
> certain usage model. Yes, this is what you need and what you have done. But
> nothing prevents you from reading the residue and returning it on the fly. All the
> basic elements are already in this patch. So let's do the right thing now :)
In patch V2, the user can query the residue bytes on the fly through a
modified mmp_pdma_tx_status().
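For reference, the on-the-fly query could look roughly like this (a sketch
of the V2 idea, not the final code; it reuses the helper from this patch):

	static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
						  dma_cookie_t cookie,
						  struct dma_tx_state *txstate)
	{
		struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
		enum dma_status ret;
		unsigned long flags;

		spin_lock_irqsave(&chan->desc_lock, flags);
		/* refresh from the live registers while the phy is still ours */
		if (chan->phy)
			chan->bytes_residue = mmp_pdma_get_bytes_residue(chan);
		dma_set_residue(txstate, chan->bytes_residue);
		ret = chan->status;
		spin_unlock_irqrestore(&chan->desc_lock, flags);

		return ret;
	}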
>
> --
> ~Vinod
--
Regards,
Xiang