Message-Id: <1469534545-14478-3-git-send-email-u.kleine-koenig@pengutronix.de>
Date: Tue, 26 Jul 2016 14:02:25 +0200
From: Uwe Kleine-König <u.kleine-koenig@...gutronix.de>
To: Mugunthan V N <mugunthanvnm@...com>,
Grygorii Strashko <grygorii.strashko@...com>
Cc: linux-omap@...r.kernel.org, netdev@...r.kernel.org,
kernel@...gutronix.de
Subject: [PATCH 2/2] net: davinci_cpdma: reduce time holding chan->lock in cpdma_chan_submit

Allocating and preparing a DMA descriptor doesn't need to happen under
the channel's lock, so do it before taking the lock. The only downside
is that the descriptor might be allocated even though the channel is
about to be stopped, but that is unlikely.

Signed-off-by: Uwe Kleine-König <u.kleine-koenig@...gutronix.de>
---
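For illustration, below is a minimal userspace sketch of the pattern this
patch switches to: allocate and prepare the descriptor before taking the
lock, validate the channel state under the lock, and free the descriptor
only after the lock has been dropped if submission has to be aborted. A
pthread mutex stands in for chan->lock, and fake_desc_alloc() /
fake_desc_free() are made-up helpers rather than the cpdma pool API.

/*
 * Minimal sketch of the locking pattern described above.  A pthread
 * mutex stands in for chan->lock; fake_desc_alloc()/fake_desc_free()
 * are made-up helpers, not the cpdma pool API.
 * Build with: cc -pthread sketch.c
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define DESC_NUM 2

struct chan {
	pthread_mutex_t lock;
	int teardown;			/* channel being torn down? */
	int count;			/* descriptors currently queued */
	void *queue[DESC_NUM];		/* stand-in for the hardware queue */
};

static void *fake_desc_alloc(void) { return malloc(64); }
static void fake_desc_free(void *desc) { free(desc); }

static int chan_submit(struct chan *chan)
{
	void *desc;
	int ret = 0;

	/* allocation and preparation happen outside the lock */
	desc = fake_desc_alloc();
	if (!desc)
		return -ENOMEM;

	pthread_mutex_lock(&chan->lock);

	if (chan->teardown) {
		ret = -EINVAL;
		goto unlock_free;
	}

	if (chan->count >= DESC_NUM) {
		ret = -ENOMEM;
		goto unlock_free;
	}

	/* hand the prepared descriptor over while holding the lock */
	chan->queue[chan->count++] = desc;

	pthread_mutex_unlock(&chan->lock);
	return 0;

unlock_free:
	pthread_mutex_unlock(&chan->lock);
	/* the descriptor is freed only after the lock has been dropped */
	fake_desc_free(desc);
	return ret;
}

int main(void)
{
	struct chan chan = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int i;

	printf("first submit:  %d\n", chan_submit(&chan));
	printf("second submit: %d\n", chan_submit(&chan));
	printf("queue full:    %d\n", chan_submit(&chan));

	chan.teardown = 1;
	printf("teardown:      %d\n", chan_submit(&chan));

	for (i = 0; i < chan.count; i++)
		fake_desc_free(chan.queue[i]);
	return 0;
}

Freeing after the unlock mirrors the patch itself: cpdma_desc_free()
doesn't need chan->lock, so the lock only has to cover the state checks
and the actual queueing.
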
drivers/net/ethernet/ti/davinci_cpdma.c | 38 +++++++++++++++++----------------
1 file changed, 20 insertions(+), 18 deletions(-)
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 5ffa04a306c6..ba3462707ae3 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -542,24 +542,10 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
u32 mode;
int ret = 0;
- spin_lock_irqsave(&chan->lock, flags);
-
- if (chan->state == CPDMA_STATE_TEARDOWN) {
- ret = -EINVAL;
- goto unlock_ret;
- }
-
- if (chan->count >= chan->desc_num) {
- chan->stats.desc_alloc_fail++;
- ret = -ENOMEM;
- goto unlock_ret;
- }
-
desc = cpdma_desc_alloc(ctlr->pool);
if (!desc) {
chan->stats.desc_alloc_fail++;
- ret = -ENOMEM;
- goto unlock_ret;
+ return -ENOMEM;
}
if (len < ctlr->params.min_packet_size) {
@@ -571,8 +557,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
ret = dma_mapping_error(ctlr->dev, buffer);
if (ret) {
cpdma_desc_free(ctlr->pool, desc, 1);
- ret = -EINVAL;
- goto unlock_ret;
+ return -EINVAL;
}
mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
@@ -586,6 +571,19 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
desc_write(desc, sw_buffer, buffer);
desc_write(desc, sw_len, len);
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (chan->state == CPDMA_STATE_TEARDOWN) {
+ ret = -EINVAL;
+ goto unlock_free;
+ }
+
+ if (chan->count >= chan->desc_num) {
+ chan->stats.desc_alloc_fail++;
+ ret = -ENOMEM;
+ goto unlock_free;
+ }
+
__cpdma_chan_submit(chan, desc);
if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
@@ -593,8 +591,12 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
chan->count++;
-unlock_ret:
spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+
+unlock_free:
+ spin_unlock_irqrestore(&chan->lock, flags);
+ cpdma_desc_free(ctlr->pool, desc, 1);
return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
--
2.8.1