Message-ID: <20220802054835.19482-1-vaishnav.a@ti.com>
Date: Tue, 2 Aug 2022 11:18:35 +0530
From: Vaishnav Achath <vaishnav.a@...com>
To: <peter.ujfalusi@...il.com>, <vkoul@...nel.org>,
<dmaengine@...r.kernel.org>, <linux-kernel@...r.kernel.org>
CC: <nm@...com>, <vigneshr@...com>, <vaishnav.a@...com>,
<j-keerthy@...com>, <m-khayami@...com>, <stanley_liu@...com>
Subject: [PATCH v3] dmaengine: ti: k3-udma: Reset UDMA_CHAN_RT byte counters to prevent overflow

UDMA_CHAN_RT_*BCNT_REG stores the real-time channel byte count
statistics. These registers are 32-bit hardware counters, and the
driver uses them to monitor the operational progress of a channel.
When transferring more than 4 GB of data, it was observed that these
counters overflow, which corrupts the completion calculation for an
operation and makes the transfer hang indefinitely.
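
As a rough illustration (a minimal userspace sketch, not driver code;
the names cumulative_bcnt, per_xfer_bcnt and xfer_len are hypothetical),
a 32-bit counter that keeps accumulating the total byte count wraps
silently once more than 4 GiB has passed through the channel, whereas a
counter that is decremented by each completed transaction's size stays
bounded:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cumulative_bcnt = 0;	/* 32-bit counter left to accumulate forever */
	uint32_t per_xfer_bcnt = 0;	/* 32-bit counter decremented on completion */
	const uint32_t xfer_len = 1U << 30;	/* 1 GiB per transaction */
	int i;

	for (i = 0; i < 6; i++) {
		cumulative_bcnt += xfer_len;	/* wraps modulo 2^32 past 4 GiB */
		per_xfer_bcnt += xfer_len;	/* counts the in-flight transaction... */
		per_xfer_bcnt -= xfer_len;	/* ...and is decremented by the completed bytes */
	}

	/* cumulative counter reads 2 GiB instead of 6 GiB; per-transfer stays at 0 */
	printf("cumulative: %" PRIu32 " per-transfer: %" PRIu32 "\n",
	       cumulative_bcnt, per_xfer_bcnt);
	return 0;
}
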
This commit decrements the byte counters for every completed
transaction so that these registers never overflow, while the RT
counters still maintain proper byte count statistics for the ongoing
transaction. Earlier, uc->bcnt kept a count of the completed bytes on
the driver side; since the RT counters now track only the current
transaction, maintaining uc->bcnt is no longer necessary.

Signed-off-by: Vaishnav Achath <vaishnav.a@...com>
---
V2->V3 :
* Remove unnecessary checks for uc->tchan and uc->rchan in
udma_decrement_byte_counters()
V1->V2 :
* Update bcnt reset based on uc->desc->dir
* change order of udma_decrement_byte_counters() to before udma_start()
* update subsystem tag

 drivers/dma/ti/k3-udma.c | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)

diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 2f0d2c68c93c..fcfcde947b30 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -300,8 +300,6 @@ struct udma_chan {
struct udma_tx_drain tx_drain;
- u32 bcnt; /* number of bytes completed since the start of the channel */
-
/* Channel configuration parameters */
struct udma_chan_config config;
@@ -757,6 +755,20 @@ static void udma_reset_rings(struct udma_chan *uc)
}
}
+static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
+{
+ if (uc->desc->dir == DMA_DEV_TO_MEM) {
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ } else {
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
+ if (!uc->bchan)
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ }
+}
+
static void udma_reset_counters(struct udma_chan *uc)
{
u32 val;
@@ -790,8 +802,6 @@ static void udma_reset_counters(struct udma_chan *uc)
val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
}
-
- uc->bcnt = 0;
}
static int udma_reset_chan(struct udma_chan *uc, bool hard)
@@ -1115,7 +1125,7 @@ static void udma_check_tx_completion(struct work_struct *work)
if (uc->desc) {
struct udma_desc *d = uc->desc;
- uc->bcnt += d->residue;
+ udma_decrement_byte_counters(uc, d->residue);
udma_start(uc);
vchan_cookie_complete(&d->vd);
break;
@@ -1168,7 +1178,7 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
vchan_cyclic_callback(&d->vd);
} else {
if (udma_is_desc_really_done(uc, d)) {
- uc->bcnt += d->residue;
+ udma_decrement_byte_counters(uc, d->residue);
udma_start(uc);
vchan_cookie_complete(&d->vd);
} else {
@@ -1204,7 +1214,7 @@ static irqreturn_t udma_udma_irq_handler(int irq, void *data)
vchan_cyclic_callback(&d->vd);
} else {
/* TODO: figure out the real amount of data */
- uc->bcnt += d->residue;
+ udma_decrement_byte_counters(uc, d->residue);
udma_start(uc);
vchan_cookie_complete(&d->vd);
}
@@ -3809,7 +3819,6 @@ static enum dma_status udma_tx_status(struct dma_chan *chan,
bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
}
- bcnt -= uc->bcnt;
if (bcnt && !(bcnt % uc->desc->residue))
residue = 0;
else
--
2.17.1