Message-ID: <20220704111325.636-1-vaishnav.a@ti.com>
Date: Mon, 4 Jul 2022 16:43:25 +0530
From: Vaishnav Achath <vaishnav.a@...com>
To: <peter.ujfalusi@...il.com>, <vkoul@...nel.org>,
<dmaengine@...r.kernel.org>, <linux-kernel@...r.kernel.org>
CC: <nm@...com>, <vigneshr@...com>, <p.yadav@...com>,
<vaishnav.a@...com>, <j-keerthy@...com>, <m-khayami@...com>,
<stanley_liu@...com>
Subject: [PATCH] dma: ti: k3-udma: Reset UDMA_CHAN_RT byte counters to prevent overflow
UDMA_CHAN_RT_*BCNT_REG stores the real-time channel byte count
statistics. These registers are 32-bit hardware counters and the driver
uses them to monitor the operational progress of a channel. When
transferring more than 4 GB of data, these counters were observed to
overflow; the completion calculation of an operation then goes wrong
and the transfer hangs indefinitely.
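To illustrate the failure mode outside the driver (a standalone sketch
with made-up byte counts, not code from this patch): once more than
4 GB has moved through a channel, a 32-bit cumulative counter wraps, so
a progress check that compares two such counters can report the wrong
state:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Cumulative bytes moved by the two ends of a channel; the
	 * producing side is slightly ahead, as it normally would be. */
	uint64_t producer_total = 4295000000ULL; /* just past 4 GB, wraps in 32 bits */
	uint64_t consumer_total = 4294900000ULL; /* just below 4 GB, not wrapped yet */

	/* What 32-bit counters would report. */
	uint32_t producer_bcnt = (uint32_t)producer_total;
	uint32_t consumer_bcnt = (uint32_t)consumer_total;

	printf("true outstanding bytes: %llu\n",
	       (unsigned long long)(producer_total - consumer_total));

	/* A check of the form "the consumer has caught up with the
	 * producer" now gives the wrong answer, because only one of the
	 * two counters has wrapped. */
	printf("32-bit view: producer=%u consumer=%u -> looks %s\n",
	       producer_bcnt, consumer_bcnt,
	       consumer_bcnt >= producer_bcnt ? "complete" : "in progress");
	return 0;
}

The fix below avoids the wrap entirely by never letting the counters
accumulate across transactions.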
Decrement the byte counters by the size of every completed transaction
so that these registers never overflow and the RT counters keep
accurate byte count statistics for the ongoing transaction.
Previously uc->bcnt maintained a count of the completed bytes on the
driver side; now that the RT counters track only the current
transaction, maintaining uc->bcnt is no longer necessary, so remove it.
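For illustration only (hypothetical descriptor sizes, not driver code),
the per-completion decrement keeps a 32-bit counter bounded by the
in-flight work instead of the lifetime total:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hw_bcnt = 0;            /* models one 32-bit RT counter */
	const uint32_t desc_len = 65536; /* made-up descriptor size */
	uint64_t lifetime = 0;

	for (uint64_t i = 0; i < 200000; i++) { /* ~12 GB overall */
		hw_bcnt += desc_len;  /* hardware counts the transfer */
		lifetime += desc_len;
		hw_bcnt -= desc_len;  /* completion path subtracts it */
	}

	printf("lifetime bytes: %llu, counter value: %u\n",
	       (unsigned long long)lifetime, hw_bcnt);
	return 0;
}

The counter therefore never approaches the 4 GB wrap point, no matter
how much data the channel moves over its lifetime.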
Signed-off-by: Vaishnav Achath <vaishnav.a@...com>
---
drivers/dma/ti/k3-udma.c | 27 +++++++++++++++++++--------
1 file changed, 19 insertions(+), 8 deletions(-)
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 2f0d2c68c93c..0f91a3e47c19 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -300,8 +300,6 @@ struct udma_chan {
struct udma_tx_drain tx_drain;
- u32 bcnt; /* number of bytes completed since the start of the channel */
-
/* Channel configuration parameters */
struct udma_chan_config config;
@@ -757,6 +755,22 @@ static void udma_reset_rings(struct udma_chan *uc)
}
}
+static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
+{
+ if (uc->tchan) {
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
+ if (!uc->bchan)
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ }
+
+ if (uc->rchan) {
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ }
+}
+
static void udma_reset_counters(struct udma_chan *uc)
{
u32 val;
@@ -790,8 +804,6 @@ static void udma_reset_counters(struct udma_chan *uc)
val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
}
-
- uc->bcnt = 0;
}
static int udma_reset_chan(struct udma_chan *uc, bool hard)
@@ -1115,8 +1127,8 @@ static void udma_check_tx_completion(struct work_struct *work)
if (uc->desc) {
struct udma_desc *d = uc->desc;
- uc->bcnt += d->residue;
udma_start(uc);
+ udma_decrement_byte_counters(uc, d->residue);
vchan_cookie_complete(&d->vd);
break;
}
@@ -1168,8 +1180,8 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
vchan_cyclic_callback(&d->vd);
} else {
if (udma_is_desc_really_done(uc, d)) {
- uc->bcnt += d->residue;
udma_start(uc);
+ udma_decrement_byte_counters(uc, d->residue);
vchan_cookie_complete(&d->vd);
} else {
schedule_delayed_work(&uc->tx_drain.work,
@@ -1204,7 +1216,7 @@ static irqreturn_t udma_udma_irq_handler(int irq, void *data)
vchan_cyclic_callback(&d->vd);
} else {
/* TODO: figure out the real amount of data */
- uc->bcnt += d->residue;
+ udma_decrement_byte_counters(uc, d->residue);
udma_start(uc);
vchan_cookie_complete(&d->vd);
}
@@ -3809,7 +3821,6 @@ static enum dma_status udma_tx_status(struct dma_chan *chan,
bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
}
- bcnt -= uc->bcnt;
if (bcnt && !(bcnt % uc->desc->residue))
residue = 0;
else
--
2.17.1