Message-Id: <20260109-edma_dymatic-v1-3-9a98c9c98536@nxp.com>
Date: Fri, 09 Jan 2026 15:13:27 -0500
From: Frank Li <Frank.Li@....com>
To: Manivannan Sadhasivam <mani@...nel.org>, Vinod Koul <vkoul@...nel.org>,
Gustavo Pimentel <Gustavo.Pimentel@...opsys.com>,
Kees Cook <kees@...nel.org>, "Gustavo A. R. Silva" <gustavoars@...nel.org>,
Manivannan Sadhasivam <mani@...nel.org>,
Krzysztof Wilczyński <kwilczynski@...nel.org>,
Kishon Vijay Abraham I <kishon@...nel.org>,
Bjorn Helgaas <bhelgaas@...gle.com>, Christoph Hellwig <hch@....de>,
Niklas Cassel <cassel@...nel.org>
Cc: dmaengine@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-hardening@...r.kernel.org, linux-pci@...r.kernel.org,
linux-nvme@...ts.infradead.org, Damien Le Moal <dlemoal@...nel.org>,
imx@...ts.linux.dev, Frank Li <Frank.Li@....com>
Subject: [PATCH RFT 3/5] dmaengine: dw-edma: Make DMA link list work as a
circular buffer

The existing code rebuilds the entire linked list from the beginning and
resets the DMA linked-list header for each transfer, which is unnecessary.

The DMA linked list can instead be treated as a circular buffer: new DMA
requests are appended at ll_head with the appropriate CB (cycle bit) flag
and the doorbell is rung, without rebuilding the whole list.

Switch to this circular-buffer model to prepare for dynamically adding
new requests while the DMA engine is running.
Signed-off-by: Frank Li <Frank.Li@....com>
---
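For reviewers: a minimal user-space sketch of the ring-index arithmetic this
patch relies on. LL_MAX, head, end and cb are illustrative stand-ins for
chan->ll_max, chan->ll_head, chan->ll_end and chan->cb; this is not the
driver code itself.

#include <stdbool.h>
#include <stdio.h>

#define LL_MAX 8        /* example ring size; the last slot is the link-back */

/* Usable data slots are LL_MAX - 1; at most LL_MAX - 2 may be in flight. */
static unsigned int ring_free(unsigned int head, unsigned int end)
{
        return (end + LL_MAX - 2 - head) % (LL_MAX - 1);
}

/* Advance head by one entry; toggle the cycle bit (CB) when wrapping. */
static unsigned int ring_advance(unsigned int head, bool *cb)
{
        if (++head == LL_MAX - 1) {
                *cb = !*cb;
                head = 0;
        }
        return head;
}

int main(void)
{
        unsigned int head = 0, end = 0;
        bool cb = true;
        int i;

        printf("free when empty:  %u\n", ring_free(head, end));  /* 6 */

        for (i = 0; i < 3; i++)         /* append three bursts */
                head = ring_advance(head, &cb);

        printf("free after three: %u\n", ring_free(head, end));  /* 3 */

        return 0;
}

The formula reports at most LL_MAX - 2 free slots on purpose: one slot is
reserved for the link back to the start of ll_region, and software always
keeps one more entry to itself so that head == end can only mean "empty",
never "full".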
drivers/dma/dw-edma/dw-edma-core.c | 57 +++++++++++++++++++++++++++++---------
drivers/dma/dw-edma/dw-edma-core.h | 28 ++++++++++++++++++-
2 files changed, 71 insertions(+), 14 deletions(-)
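The completion side of the same sketch (again illustrative, not driver code):
each descriptor records where its last entry landed, and the done interrupt
only moves the ring's end to that index, handing the consumed entries back
to software.

struct desc_sketch {
        unsigned int ll_end;    /* head value captured when the last burst was queued */
};

/*
 * Mirrors "chan->ll_end = desc->ll_end" in dw_edma_done_interrupt(): no
 * list entry is rewritten, the ring's end simply catches up with the head,
 * so ring_free() grows again for the next submission.
 */
static void ring_complete(unsigned int *end, const struct desc_sketch *d)
{
        *end = d->ll_end;
}
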
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 9fb7ae4001207b2ccb058d6efa9856dded379b8f..678bbc4e65f0e2fced6efec88a3af6935d833bc6 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -51,7 +51,6 @@ dw_edma_alloc_desc(struct dw_edma_chan *chan, u32 nburst)
desc->chan = chan;
desc->nburst = nburst;
- desc->cb = true;
return desc;
}
@@ -61,27 +60,56 @@ static void vchan_free_desc(struct virt_dma_desc *vdesc)
kfree(vd2dw_edma_desc(vdesc));
}
+static void dw_edma_core_reset_ll(struct dw_edma_chan *chan)
+{
+ chan->ll_head = 0;
+ chan->ll_end = 0;
+ chan->cb = true;
+
+ dw_edma_core_ll_link(chan, chan->ll_max - 1, chan->cb,
+ chan->ll_region.paddr);
+
+ dw_edma_core_ch_enable(chan);
+}
+
+static u32 dw_edma_core_get_free_num(struct dw_edma_chan *chan)
+{
+ /*
+ * Max entries is ll_max - 1 because the last one is used to link back
+ * to the start of ll_region.
+ */
+ return (chan->ll_end + chan->ll_max - 2 - chan->ll_head) %
+ (chan->ll_max - 1);
+}
+
static void dw_edma_core_start(struct dw_edma_desc *desc, bool first)
{
struct dw_edma_chan *chan = desc->chan;
u32 i = 0;
+ u32 free;
+
+ for (i = desc->start_burst; i < desc->nburst; i++) {
+ free = dw_edma_core_get_free_num(chan);
- for (i = 0; i < desc->nburst; i++) {
- if (i == chan->ll_max - 1)
+ if (!free)
break;
- dw_edma_core_ll_data(chan, &desc->burst[i + desc->start_burst],
- i, desc->cb,
- i == desc->nburst - 1 || i == chan->ll_max - 2);
+ /* Enable IRQ for the last free entry or the last burst */
+ dw_edma_core_ll_data(chan, &desc->burst[i],
+ chan->ll_head, chan->cb,
+ i == desc->nburst - 1 || free == 1);
+
+ chan->ll_head++;
+
+ if (chan->ll_head == chan->ll_max - 1) {
+ chan->cb = !chan->cb;
+ chan->ll_head = 0;
+ }
}
desc->done_burst = desc->start_burst;
desc->start_burst += i;
-
- dw_edma_core_ll_link(chan, i, desc->cb, chan->ll_region.paddr);
-
- if (first)
- dw_edma_core_ch_enable(chan);
+ desc->ll_end = chan->ll_head;
dw_edma_core_ch_doorbell(chan);
}
@@ -90,6 +118,10 @@ static int dw_edma_start_transfer(struct dw_edma_chan *chan)
{
struct dw_edma_desc *desc;
struct virt_dma_desc *vd;
+ int index = dw_edma_core_ll_cur_idx(chan);
+
+ if (index < 0)
+ dw_edma_core_reset_ll(chan);
vd = vchan_next_desc(&chan->vc);
if (!vd)
@@ -101,8 +133,6 @@ static int dw_edma_start_transfer(struct dw_edma_chan *chan)
dw_edma_core_start(desc, !desc->start_burst);
- desc->cb = !desc->cb;
-
return 1;
}
@@ -530,6 +560,7 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
DMA_TRANS_NOERROR);
list_del(&vd->node);
vchan_cookie_complete(vd);
+ chan->ll_end = desc->ll_end;
}
/* Continue transferring if there are remaining chunks or issued requests.
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index d68c4592c6177e4fe2a2ae8a645bb065279ac45d..fd4b086a36441cc3209131e4274d6c47de4d616c 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -60,9 +60,10 @@ struct dw_edma_desc {
u32 alloc_sz;
u32 xfer_sz;
+ u32 ll_end;
+
u32 done_burst;
u32 start_burst;
- u8 cb;
u32 nburst;
struct dw_edma_burst burst[] __counted_by(nburst);
};
@@ -73,9 +74,34 @@ struct dw_edma_chan {
int id;
enum dw_edma_dir dir;
+ /*
+ * New entries are added at ll_head.
+ *
+ * ll_end ll_head
+ * │ │
+ * ▼ ▼
+ * ┌─────────────────────────────────────────┌─┐
+ * │SSSSSSSDDDDDDDDDDDDDDDDDDDDDSSSSSSSSSSSSS│ │
+ * └─────────────────────────────────────────└┬┘
+ * ▲ │
+ * └─────────────────────────────────────────┘
+ * DMA Link To Region Start
+ * D: eDMA owned LL entry
+ * S: Software owned LL entry.
+ *
+ * ll_head == ll_end means all entries are owned by software and all
+ * previous DMA transfers have completed.
+ *
+ * Software always owns at least one entry, so all-D is impossible.
+ */
+ u32 ll_head;
+ u32 ll_end;
+
u32 ll_max;
struct dw_edma_region ll_region; /* Linked list */
+ bool cb;
+
struct msi_msg msi;
enum dw_edma_request request;
--
2.34.1