Message-Id: <DDB5IRSNB09F.3HRTZZOZQ7J6@folker-schwesinger.de>
Date: Mon, 06 Oct 2025 10:02:25 +0000
From: "Folker Schwesinger" <dev@...ker-schwesinger.de>
To: "Suraj Gupta" <suraj.gupta2@....com>, <vkoul@...nel.org>,
<radhey.shyam.pandey@....com>, <michal.simek@....com>
Cc: <dmaengine@...r.kernel.org>, <linux-arm-kernel@...ts.infradead.org>,
<linux-kernel@...r.kernel.org>
Subject: Re: [PATCH V2 2/3] dmaengine: xilinx_dma: Enable transfer chaining
for AXIDMA and MCDMA by removing idle restriction
On Fri Oct 3, 2025 at 8:19 AM CEST, Suraj Gupta wrote:
> Remove the restrictive idle check in xilinx_dma_start_transfer() and
> xilinx_mcdma_start_transfer() that prevented new transfers from being
> queued when the channel was busy.
> Additionally, only update the CURDESC register when the channel is
> running in scatter-gather mode and the active list is empty, to avoid
> interfering with transfers already in progress. When the active list
> contains transfers, the hardware tail pointer extension mechanism
> handles chaining automatically.
>
> Signed-off-by: Suraj Gupta <suraj.gupta2@....com>
> Co-developed-by: Srinivas Neeli <srinivas.neeli@....com>
> Signed-off-by: Srinivas Neeli <srinivas.neeli@....com>
For the AXIDMA code paths:
Tested-by: Folker Schwesinger <dev@...ker-schwesinger.de>
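
For anyone following the thread, a rough standalone model of the decision
this patch encodes (not driver code; chan_state, program_curdesc() and
program_taildesc() are placeholders for the real channel state and the
CURDESC/TAILDESC register writes): the head pointer is only programmed
when nothing is in flight, while the tail pointer write is what kicks or
extends the chain.

/*
 * Illustrative model only, assuming the semantics described in the
 * commit message above.
 */
#include <stdbool.h>
#include <stdio.h>

struct chan_state {
	bool has_sg;		/* scatter-gather capable channel */
	bool active_empty;	/* no descriptors currently in flight */
};

static void program_curdesc(void)  { puts("write CURDESC (start of new chain)"); }
static void program_taildesc(void) { puts("write TAILDESC (kick / extend chain)"); }

/*
 * Mirrors the patched start_transfer() logic: CURDESC is touched only
 * when nothing is active, so an in-progress chain is never redirected;
 * the TAILDESC write alone extends a running chain.
 */
static void start_transfer(const struct chan_state *c)
{
	if (c->has_sg && c->active_empty)
		program_curdesc();
	program_taildesc();
}

int main(void)
{
	struct chan_state idle_chan = { .has_sg = true, .active_empty = true };
	struct chan_state busy_chan = { .has_sg = true, .active_empty = false };

	puts("-- first submission (channel idle) --");
	start_transfer(&idle_chan);
	puts("-- submission while busy (chained) --");
	start_transfer(&busy_chan);
	return 0;
}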
> ---
> drivers/dma/xilinx/xilinx_dma.c | 13 ++++---------
> 1 file changed, 4 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
> index 53b82ddad007..aa6589e88c5c 100644
> --- a/drivers/dma/xilinx/xilinx_dma.c
> +++ b/drivers/dma/xilinx/xilinx_dma.c
> @@ -1548,9 +1548,6 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
> if (list_empty(&chan->pending_list))
> return;
>
> - if (!chan->idle)
> - return;
> -
> head_desc = list_first_entry(&chan->pending_list,
> struct xilinx_dma_tx_descriptor, node);
> tail_desc = list_last_entry(&chan->pending_list,
> @@ -1567,7 +1564,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
> dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
> }
>
> - if (chan->has_sg)
> + if (chan->has_sg && list_empty(&chan->active_list))
> xilinx_write(chan, XILINX_DMA_REG_CURDESC,
> head_desc->async_tx.phys);
> reg &= ~XILINX_DMA_CR_DELAY_MAX;
> @@ -1627,9 +1624,6 @@ static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
> if (chan->err)
> return;
>
> - if (!chan->idle)
> - return;
> -
> if (list_empty(&chan->pending_list))
> return;
>
> @@ -1652,8 +1646,9 @@ static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
> dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
>
> /* Program current descriptor */
> - xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
> - head_desc->async_tx.phys);
> + if (chan->has_sg && list_empty(&chan->active_list))
> + xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
> + head_desc->async_tx.phys);
>
> /* Program channel enable register */
> reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);