[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <b59dd8cd-fd75-5342-d411-817f33e0ff48@amd.com>
Date: Wed, 27 Mar 2024 10:46:04 -0700
From: Lizhi Hou <lizhi.hou@....com>
To: Louis Chauvet <louis.chauvet@...tlin.com>, Brian Xu <brian.xu@....com>,
Raj Kumar Rampelli <raj.kumar.rampelli@....com>, Vinod Koul
<vkoul@...nel.org>, Michal Simek <michal.simek@....com>, Jan Kuliga
<jankul@...tek.krakow.pl>, Miquel Raynal <miquel.raynal@...tlin.com>
CC: <dmaengine@...r.kernel.org>, <linux-arm-kernel@...ts.infradead.org>,
<linux-kernel@...r.kernel.org>, <stable@...r.kernel.org>
Subject: Re: [PATCH 2/3] dmaengine: xilinx: xdma: Fix synchronization issue
On 3/27/24 02:58, Louis Chauvet wrote:
> The current xdma_synchronize method does not properly wait for the last
> transfer to be done. Due to limitations of the XDMA engine, it is not
> possible to stop a transfer in the middle of a descriptor. Said
> otherwise, if a stop is requested at the end of descriptor "N" and the OS
> is fast enough, the DMA controller will effectively stop immediately.
> However, if the OS is slightly too slow to request the stop and the DMA
> engine starts descriptor "N+1", the N+1 transfer will be performed until
> its end. This means that after a terminate_all, the last descriptor must
> remain valid and the synchronization must wait for this last descriptor to
> be terminated.
>
> Fixes: 855c2e1d1842 ("dmaengine: xilinx: xdma: Rework xdma_terminate_all()")
> Fixes: f5c392d106e7 ("dmaengine: xilinx: xdma: Add terminate_all/synchronize callbacks")
> Cc: stable@...r.kernel.org
> Suggested-by: Miquel Raynal <miquel.raynal@...tlin.com>
> Signed-off-by: Louis Chauvet <louis.chauvet@...tlin.com>
> ---
> drivers/dma/xilinx/xdma-regs.h | 3 +++
> drivers/dma/xilinx/xdma.c | 26 ++++++++++++++++++--------
> 2 files changed, 21 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h
> index 98f5f6fb9ff9..6ad08878e938 100644
> --- a/drivers/dma/xilinx/xdma-regs.h
> +++ b/drivers/dma/xilinx/xdma-regs.h
> @@ -117,6 +117,9 @@ struct xdma_hw_desc {
> CHAN_CTRL_IE_WRITE_ERROR | \
> CHAN_CTRL_IE_DESC_ERROR)
>
> +/* bits of the channel status register */
> +#define XDMA_CHAN_STATUS_BUSY BIT(0)
> +
> #define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
>
> #define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
> diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
> index b9788aa8f6b7..5a3a3293b21b 100644
> --- a/drivers/dma/xilinx/xdma.c
> +++ b/drivers/dma/xilinx/xdma.c
> @@ -71,6 +71,8 @@ struct xdma_chan {
> enum dma_transfer_direction dir;
> struct dma_slave_config cfg;
> u32 irq;
> + struct completion last_interrupt;
> + bool stop_requested;
> };
>
> /**
> @@ -376,6 +378,8 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
> return ret;
>
> xchan->busy = true;
> + xchan->stop_requested = false;
> + reinit_completion(&xchan->last_interrupt);
If stop_requested is true, it should not start another transfer. So I
would suggest adding
if (xchan->stop_requested)
return -ENODEV;
at the beginning of xdma_xfer_start().
xdma_xfer_start() is protected by chan lock.
>
> return 0;
> }
> @@ -387,7 +391,6 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
> static int xdma_xfer_stop(struct xdma_chan *xchan)
> {
> int ret;
> - u32 val;
> struct xdma_device *xdev = xchan->xdev_hdl;
>
> /* clear run stop bit to prevent any further auto-triggering */
> @@ -395,13 +398,7 @@ static int xdma_xfer_stop(struct xdma_chan *xchan)
> CHAN_CTRL_RUN_STOP);
> if (ret)
> return ret;
The above two lines can be removed with your change.
> -
> - /* Clear the channel status register */
> - ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
> - if (ret)
> - return ret;
> -
> - return 0;
> + return ret;
> }
>
> /**
> @@ -474,6 +471,8 @@ static int xdma_alloc_channels(struct xdma_device *xdev,
> xchan->xdev_hdl = xdev;
> xchan->base = base + i * XDMA_CHAN_STRIDE;
> xchan->dir = dir;
> + xchan->stop_requested = false;
> + init_completion(&xchan->last_interrupt);
>
> ret = xdma_channel_init(xchan);
> if (ret)
> @@ -521,6 +520,7 @@ static int xdma_terminate_all(struct dma_chan *chan)
> spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
>
> xdma_chan->busy = false;
> + xdma_chan->stop_requested = true;
> vd = vchan_next_desc(&xdma_chan->vchan);
> if (vd) {
> list_del(&vd->node);
> @@ -542,6 +542,13 @@ static int xdma_terminate_all(struct dma_chan *chan)
> static void xdma_synchronize(struct dma_chan *chan)
> {
> struct xdma_chan *xdma_chan = to_xdma_chan(chan);
> + struct xdma_device *xdev = xdma_chan->xdev_hdl;
> + int st = 0;
> +
> + /* If the engine continues running, wait for the last interrupt */
> + regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
> + if (st & XDMA_CHAN_STATUS_BUSY)
> + wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000));
I suggest adding an error message for the timeout case.
>
> vchan_synchronize(&xdma_chan->vchan);
> }
> @@ -876,6 +883,9 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
> u32 st;
> bool repeat_tx;
>
> + if (xchan->stop_requested)
> + complete(&xchan->last_interrupt);
> +
This should be moved to the end of the function to make sure processing
of the previous transfer is completed.
out:
if (xchan->stop_requested) {
xchan->busy = false;
complete(&xchan->last_interrupt);
}
spin_unlock(&xchan->vchan.lock);
return IRQ_HANDLED;
Thanks,
Lizhi
> spin_lock(&xchan->vchan.lock);
>
> /* get submitted request */
>
Powered by blists - more mailing lists