Message-ID: <aEhMBsqlx9I4XqJS@lizhi-Precision-Tower-5810>
Date: Tue, 10 Jun 2025 11:15:18 -0400
From: Frank Li <Frank.li@....com>
To: James Clark <james.clark@...aro.org>
Cc: Vladimir Oltean <olteanv@...il.com>, Mark Brown <broonie@...nel.org>,
Vladimir Oltean <vladimir.oltean@....com>,
linux-spi@...r.kernel.org, imx@...ts.linux.dev,
linux-kernel@...r.kernel.org, Arnd Bergmann <arnd@...db.de>
Subject: Re: [PATCH 2/4] spi: spi-fsl-dspi: Use non-coherent memory for DMA
On Mon, Jun 09, 2025 at 04:32:39PM +0100, James Clark wrote:
> Using coherent memory here isn't functionally necessary. Because the
> change to use non-coherent memory isn't overly complex and only a few
> synchronization points are required, we might as well do it while fixing
> up some other DMA issues.
Any benefit from using non-coherent memory here?
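
As I understand it, the non-coherent path keeps the buffer cacheable on
the CPU side and trades the coherent mapping for explicit ownership
hand-offs, roughly like the sketch below (made-up names such as
sketch_setup()/sketch_buf, not this driver's code):

#include <linux/dma-mapping.h>

static void *sketch_buf;
static dma_addr_t sketch_phys;

static int sketch_setup(struct device *dev, size_t bufsize)
{
	/* cacheable buffer, direction fixed at allocation time */
	sketch_buf = dma_alloc_noncoherent(dev, bufsize, &sketch_phys,
					   DMA_TO_DEVICE, GFP_KERNEL);
	return sketch_buf ? 0 : -ENOMEM;
}

static void sketch_xfer(struct device *dev, size_t len)
{
	/* CPU fills sketch_buf, then hands ownership to the device */
	dma_sync_single_for_device(dev, sketch_phys, len, DMA_TO_DEVICE);

	/* ... DMA runs; the completion callback takes the buffer back */
	dma_sync_single_for_cpu(dev, sketch_phys, len, DMA_TO_DEVICE);
}

static void sketch_teardown(struct device *dev, size_t bufsize)
{
	dma_free_noncoherent(dev, bufsize, sketch_buf, sketch_phys,
			     DMA_TO_DEVICE);
}

So is the expected win here avoiding uncached CPU accesses on platforms
where coherent allocations map the memory uncached, or is it mainly
about tidying up alongside the other DMA fixes in the series?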
Frank
>
> Suggested-by: Arnd Bergmann <arnd@...db.de>
> Signed-off-by: James Clark <james.clark@...aro.org>
> ---
>  drivers/spi/spi-fsl-dspi.c | 55 +++++++++++++++++++++++++++++-----------------
>  1 file changed, 35 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
> index 386a17871e79..567632042f8f 100644
> --- a/drivers/spi/spi-fsl-dspi.c
> +++ b/drivers/spi/spi-fsl-dspi.c
> @@ -247,6 +247,11 @@ struct fsl_dspi {
>  	void (*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata);
>  };
>
> +static int dspi_dma_transfer_size(struct fsl_dspi *dspi)
> +{
> +	return dspi->words_in_flight * DMA_SLAVE_BUSWIDTH_4_BYTES;
> +}
> +
>  static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
>  {
>  	switch (dspi->oper_word_size) {
> @@ -361,7 +366,10 @@ static void dspi_tx_dma_callback(void *arg)
>  {
>  	struct fsl_dspi *dspi = arg;
>  	struct fsl_dspi_dma *dma = dspi->dma;
> +	struct device *dev = &dspi->pdev->dev;
>
> +	dma_sync_single_for_cpu(dev, dma->tx_dma_phys,
> +				dspi_dma_transfer_size(dspi), DMA_TO_DEVICE);
>  	complete(&dma->cmd_tx_complete);
>  }
>
> @@ -369,9 +377,13 @@ static void dspi_rx_dma_callback(void *arg)
>  {
>  	struct fsl_dspi *dspi = arg;
>  	struct fsl_dspi_dma *dma = dspi->dma;
> +	struct device *dev = &dspi->pdev->dev;
>  	int i;
>
>  	if (dspi->rx) {
> +		dma_sync_single_for_cpu(dev, dma->rx_dma_phys,
> +					dspi_dma_transfer_size(dspi),
> +					DMA_FROM_DEVICE);
>  		for (i = 0; i < dspi->words_in_flight; i++)
>  			dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
>  	}
> @@ -381,6 +393,7 @@ static void dspi_rx_dma_callback(void *arg)
>
>  static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
>  {
> +	size_t size = dspi_dma_transfer_size(dspi);
>  	struct device *dev = &dspi->pdev->dev;
>  	struct fsl_dspi_dma *dma = dspi->dma;
>  	int time_left;
> @@ -389,10 +402,9 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
>  	for (i = 0; i < dspi->words_in_flight; i++)
>  		dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);
>
> +	dma_sync_single_for_device(dev, dma->tx_dma_phys, size, DMA_TO_DEVICE);
>  	dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
> -					dma->tx_dma_phys,
> -					dspi->words_in_flight *
> -					DMA_SLAVE_BUSWIDTH_4_BYTES,
> +					dma->tx_dma_phys, size,
>  					DMA_MEM_TO_DEV,
>  					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
>  	if (!dma->tx_desc) {
> @@ -407,10 +419,10 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
>  		return -EINVAL;
>  	}
>
> +	dma_sync_single_for_device(dev, dma->rx_dma_phys, size,
> +				   DMA_FROM_DEVICE);
>  	dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
> -					dma->rx_dma_phys,
> -					dspi->words_in_flight *
> -					DMA_SLAVE_BUSWIDTH_4_BYTES,
> +					dma->rx_dma_phys, size,
>  					DMA_DEV_TO_MEM,
>  					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
>  	if (!dma->rx_desc) {
> @@ -512,17 +524,17 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
>  		goto err_tx_channel;
>  	}
>
> -	dma->tx_dma_buf = dma_alloc_coherent(dma->chan_tx->device->dev,
> -					     dma_bufsize, &dma->tx_dma_phys,
> -					     GFP_KERNEL);
> +	dma->tx_dma_buf = dma_alloc_noncoherent(dma->chan_tx->device->dev,
> +						dma_bufsize, &dma->tx_dma_phys,
> +						DMA_TO_DEVICE, GFP_KERNEL);
>  	if (!dma->tx_dma_buf) {
>  		ret = -ENOMEM;
>  		goto err_tx_dma_buf;
>  	}
>
> -	dma->rx_dma_buf = dma_alloc_coherent(dma->chan_rx->device->dev,
> -					     dma_bufsize, &dma->rx_dma_phys,
> -					     GFP_KERNEL);
> +	dma->rx_dma_buf = dma_alloc_noncoherent(dma->chan_rx->device->dev,
> +						dma_bufsize, &dma->rx_dma_phys,
> +						DMA_FROM_DEVICE, GFP_KERNEL);
>  	if (!dma->rx_dma_buf) {
>  		ret = -ENOMEM;
>  		goto err_rx_dma_buf;
> @@ -557,11 +569,12 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
>  	return 0;
>
>  err_slave_config:
> -	dma_free_coherent(dma->chan_rx->device->dev,
> -			  dma_bufsize, dma->rx_dma_buf, dma->rx_dma_phys);
> +	dma_free_noncoherent(dma->chan_rx->device->dev, dma_bufsize,
> +			     dma->rx_dma_buf, dma->rx_dma_phys,
> +			     DMA_FROM_DEVICE);
>  err_rx_dma_buf:
> -	dma_free_coherent(dma->chan_tx->device->dev,
> -			  dma_bufsize, dma->tx_dma_buf, dma->tx_dma_phys);
> +	dma_free_noncoherent(dma->chan_tx->device->dev, dma_bufsize,
> +			     dma->tx_dma_buf, dma->tx_dma_phys, DMA_TO_DEVICE);
>  err_tx_dma_buf:
>  	dma_release_channel(dma->chan_tx);
>  err_tx_channel:
> @@ -582,14 +595,16 @@ static void dspi_release_dma(struct fsl_dspi *dspi)
>  		return;
>
>  	if (dma->chan_tx) {
> -		dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize,
> -				  dma->tx_dma_buf, dma->tx_dma_phys);
> +		dma_free_noncoherent(dma->chan_tx->device->dev, dma_bufsize,
> +				     dma->tx_dma_buf, dma->tx_dma_phys,
> +				     DMA_TO_DEVICE);
>  		dma_release_channel(dma->chan_tx);
>  	}
>
>  	if (dma->chan_rx) {
> -		dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize,
> -				  dma->rx_dma_buf, dma->rx_dma_phys);
> +		dma_free_noncoherent(dma->chan_rx->device->dev, dma_bufsize,
> +				     dma->rx_dma_buf, dma->rx_dma_phys,
> +				     DMA_FROM_DEVICE);
>  		dma_release_channel(dma->chan_rx);
>  	}
>  }
>
> --
> 2.34.1
>