Message-ID: <aF7zUFaOPDr18Uns@lizhi-Precision-Tower-5810>
Date: Fri, 27 Jun 2025 15:38:56 -0400
From: Frank Li <Frank.li@....com>
To: James Clark <james.clark@...aro.org>
Cc: Vladimir Oltean <olteanv@...il.com>, Mark Brown <broonie@...nel.org>,
Vladimir Oltean <vladimir.oltean@....com>,
Arnd Bergmann <arnd@...db.de>,
Larisa Grigore <larisa.grigore@....com>,
Christoph Hellwig <hch@....de>, linux-spi@...r.kernel.org,
imx@...ts.linux.dev, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v4 4/6] spi: spi-fsl-dspi: Use non-coherent memory for DMA
On Fri, Jun 27, 2025 at 11:21:40AM +0100, James Clark wrote:
> Using coherent memory here isn't functionally necessary; we're only
> ever sending data to the device or reading from it. This means
> explicit synchronization is only required around those points, and the
> change is fairly trivial.
>
> This gives us around a 10% increase in throughput for large DMA
> transfers and no loss for small transfers.
>
> Suggested-by: Arnd Bergmann <arnd@...db.de>
> Signed-off-by: James Clark <james.clark@...aro.org>
Reviewed-by: Frank Li <Frank.Li@....com>
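
For reference, the ownership rule this change relies on is roughly the
pattern below. This is only a sketch with made-up names (EXAMPLE_BUFSIZE,
example_*), not the dspi driver's actual code; in the driver the buffers
live in struct fsl_dspi_dma, the for_device syncs are done in
dspi_next_xfer_dma_submit(), and the for_cpu syncs in the DMA callbacks.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Illustrative size only; the driver computes its own dma_bufsize. */
#define EXAMPLE_BUFSIZE		PAGE_SIZE

static void *example_tx_alloc(struct device *dev, dma_addr_t *phys)
{
	/*
	 * Non-coherent allocation: plain cached memory, so the CPU and
	 * device views must be reconciled with explicit sync calls.
	 */
	return dma_alloc_noncoherent(dev, EXAMPLE_BUFSIZE, phys,
				     DMA_TO_DEVICE, GFP_KERNEL);
}

static void example_tx_once(struct device *dev, void *buf, dma_addr_t phys,
			    size_t len)
{
	/* The CPU owns the buffer here and may fill it freely. */
	memset(buf, 0xa5, len);

	/* Hand ownership to the device before the DMA engine reads it. */
	dma_sync_single_for_device(dev, phys, len, DMA_TO_DEVICE);

	/* ... prep/submit the descriptor and wait for completion ... */

	/* Reclaim ownership before the CPU touches the buffer again. */
	dma_sync_single_for_cpu(dev, phys, len, DMA_TO_DEVICE);
}

static void example_tx_free(struct device *dev, void *buf, dma_addr_t phys)
{
	dma_free_noncoherent(dev, EXAMPLE_BUFSIZE, buf, phys, DMA_TO_DEVICE);
}

The RX side is symmetric with DMA_FROM_DEVICE: sync for_device before the
transfer starts and for_cpu before reading back the received words, which
is where the patch places the calls.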
> ---
> drivers/spi/spi-fsl-dspi.c | 55 +++++++++++++++++++++++++++++-----------------
> 1 file changed, 35 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
> index feff475dddfc..e7856f9c9440 100644
> --- a/drivers/spi/spi-fsl-dspi.c
> +++ b/drivers/spi/spi-fsl-dspi.c
> @@ -493,11 +493,19 @@ static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
> return cmd << 16 | data;
> }
>
> +static int dspi_dma_transfer_size(struct fsl_dspi *dspi)
> +{
> + return dspi->words_in_flight * DMA_SLAVE_BUSWIDTH_4_BYTES;
> +}
> +
> static void dspi_tx_dma_callback(void *arg)
> {
> struct fsl_dspi *dspi = arg;
> struct fsl_dspi_dma *dma = dspi->dma;
> + struct device *dev = &dspi->pdev->dev;
>
> + dma_sync_single_for_cpu(dev, dma->tx_dma_phys,
> + dspi_dma_transfer_size(dspi), DMA_TO_DEVICE);
> complete(&dma->cmd_tx_complete);
> }
>
> @@ -505,9 +513,13 @@ static void dspi_rx_dma_callback(void *arg)
> {
> struct fsl_dspi *dspi = arg;
> struct fsl_dspi_dma *dma = dspi->dma;
> + struct device *dev = &dspi->pdev->dev;
> int i;
>
> if (dspi->rx) {
> + dma_sync_single_for_cpu(dev, dma->rx_dma_phys,
> + dspi_dma_transfer_size(dspi),
> + DMA_FROM_DEVICE);
> for (i = 0; i < dspi->words_in_flight; i++)
> dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
> }
> @@ -517,6 +529,7 @@ static void dspi_rx_dma_callback(void *arg)
>
> static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
> {
> + size_t size = dspi_dma_transfer_size(dspi);
> struct device *dev = &dspi->pdev->dev;
> struct fsl_dspi_dma *dma = dspi->dma;
> int time_left;
> @@ -525,10 +538,9 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
> for (i = 0; i < dspi->words_in_flight; i++)
> dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);
>
> + dma_sync_single_for_device(dev, dma->tx_dma_phys, size, DMA_TO_DEVICE);
> dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
> - dma->tx_dma_phys,
> - dspi->words_in_flight *
> - DMA_SLAVE_BUSWIDTH_4_BYTES,
> + dma->tx_dma_phys, size,
> DMA_MEM_TO_DEV,
> DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
> if (!dma->tx_desc) {
> @@ -543,10 +555,10 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
> return -EINVAL;
> }
>
> + dma_sync_single_for_device(dev, dma->rx_dma_phys, size,
> + DMA_FROM_DEVICE);
> dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
> - dma->rx_dma_phys,
> - dspi->words_in_flight *
> - DMA_SLAVE_BUSWIDTH_4_BYTES,
> + dma->rx_dma_phys, size,
> DMA_DEV_TO_MEM,
> DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
> if (!dma->rx_desc) {
> @@ -643,17 +655,17 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
> goto err_tx_channel;
> }
>
> - dma->tx_dma_buf = dma_alloc_coherent(dma->chan_tx->device->dev,
> - dma_bufsize, &dma->tx_dma_phys,
> - GFP_KERNEL);
> + dma->tx_dma_buf = dma_alloc_noncoherent(dma->chan_tx->device->dev,
> + dma_bufsize, &dma->tx_dma_phys,
> + DMA_TO_DEVICE, GFP_KERNEL);
> if (!dma->tx_dma_buf) {
> ret = -ENOMEM;
> goto err_tx_dma_buf;
> }
>
> - dma->rx_dma_buf = dma_alloc_coherent(dma->chan_rx->device->dev,
> - dma_bufsize, &dma->rx_dma_phys,
> - GFP_KERNEL);
> + dma->rx_dma_buf = dma_alloc_noncoherent(dma->chan_rx->device->dev,
> + dma_bufsize, &dma->rx_dma_phys,
> + DMA_FROM_DEVICE, GFP_KERNEL);
> if (!dma->rx_dma_buf) {
> ret = -ENOMEM;
> goto err_rx_dma_buf;
> @@ -688,11 +700,12 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
> return 0;
>
> err_slave_config:
> - dma_free_coherent(dma->chan_rx->device->dev,
> - dma_bufsize, dma->rx_dma_buf, dma->rx_dma_phys);
> + dma_free_noncoherent(dma->chan_rx->device->dev, dma_bufsize,
> + dma->rx_dma_buf, dma->rx_dma_phys,
> + DMA_FROM_DEVICE);
> err_rx_dma_buf:
> - dma_free_coherent(dma->chan_tx->device->dev,
> - dma_bufsize, dma->tx_dma_buf, dma->tx_dma_phys);
> + dma_free_noncoherent(dma->chan_tx->device->dev, dma_bufsize,
> + dma->tx_dma_buf, dma->tx_dma_phys, DMA_TO_DEVICE);
> err_tx_dma_buf:
> dma_release_channel(dma->chan_tx);
> err_tx_channel:
> @@ -713,14 +726,16 @@ static void dspi_release_dma(struct fsl_dspi *dspi)
> return;
>
> if (dma->chan_tx) {
> - dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize,
> - dma->tx_dma_buf, dma->tx_dma_phys);
> + dma_free_noncoherent(dma->chan_tx->device->dev, dma_bufsize,
> + dma->tx_dma_buf, dma->tx_dma_phys,
> + DMA_TO_DEVICE);
> dma_release_channel(dma->chan_tx);
> }
>
> if (dma->chan_rx) {
> - dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize,
> - dma->rx_dma_buf, dma->rx_dma_phys);
> + dma_free_noncoherent(dma->chan_rx->device->dev, dma_bufsize,
> + dma->rx_dma_buf, dma->rx_dma_phys,
> + DMA_FROM_DEVICE);
> dma_release_channel(dma->chan_rx);
> }
> }
>
> --
> 2.34.1
>