[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20251208-dma_prep_config-v1-4-53490c5e1e2a@nxp.com>
Date: Mon, 08 Dec 2025 12:09:43 -0500
From: Frank Li <Frank.Li@....com>
To: Vinod Koul <vkoul@...nel.org>, Manivannan Sadhasivam <mani@...nel.org>,
Krzysztof Wilczyński <kwilczynski@...nel.org>,
Kishon Vijay Abraham I <kishon@...nel.org>,
Bjorn Helgaas <bhelgaas@...gle.com>, Christoph Hellwig <hch@....de>,
Sagi Grimberg <sagi@...mberg.me>, Chaitanya Kulkarni <kch@...dia.com>,
Herbert Xu <herbert@...dor.apana.org.au>,
"David S. Miller" <davem@...emloft.net>,
Nicolas Ferre <nicolas.ferre@...rochip.com>,
Alexandre Belloni <alexandre.belloni@...tlin.com>,
Claudiu Beznea <claudiu.beznea@...on.dev>, Koichiro Den <den@...inux.co.jp>,
Niklas Cassel <cassel@...nel.org>
Cc: dmaengine@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-pci@...r.kernel.org, linux-nvme@...ts.infradead.org,
mhi@...ts.linux.dev, linux-arm-msm@...r.kernel.org,
linux-crypto@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
imx@...ts.linux.dev, Frank Li <Frank.Li@....com>
Subject: [PATCH 4/8] dmaengine: dw-edma: Pass dma_slave_config to
dw_edma_device_transfer()
Pass a dma_slave_config pointer to dw_edma_device_transfer() so that, when
a non-NULL config is supplied to device_prep_slave_sg_config(), channel
configuration and descriptor preparation can be performed atomically.
Signed-off-by: Frank Li <Frank.Li@....com>
---
drivers/dma/dw-edma/dw-edma-core.c | 22 ++++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 58f98542f8329a3bfdc5455768e8394ae601ab5f..744c60ec964102287bd32b9e55d0f3024d1d39d9 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -230,6 +230,15 @@ static int dw_edma_device_config(struct dma_chan *dchan,
return 0;
}
+static struct dma_slave_config *
+dw_edma_device_get_config(struct dma_chan *dchan,
+ struct dma_slave_config *config)
+{
+ struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+
+ return config ? config : &chan->config;
+}
+
static int dw_edma_device_pause(struct dma_chan *dchan)
{
struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
@@ -348,7 +357,8 @@ dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
}
static struct dma_async_tx_descriptor *
-dw_edma_device_transfer(struct dw_edma_transfer *xfer)
+dw_edma_device_transfer(struct dw_edma_transfer *xfer,
+ struct dma_slave_config *config)
{
struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
enum dma_transfer_direction dir = xfer->direction;
@@ -427,8 +437,8 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
src_addr = xfer->xfer.il->src_start;
dst_addr = xfer->xfer.il->dst_start;
} else {
- src_addr = chan->config.src_addr;
- dst_addr = chan->config.dst_addr;
+ src_addr = config->src_addr;
+ dst_addr = config->dst_addr;
}
if (dir == DMA_DEV_TO_MEM)
@@ -552,7 +562,7 @@ dw_edma_device_prep_slave_sg_config(struct dma_chan *dchan,
if (config)
dw_edma_device_config(dchan, config);
- return dw_edma_device_transfer(&xfer);
+ return dw_edma_device_transfer(&xfer, dw_edma_device_get_config(dchan, config));
}
static struct dma_async_tx_descriptor *
@@ -571,7 +581,7 @@ dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
xfer.flags = flags;
xfer.type = EDMA_XFER_CYCLIC;
- return dw_edma_device_transfer(&xfer);
+ return dw_edma_device_transfer(&xfer, dw_edma_device_get_config(dchan, NULL));
}
static struct dma_async_tx_descriptor *
@@ -587,7 +597,7 @@ dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
xfer.flags = flags;
xfer.type = EDMA_XFER_INTERLEAVED;
- return dw_edma_device_transfer(&xfer);
+ return dw_edma_device_transfer(&xfer, dw_edma_device_get_config(dchan, NULL));
}
static void dw_hdma_set_callback_result(struct virt_dma_desc *vd,
--
2.34.1
Powered by blists - more mailing lists