Message-Id: <20251218-dma_prep_config-v2-4-c07079836128@nxp.com>
Date: Thu, 18 Dec 2025 10:56:24 -0500
From: Frank Li <Frank.Li@....com>
To: Vinod Koul <vkoul@...nel.org>, Manivannan Sadhasivam <mani@...nel.org>,
Krzysztof Wilczyński <kwilczynski@...nel.org>,
Kishon Vijay Abraham I <kishon@...nel.org>,
Bjorn Helgaas <bhelgaas@...gle.com>, Christoph Hellwig <hch@....de>,
Sagi Grimberg <sagi@...mberg.me>, Chaitanya Kulkarni <kch@...dia.com>,
Herbert Xu <herbert@...dor.apana.org.au>,
"David S. Miller" <davem@...emloft.net>,
Nicolas Ferre <nicolas.ferre@...rochip.com>,
Alexandre Belloni <alexandre.belloni@...tlin.com>,
Claudiu Beznea <claudiu.beznea@...on.dev>, Koichiro Den <den@...inux.co.jp>,
Niklas Cassel <cassel@...nel.org>
Cc: dmaengine@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-pci@...r.kernel.org, linux-nvme@...ts.infradead.org,
mhi@...ts.linux.dev, linux-arm-msm@...r.kernel.org,
linux-crypto@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
imx@...ts.linux.dev, Frank Li <Frank.Li@....com>
Subject: [PATCH v2 4/8] dmaengine: dw-edma: Pass dma_slave_config to
dw_edma_device_transfer()
Pass a struct dma_slave_config pointer down to dw_edma_device_transfer()
so that channel configuration and descriptor preparation happen
atomically when a non-NULL config is provided to device_prep_config_sg().
When no per-call config is supplied, fall back to the configuration
previously stored on the channel by dw_edma_device_config().
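
For illustration only, a minimal standalone sketch of the fallback
pattern that the new dw_edma_device_get_config() helper implements;
"demo_chan" is a hypothetical stand-in for struct dw_edma_chan, not
code from this patch:

  #include <linux/dmaengine.h>

  /* Hypothetical stand-in for struct dw_edma_chan (illustration only). */
  struct demo_chan {
          struct dma_slave_config config; /* stored by device_config() */
  };

  /*
   * Prefer the config passed along with the prepare call; otherwise
   * fall back to the configuration previously stored on the channel.
   */
  static struct dma_slave_config *
  demo_get_config(struct demo_chan *chan, struct dma_slave_config *config)
  {
          return config ? config : &chan->config;
  }
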
Tested-by: Niklas Cassel <cassel@...nel.org>
Signed-off-by: Frank Li <Frank.Li@....com>
---
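
Illustrative note, not part of the commit message: assuming the
device_prep_config_sg() callback introduced earlier in this series
takes the same arguments as device_prep_slave_sg() plus a trailing
struct dma_slave_config * (an assumption; only part of the prototype
is visible in the hunks below), a client could configure and prepare
in one step roughly like this:

  #include <linux/dmaengine.h>

  /*
   * Hypothetical caller sketch; the device_prep_config_sg() argument
   * order is assumed here, not taken from this patch.
   */
  static struct dma_async_tx_descriptor *
  demo_prep(struct dma_chan *chan, struct scatterlist *sgl,
            unsigned int sg_len, dma_addr_t dev_addr)
  {
          struct dma_slave_config cfg = {
                  .direction = DMA_MEM_TO_DEV,
                  .dst_addr = dev_addr,
          };

          /*
           * Configure and prepare atomically, with no separate
           * dmaengine_slave_config() call racing other users of the
           * channel.
           */
          return chan->device->device_prep_config_sg(chan, sgl, sg_len,
                                                     DMA_MEM_TO_DEV,
                                                     DMA_PREP_INTERRUPT,
                                                     &cfg);
  }
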
drivers/dma/dw-edma/dw-edma-core.c | 22 ++++++++++++++++------
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index e005b7bdaee156a3f4573b4734f50e3e47553dd2..1863254bf61eb892cb0cf8934e53b40d32027dfa 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -230,6 +230,15 @@ static int dw_edma_device_config(struct dma_chan *dchan,
 	return 0;
 }
 
+static struct dma_slave_config *
+dw_edma_device_get_config(struct dma_chan *dchan,
+			  struct dma_slave_config *config)
+{
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+
+	return config ? config : &chan->config;
+}
+
 static int dw_edma_device_pause(struct dma_chan *dchan)
 {
 	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
@@ -348,7 +357,8 @@ dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
 }
 
 static struct dma_async_tx_descriptor *
-dw_edma_device_transfer(struct dw_edma_transfer *xfer)
+dw_edma_device_transfer(struct dw_edma_transfer *xfer,
+			struct dma_slave_config *config)
 {
 	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
 	enum dma_transfer_direction dir = xfer->direction;
@@ -427,8 +437,8 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
 		src_addr = xfer->xfer.il->src_start;
 		dst_addr = xfer->xfer.il->dst_start;
 	} else {
-		src_addr = chan->config.src_addr;
-		dst_addr = chan->config.dst_addr;
+		src_addr = config->src_addr;
+		dst_addr = config->dst_addr;
 	}
 
 	if (dir == DMA_DEV_TO_MEM)
@@ -550,7 +560,7 @@ dw_edma_device_prep_config_sg(struct dma_chan *dchan, struct scatterlist *sgl,
 	if (config)
 		dw_edma_device_config(dchan, config);
 
-	return dw_edma_device_transfer(&xfer);
+	return dw_edma_device_transfer(&xfer, dw_edma_device_get_config(dchan, config));
 }
 
 static struct dma_async_tx_descriptor *
@@ -569,7 +579,7 @@ dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
 	xfer.flags = flags;
 	xfer.type = EDMA_XFER_CYCLIC;
 
-	return dw_edma_device_transfer(&xfer);
+	return dw_edma_device_transfer(&xfer, dw_edma_device_get_config(dchan, NULL));
 }
 
 static struct dma_async_tx_descriptor *
@@ -585,7 +595,7 @@ dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
 	xfer.flags = flags;
 	xfer.type = EDMA_XFER_INTERLEAVED;
 
-	return dw_edma_device_transfer(&xfer);
+	return dw_edma_device_transfer(&xfer, dw_edma_device_get_config(dchan, NULL));
 }
 
 static void dw_hdma_set_callback_result(struct virt_dma_desc *vd,
--
2.34.1