Message-ID: <20240310124836.31863-3-paul@crapouillou.net>
Date: Sun, 10 Mar 2024 13:48:31 +0100
From: Paul Cercueil <paul@...pouillou.net>
To: Jonathan Cameron <jic23@...nel.org>,
Christian König <christian.koenig@....com>,
Jonathan Corbet <corbet@....net>,
Lars-Peter Clausen <lars@...afoo.de>,
Vinod Koul <vkoul@...nel.org>,
Sumit Semwal <sumit.semwal@...aro.org>
Cc: Nuno Sa <nuno.sa@...log.com>,
Michael Hennerich <michael.hennerich@...log.com>,
linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org,
dmaengine@...r.kernel.org,
linux-iio@...r.kernel.org,
linux-media@...r.kernel.org,
dri-devel@...ts.freedesktop.org,
linaro-mm-sig@...ts.linaro.org,
Paul Cercueil <paul@...pouillou.net>
Subject: [PATCH v9 2/6] dmaengine: dma-axi-dmac: Implement device_prep_peripheral_dma_vec

Add an implementation of the .device_prep_peripheral_dma_vec() callback,
which turns an array of struct dma_vec address/length pairs into the
driver's scatter-gather descriptors.

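For context, a peripheral client driver would typically build an array of
struct dma_vec entries and hand them to the channel through the wrapper
introduced in patch 1 of this series. Below is a minimal sketch of such a
client call site, assuming a dmaengine_prep_peripheral_dma_vec() helper
(its exact name and signature in this series may differ); the function
name example_submit_vecs is hypothetical and error handling is trimmed:

    #include <linux/dmaengine.h>
    #include <linux/errno.h>

    /* Hypothetical client helper; not part of this patch. */
    static int example_submit_vecs(struct dma_chan *chan,
                                   const struct dma_vec *vecs, size_t nb)
    {
            struct dma_async_tx_descriptor *desc;
            dma_cookie_t cookie;

            /* Prepare one transfer covering all address/length vectors. */
            desc = dmaengine_prep_peripheral_dma_vec(chan, vecs, nb,
                                                     DMA_DEV_TO_MEM,
                                                     DMA_PREP_INTERRUPT);
            if (!desc)
                    return -EIO;

            /* Queue the descriptor and kick off the transfer. */
            cookie = dmaengine_submit(desc);
            if (dma_submit_error(cookie))
                    return -EIO;

            dma_async_issue_pending(chan);
            return 0;
    }
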
Signed-off-by: Paul Cercueil <paul@...pouillou.net>
Signed-off-by: Nuno Sa <nuno.sa@...log.com>
---
v3: New patch
v5: Implement .device_prep_slave_dma_vec() instead of v3's
.device_prep_slave_dma_array().
v6: Use new prototype for axi_dmac_alloc_desc() as it changed upstream.
v7: Adapted patch for the changes made in patch 1.
---
drivers/dma/dma-axi-dmac.c | 40 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 4e339c04fc1e..d03cbf1d28e8 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -620,6 +620,45 @@ static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
return sg;
 }

+static struct dma_async_tx_descriptor *
+axi_dmac_prep_peripheral_dma_vec(struct dma_chan *c, const struct dma_vec *vecs,
+ size_t nb, enum dma_transfer_direction direction,
+ unsigned long prep_flags, unsigned long flags)
+{
+ struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+ struct axi_dmac_desc *desc;
+ unsigned int num_sgs = 0;
+ struct axi_dmac_sg *dsg;
+ size_t i;
+
+ if (direction != chan->direction)
+ return NULL;
+
+ for (i = 0; i < nb; i++)
+ num_sgs += DIV_ROUND_UP(vecs[i].len, chan->max_length);
+
+ desc = axi_dmac_alloc_desc(chan, num_sgs);
+ if (!desc)
+ return NULL;
+
+ dsg = desc->sg;
+
+ for (i = 0; i < nb; i++) {
+ if (!axi_dmac_check_addr(chan, vecs[i].addr) ||
+ !axi_dmac_check_len(chan, vecs[i].len)) {
+ kfree(desc);
+ return NULL;
+ }
+
+ dsg = axi_dmac_fill_linear_sg(chan, direction, vecs[i].addr, 1,
+ vecs[i].len, dsg);
+ }
+
+ desc->cyclic = false;
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, prep_flags);
+}
+
static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
struct dma_chan *c, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -1055,6 +1094,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
dma_dev->device_tx_status = dma_cookie_status;
dma_dev->device_issue_pending = axi_dmac_issue_pending;
dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
+ dma_dev->device_prep_peripheral_dma_vec = axi_dmac_prep_peripheral_dma_vec;
dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
dma_dev->device_terminate_all = axi_dmac_terminate_all;
--
2.43.0