Message-ID: <20240618100302.72886-3-paul@crapouillou.net>
Date: Tue, 18 Jun 2024 12:02:57 +0200
From: Paul Cercueil <paul@...pouillou.net>
To: Jonathan Cameron <jic23@...nel.org>,
Lars-Peter Clausen <lars@...afoo.de>,
Vinod Koul <vkoul@...nel.org>,
Sumit Semwal <sumit.semwal@...aro.org>,
Christian König <christian.koenig@....com>
Cc: Jonathan Corbet <corbet@....net>,
Nuno Sa <nuno.sa@...log.com>,
linux-iio@...r.kernel.org,
linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org,
dmaengine@...r.kernel.org,
linux-media@...r.kernel.org,
dri-devel@...ts.freedesktop.org,
linaro-mm-sig@...ts.linaro.org,
Paul Cercueil <paul@...pouillou.net>
Subject: [PATCH v11 2/7] dmaengine: dma-axi-dmac: Implement device_prep_peripheral_dma_vec

Add an implementation of the .device_prep_peripheral_dma_vec() callback,
which prepares a transfer from an array of address/length vectors. Each
vector is split into as many hardware segments as needed to honour the
channel's maximum transfer length.

Signed-off-by: Paul Cercueil <paul@...pouillou.net>
Co-developed-by: Nuno Sa <nuno.sa@...log.com>
Signed-off-by: Nuno Sa <nuno.sa@...log.com>
---
v3: New patch
v5: Implement .device_prep_slave_dma_vec() instead of v3's
.device_prep_slave_dma_array().
v6: Use new prototype for axi_dmac_alloc_desc() as it changed upstream.
v7: Adapted patch for the changes made in patch 1.
v10: Use the new function prototype (without the extra prep_flags).
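
As extra context for reviewers, here is a minimal, hypothetical sketch of
how a client driver could exercise this path through the dmaengine core.
It assumes the dmaengine_prep_peripheral_dma_vec() wrapper and struct
dma_vec introduced in patch 1 of this series; the function name, error
codes and flags below are illustrative only and not part of this patch.

#include <linux/dmaengine.h>
#include <linux/kernel.h>

/*
 * Hypothetical consumer sketch: submit two pre-mapped DMA regions to the
 * peripheral as a single transfer. "chan" must already be requested, and
 * the direction passed to the prep call must match the channel's fixed
 * direction, since the AXI DMAC rejects mismatched directions.
 */
static int example_submit_vecs(struct dma_chan *chan,
                               dma_addr_t addr0, size_t len0,
                               dma_addr_t addr1, size_t len1)
{
        struct dma_vec vecs[] = {
                { .addr = addr0, .len = len0 },
                { .addr = addr1, .len = len1 },
        };
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        /* Prepare one descriptor covering both vectors */
        desc = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
                                                 DMA_DEV_TO_MEM,
                                                 DMA_PREP_INTERRUPT);
        if (!desc)
                return -ENOMEM;

        cookie = dmaengine_submit(desc);
        if (dma_submit_error(cookie))
                return -EINVAL;

        dma_async_issue_pending(chan);

        return 0;
}

The driver takes care of splitting each vector according to the channel's
maximum transfer length, so callers do not need to know that limit.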
---
drivers/dma/dma-axi-dmac.c | 40 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index bdb752f11869..36943b0c6d60 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -620,6 +620,45 @@ static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
return sg;
}

+static struct dma_async_tx_descriptor *
+axi_dmac_prep_peripheral_dma_vec(struct dma_chan *c, const struct dma_vec *vecs,
+ size_t nb, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+ struct axi_dmac_desc *desc;
+ unsigned int num_sgs = 0;
+ struct axi_dmac_sg *dsg;
+ size_t i;
+
+ if (direction != chan->direction)
+ return NULL;
+
+ for (i = 0; i < nb; i++)
+ num_sgs += DIV_ROUND_UP(vecs[i].len, chan->max_length);
+
+ desc = axi_dmac_alloc_desc(chan, num_sgs);
+ if (!desc)
+ return NULL;
+
+ dsg = desc->sg;
+
+ for (i = 0; i < nb; i++) {
+ if (!axi_dmac_check_addr(chan, vecs[i].addr) ||
+ !axi_dmac_check_len(chan, vecs[i].len)) {
+ kfree(desc);
+ return NULL;
+ }
+
+ dsg = axi_dmac_fill_linear_sg(chan, direction, vecs[i].addr, 1,
+ vecs[i].len, dsg);
+ }
+
+ desc->cyclic = false;
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
struct dma_chan *c, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -1061,6 +1100,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
dma_dev->device_tx_status = dma_cookie_status;
dma_dev->device_issue_pending = axi_dmac_issue_pending;
dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
+ dma_dev->device_prep_peripheral_dma_vec = axi_dmac_prep_peripheral_dma_vec;
dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
dma_dev->device_terminate_all = axi_dmac_terminate_all;
--
2.43.0