Message-Id: <20241023-dlech-mainline-spi-engine-offload-2-v4-11-f8125b99f5a1@baylibre.com>
Date: Wed, 23 Oct 2024 15:59:18 -0500
From: David Lechner <dlechner@...libre.com>
To: Mark Brown <broonie@...nel.org>, Jonathan Cameron <jic23@...nel.org>,
 Rob Herring <robh@...nel.org>, Krzysztof Kozlowski <krzk+dt@...nel.org>,
 Conor Dooley <conor+dt@...nel.org>,
 Nuno Sá <nuno.sa@...log.com>,
 Uwe Kleine-König <ukleinek@...nel.org>
Cc: Michael Hennerich <Michael.Hennerich@...log.com>,
 Lars-Peter Clausen <lars@...afoo.de>, David Jander <david@...tonic.nl>,
 Martin Sperl <kernel@...tin.sperl.org>, linux-spi@...r.kernel.org,
 devicetree@...r.kernel.org, linux-kernel@...r.kernel.org,
 linux-iio@...r.kernel.org, linux-pwm@...r.kernel.org,
 David Lechner <dlechner@...libre.com>
Subject: [PATCH RFC v4 11/15] iio: buffer-dmaengine: add
 devm_iio_dmaengine_buffer_setup_ext2()

Add a new devm_iio_dmaengine_buffer_setup_ext2() function to handle
cases where the DMA channel is managed by the caller rather than being
requested and released by the iio_dmaengine module.
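
For example, a caller that owns the DMA channel could use it along
these lines (illustrative sketch only, not part of this patch; the
foo_*() names and the "rx" channel name are made up):

	/* Release helper for the devm action below. */
	static void foo_dma_release(void *chan)
	{
		dma_release_channel(chan);
	}

	static int foo_setup_buffer(struct device *dev, struct iio_dev *indio_dev)
	{
		struct dma_chan *chan;
		int ret;

		chan = dma_request_chan(dev, "rx");
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		/*
		 * Register the channel release first: devm actions run in
		 * reverse order, so the buffer registered below is torn
		 * down before the channel is released.
		 */
		ret = devm_add_action_or_reset(dev, foo_dma_release, chan);
		if (ret)
			return ret;

		return devm_iio_dmaengine_buffer_setup_ext2(dev, indio_dev, chan,
							    IIO_BUFFER_DIRECTION_IN);
	}
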
Signed-off-by: David Lechner <dlechner@...libre.com>
---
v4 changes:
* This replaces "iio: buffer-dmaengine: generalize requesting DMA channel"
---
 drivers/iio/buffer/industrialio-buffer-dmaengine.c | 107 +++++++++++++++------
 include/linux/iio/buffer-dmaengine.h               |   5 +
 2 files changed, 81 insertions(+), 31 deletions(-)

diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 054af21dfa65..602cb2e147a6 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -33,6 +33,7 @@ struct dmaengine_buffer {
 	struct iio_dma_buffer_queue queue;
 
 	struct dma_chan *chan;
+	bool owns_chan;
 	struct list_head active;
 
 	size_t align;
@@ -216,28 +217,23 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
  * Once done using the buffer iio_dmaengine_buffer_free() should be used to
  * release it.
  */
-static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
-	const char *channel)
+static struct iio_buffer *iio_dmaengine_buffer_alloc(struct dma_chan *chan,
+	bool owns_chan)
 {
 	struct dmaengine_buffer *dmaengine_buffer;
 	unsigned int width, src_width, dest_width;
 	struct dma_slave_caps caps;
-	struct dma_chan *chan;
 	int ret;
 
 	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
-	if (!dmaengine_buffer)
-		return ERR_PTR(-ENOMEM);
-
-	chan = dma_request_chan(dev, channel);
-	if (IS_ERR(chan)) {
-		ret = PTR_ERR(chan);
-		goto err_free;
+	if (!dmaengine_buffer) {
+		ret = -ENOMEM;
+		goto err_release;
 	}
 
 	ret = dma_get_slave_caps(chan, &caps);
 	if (ret < 0)
-		goto err_release;
+		goto err_free;
 
 	/* Needs to be aligned to the maximum of the minimums */
 	if (caps.src_addr_widths)
@@ -252,6 +248,7 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
 
 	INIT_LIST_HEAD(&dmaengine_buffer->active);
 	dmaengine_buffer->chan = chan;
+	dmaengine_buffer->owns_chan = owns_chan;
 	dmaengine_buffer->align = width;
 	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);
 
@@ -263,10 +260,12 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
 
 	return &dmaengine_buffer->queue.buffer;
 
-err_release:
-	dma_release_channel(chan);
 err_free:
 	kfree(dmaengine_buffer);
+err_release:
+	if (owns_chan)
+		dma_release_channel(chan);
+
 	return ERR_PTR(ret);
 }
 
@@ -282,12 +281,38 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
 		iio_buffer_to_dmaengine_buffer(buffer);
 
 	iio_dma_buffer_exit(&dmaengine_buffer->queue);
-	dma_release_channel(dmaengine_buffer->chan);
-
 	iio_buffer_put(buffer);
+
+	if (dmaengine_buffer->owns_chan)
+		dma_release_channel(dmaengine_buffer->chan);
 }
 EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);
 
+static struct iio_buffer
+*__iio_dmaengine_buffer_setup_ext(struct iio_dev *indio_dev,
+				  struct dma_chan *chan, bool owns_chan,
+				  enum iio_buffer_direction dir)
+{
+	struct iio_buffer *buffer;
+	int ret;
+
+	buffer = iio_dmaengine_buffer_alloc(chan, owns_chan);
+	if (IS_ERR(buffer))
+		return ERR_CAST(buffer);
+
+	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+
+	buffer->direction = dir;
+
+	ret = iio_device_attach_buffer(indio_dev, buffer);
+	if (ret) {
+		iio_dmaengine_buffer_free(buffer);
+		return ERR_PTR(ret);
+	}
+
+	return buffer;
+}
+
 /**
  * iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
  * @dev: DMA channel consumer device
@@ -308,24 +333,13 @@ struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
 						  const char *channel,
 						  enum iio_buffer_direction dir)
 {
-	struct iio_buffer *buffer;
-	int ret;
-
-	buffer = iio_dmaengine_buffer_alloc(dev, channel);
-	if (IS_ERR(buffer))
-		return ERR_CAST(buffer);
-
-	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
-
-	buffer->direction = dir;
+	struct dma_chan *chan;
 
-	ret = iio_device_attach_buffer(indio_dev, buffer);
-	if (ret) {
-		iio_dmaengine_buffer_free(buffer);
-		return ERR_PTR(ret);
-	}
+	chan = dma_request_chan(dev, channel);
+	if (IS_ERR(chan))
+		return ERR_CAST(chan);
 
-	return buffer;
+	return __iio_dmaengine_buffer_setup_ext(indio_dev, chan, true, dir);
 }
 EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);
 
@@ -362,6 +376,37 @@ int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
 }
 EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);
 
+/**
+ * devm_iio_dmaengine_buffer_setup_ext2() - Setup a DMA buffer for an IIO device
+ * @dev: Device for devm ownership
+ * @indio_dev: IIO device to which to attach this buffer.
+ * @chan: DMA channel
+ * @dir: Direction of buffer (in or out)
+ *
+ * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
+ * and attaches it to an IIO device with iio_device_attach_buffer().
+ * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
+ * IIO device.
+ *
+ * This is the same as devm_iio_dmaengine_buffer_setup_ext() except that the
+ * caller manages requesting and releasing the DMA channel.
+ */
+int devm_iio_dmaengine_buffer_setup_ext2(struct device *dev,
+					 struct iio_dev *indio_dev,
+					 struct dma_chan *chan,
+					 enum iio_buffer_direction dir)
+{
+	struct iio_buffer *buffer;
+
+	buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, false, dir);
+	if (IS_ERR(buffer))
+		return PTR_ERR(buffer);
+
+	return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
+					buffer);
+}
+EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext2, IIO_DMAENGINE_BUFFER);
+
 MODULE_AUTHOR("Lars-Peter Clausen <lars@...afoo.de>");
 MODULE_DESCRIPTION("DMA buffer for the IIO framework");
 MODULE_LICENSE("GPL");
diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h
index 81d9a19aeb91..7bdb979b59f2 100644
--- a/include/linux/iio/buffer-dmaengine.h
+++ b/include/linux/iio/buffer-dmaengine.h
@@ -11,6 +11,7 @@
 
 struct iio_dev;
 struct device;
+struct dma_chan;
 
 void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
 struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
@@ -26,6 +27,10 @@ int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
 					struct iio_dev *indio_dev,
 					const char *channel,
 					enum iio_buffer_direction dir);
+int devm_iio_dmaengine_buffer_setup_ext2(struct device *dev,
+					 struct iio_dev *indio_dev,
+					 struct dma_chan *chan,
+					 enum iio_buffer_direction dir);
 
 #define devm_iio_dmaengine_buffer_setup(dev, indio_dev, channel) \
 	devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, \
--
2.43.0