Message-ID: <20210219124012.92897-6-alexandru.ardelean@analog.com>
Date:   Fri, 19 Feb 2021 14:40:11 +0200
From:   Alexandru Ardelean <alexandru.ardelean@...log.com>
To:     <linux-kernel@...r.kernel.org>, <linux-iio@...r.kernel.org>
CC:     <lars@...afoo.de>, <Michael.Hennerich@...log.com>,
        <jic23@...nel.org>, <nuno.sa@...log.com>,
        <dragos.bogdan@...log.com>,
        Alexandru Ardelean <alexandru.ardelean@...log.com>
Subject: [PATCH v3 5/6] iio: buffer-dma: Add output buffer support

From: Lars-Peter Clausen <lars@...afoo.de>

Add support for output buffers to the DMA buffer implementation. The
buffer direction is passed at allocation time; for output buffers the
blocks are initially placed on the outgoing queue, a write()
implementation (iio_dma_buffer_write()) and a space_available()
callback (iio_dma_buffer_space_available()) are added, and the
dmaengine buffer submits blocks with DMA_MEM_TO_DEV instead of
DMA_DEV_TO_MEM.
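
For reference, a minimal sketch (not part of this patch) of how a DAC
driver could wire up an output buffer with the new API. The driver
function name and the "tx" channel below are made up for illustration;
passing a NULL channel name now selects "tx"/"rx" based on direction:

  static int my_dac_config_dma_buffer(struct device *dev,
                                      struct iio_dev *indio_dev)
  {
          return devm_iio_dmaengine_buffer_setup(indio_dev->dev.parent,
                                                 indio_dev,
                                                 IIO_BUFFER_DIRECTION_OUT,
                                                 "tx", NULL, NULL);
  }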

Signed-off-by: Lars-Peter Clausen <lars@...afoo.de>
Signed-off-by: Alexandru Ardelean <alexandru.ardelean@...log.com>
---
 drivers/iio/adc/adi-axi-adc.c                 |   3 +-
 drivers/iio/buffer/industrialio-buffer-dma.c  | 116 ++++++++++++++++--
 .../buffer/industrialio-buffer-dmaengine.c    |  43 +++++--
 include/linux/iio/buffer-dma.h                |   6 +
 include/linux/iio/buffer-dmaengine.h          |   6 +-
 5 files changed, 151 insertions(+), 23 deletions(-)
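
Illustration only, not part of the patch: with output support in place,
the fileio path lets userspace feed samples by write()-ing to the
buffer character device; the kernel rounds the write down to a multiple
of bytes_per_datum and queues full blocks to the DMA. The device path
and sample layout here are examples:

  /* Hypothetical userspace snippet: assumes the buffer length and
   * enable have already been configured through sysfs. */
  #include <fcntl.h>
  #include <stdint.h>
  #include <unistd.h>

  static ssize_t push_samples(const int16_t *samples, size_t count)
  {
          int fd = open("/dev/iio:device0", O_WRONLY);
          ssize_t ret;

          if (fd < 0)
                  return -1;
          /* Returns the number of bytes queued, rounded down to a
           * multiple of bytes_per_datum, or a negative error. */
          ret = write(fd, samples, count * sizeof(*samples));
          close(fd);
          return ret;
  }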

diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index bfa00100a631..98cc1e7caa69 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -104,6 +104,7 @@ static unsigned int adi_axi_adc_read(struct adi_axi_adc_state *st,
 static int adi_axi_adc_config_dma_buffer(struct device *dev,
 					 struct iio_dev *indio_dev)
 {
+	enum iio_buffer_direction dir = IIO_BUFFER_DIRECTION_IN;
 	const char *dma_name;
 
 	if (!device_property_present(dev, "dmas"))
@@ -113,7 +114,7 @@ static int adi_axi_adc_config_dma_buffer(struct device *dev,
 		dma_name = "rx";
 
 	return devm_iio_dmaengine_buffer_setup(indio_dev->dev.parent,
-					       indio_dev, dma_name,
+					       indio_dev, dir, dma_name,
 					       NULL, NULL);
 }
 
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index aa56c10418d0..83074d060535 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -223,7 +223,8 @@ void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
 	spin_unlock_irqrestore(&queue->list_lock, flags);
 
 	iio_buffer_block_put_atomic(block);
-	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
+	wake_up_interruptible_poll(&queue->buffer.pollq,
+		queue->poll_wakeup_flags);
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
 
@@ -252,7 +253,8 @@ void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
 	}
 	spin_unlock_irqrestore(&queue->list_lock, flags);
 
-	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
+	wake_up_interruptible_poll(&queue->buffer.pollq,
+		queue->poll_wakeup_flags);
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
 
@@ -353,9 +355,6 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
 		}
 
 		block->block.id = i;
-
-		block->state = IIO_BLOCK_STATE_QUEUED;
-		list_add_tail(&block->head, &queue->incoming);
 	}
 
 out_unlock:
@@ -437,7 +436,29 @@ int iio_dma_buffer_enable(struct iio_buffer *buffer,
 	struct iio_dma_buffer_block *block, *_block;
 
 	mutex_lock(&queue->lock);
+
+	if (buffer->direction == IIO_BUFFER_DIRECTION_IN)
+		queue->poll_wakeup_flags = EPOLLIN | EPOLLRDNORM;
+	else
+		queue->poll_wakeup_flags = EPOLLOUT | EPOLLWRNORM;
+
 	queue->fileio.enabled = !queue->num_blocks;
+	if (queue->fileio.enabled) {
+		unsigned int i;
+
+		for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
+			struct iio_dma_buffer_block *block =
+			    queue->fileio.blocks[i];
+			if (buffer->direction == IIO_BUFFER_DIRECTION_IN) {
+				block->state = IIO_BLOCK_STATE_QUEUED;
+				list_add_tail(&block->head, &queue->incoming);
+			} else {
+				block->state = IIO_BLOCK_STATE_DEQUEUED;
+				list_add_tail(&block->head, &queue->outgoing);
+			}
+		}
+	}
+
 	queue->active = true;
 	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
 		list_del(&block->head);
@@ -567,6 +588,61 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_read);
 
+int iio_dma_buffer_write(struct iio_buffer *buf, size_t n,
+	const char __user *user_buffer)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
+	struct iio_dma_buffer_block *block;
+	int ret;
+
+	if (n < buf->bytes_per_datum)
+		return -EINVAL;
+
+	mutex_lock(&queue->lock);
+
+	if (!queue->fileio.enabled) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+
+	if (!queue->fileio.active_block) {
+		block = iio_dma_buffer_dequeue(queue);
+		if (block == NULL) {
+			ret = 0;
+			goto out_unlock;
+		}
+		queue->fileio.pos = 0;
+		queue->fileio.active_block = block;
+	} else {
+		block = queue->fileio.active_block;
+	}
+
+	n = rounddown(n, buf->bytes_per_datum);
+	if (n > block->block.size - queue->fileio.pos)
+		n = block->block.size - queue->fileio.pos;
+
+	if (copy_from_user(block->vaddr + queue->fileio.pos, user_buffer, n)) {
+		ret = -EFAULT;
+		goto out_unlock;
+	}
+
+	queue->fileio.pos += n;
+
+	if (queue->fileio.pos == block->block.size) {
+		queue->fileio.active_block = NULL;
+		block->block.bytes_used = block->block.size;
+		iio_dma_buffer_enqueue(queue, block);
+	}
+
+	ret = n;
+
+out_unlock:
+	mutex_unlock(&queue->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_write);
+
 /**
  * iio_dma_buffer_data_available() - DMA buffer data_available callback
  * @buf: Buffer to check for data availability
@@ -588,12 +664,14 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
 	 */
 
 	mutex_lock(&queue->lock);
-	if (queue->fileio.active_block)
-		data_available += queue->fileio.active_block->block.size;
+	if (queue->fileio.active_block) {
+		data_available += queue->fileio.active_block->block.bytes_used -
+			queue->fileio.pos;
+	}
 
 	spin_lock_irq(&queue->list_lock);
 	list_for_each_entry(block, &queue->outgoing, head)
-		data_available += block->block.size;
+		data_available += block->block.bytes_used;
 	spin_unlock_irq(&queue->list_lock);
 	mutex_unlock(&queue->lock);
 
@@ -601,6 +679,28 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
 }
 EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);
 
+size_t iio_dma_buffer_space_available(struct iio_buffer *buf)
+{
+	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
+	struct iio_dma_buffer_block *block;
+	size_t space_available = 0;
+
+	mutex_lock(&queue->lock);
+	if (queue->fileio.active_block) {
+		space_available += queue->fileio.active_block->block.size -
+			queue->fileio.pos;
+	}
+
+	spin_lock_irq(&queue->list_lock);
+	list_for_each_entry(block, &queue->outgoing, head)
+		space_available += block->block.size;
+	spin_unlock_irq(&queue->list_lock);
+	mutex_unlock(&queue->lock);
+
+	return space_available;
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_space_available);
+
 int iio_dma_buffer_alloc_blocks(struct iio_buffer *buffer,
 				struct iio_buffer_block_alloc_req *req)
 {
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index a4e7b97ce239..65458a6cc81a 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -37,6 +37,8 @@ struct dmaengine_buffer {
 
 	u32 align;
 	u32 max_size;
+
+	bool is_tx;
 };
 
 static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
@@ -64,9 +66,12 @@ static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
 	struct dmaengine_buffer *dmaengine_buffer =
 		iio_buffer_to_dmaengine_buffer(&queue->buffer);
 	struct dma_async_tx_descriptor *desc;
+	enum dma_transfer_direction direction;
 	dma_cookie_t cookie;
 
-	block->block.bytes_used = min(block->block.size,
+	if (!dmaengine_buffer->is_tx)
+		block->block.bytes_used = block->block.size;
+	block->block.bytes_used = min(block->block.bytes_used,
 		dmaengine_buffer->max_size);
 	block->block.bytes_used = rounddown(block->block.bytes_used,
 		dmaengine_buffer->align);
@@ -75,8 +80,10 @@ static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
 		return 0;
 	}
 
+	direction = dmaengine_buffer->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+
 	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
-		block->phys_addr, block->block.bytes_used, DMA_DEV_TO_MEM,
+		block->phys_addr, block->block.bytes_used, direction,
 		DMA_PREP_INTERRUPT);
 	if (!desc)
 		return -ENOMEM;
@@ -117,12 +124,14 @@ static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
 
 static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
 	.read = iio_dma_buffer_read,
+	.write = iio_dma_buffer_write,
 	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 	.set_length = iio_dma_buffer_set_length,
 	.request_update = iio_dma_buffer_request_update,
 	.enable = iio_dma_buffer_enable,
 	.disable = iio_dma_buffer_disable,
 	.data_available = iio_dma_buffer_data_available,
+	.space_available = iio_dma_buffer_space_available,
 	.release = iio_dmaengine_buffer_release,
 
 	.alloc_blocks = iio_dma_buffer_alloc_blocks,
@@ -162,6 +171,7 @@ static const struct attribute *iio_dmaengine_buffer_attrs[] = {
 /**
  * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
  * @dev: Parent device for the buffer
+ * @direction: Direction of the data stream (in/out).
  * @channel: DMA channel name, typically "rx".
  * @ops: Custom iio_dma_buffer_ops, if NULL default ops will be used
  * @driver_data: Driver data to be passed to custom iio_dma_buffer_ops
@@ -174,11 +184,12 @@ static const struct attribute *iio_dmaengine_buffer_attrs[] = {
  * release it.
  */
 static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
-	const char *channel, const struct iio_dma_buffer_ops *ops,
-	void *driver_data)
+	enum iio_buffer_direction direction, const char *channel,
+	const struct iio_dma_buffer_ops *ops, void *driver_data)
 {
 	struct dmaengine_buffer *dmaengine_buffer;
 	unsigned int width, src_width, dest_width;
+	bool is_tx = (direction == IIO_BUFFER_DIRECTION_OUT);
 	struct dma_slave_caps caps;
 	struct dma_chan *chan;
 	int ret;
@@ -187,6 +198,9 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
 	if (!dmaengine_buffer)
 		return ERR_PTR(-ENOMEM);
 
+	if (!channel)
+		channel = is_tx ? "tx" : "rx";
+
 	chan = dma_request_chan(dev, channel);
 	if (IS_ERR(chan)) {
 		ret = PTR_ERR(chan);
@@ -212,6 +226,7 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
 	dmaengine_buffer->chan = chan;
 	dmaengine_buffer->align = width;
 	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);
+	dmaengine_buffer->is_tx = is_tx;
 
 	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
 		ops ? ops : &iio_dmaengine_default_ops, driver_data);
@@ -251,7 +266,8 @@ static void __devm_iio_dmaengine_buffer_free(struct device *dev, void *res)
 /**
  * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
  * @dev: Parent device for the buffer
- * @channel: DMA channel name, typically "rx".
+ * @direction: Direction of the data stream (in/out).
+ * @channel: DMA channel name, typically "rx" for input, "tx" for output.
  * @ops: Custom iio_dma_buffer_ops, if NULL default ops will be used
  * @driver_data: Driver data to be passed to custom iio_dma_buffer_ops
  *
@@ -262,8 +278,8 @@ static void __devm_iio_dmaengine_buffer_free(struct device *dev, void *res)
  * The buffer will be automatically de-allocated once the device gets destroyed.
  */
 static struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
-	const char *channel, const struct iio_dma_buffer_ops *ops,
-	void *driver_data)
+	enum iio_buffer_direction direction, const char *channel,
+	const struct iio_dma_buffer_ops *ops, void *driver_data)
 {
 	struct iio_buffer **bufferp, *buffer;
 
@@ -272,7 +288,8 @@ static struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
 	if (!bufferp)
 		return ERR_PTR(-ENOMEM);
 
-	buffer = iio_dmaengine_buffer_alloc(dev, channel, ops, driver_data);
+	buffer = iio_dmaengine_buffer_alloc(dev, direction, channel, ops,
+					    driver_data);
 	if (IS_ERR(buffer)) {
 		devres_free(bufferp);
 		return buffer;
@@ -288,7 +305,8 @@ static struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
  * devm_iio_dmaengine_buffer_setup() - Setup a DMA buffer for an IIO device
  * @dev: Parent device for the buffer
  * @indio_dev: IIO device to which to attach this buffer.
- * @channel: DMA channel name, typically "rx".
+ * @direction: Direction of the data stream (in/out).
+ * @channel: DMA channel name, typically "rx" for input, "tx" for output.
  * @ops: Custom iio_dma_buffer_ops, if NULL default ops will be used
  * @driver_data: Driver data to be passed to custom iio_dma_buffer_ops
  *
@@ -298,14 +316,15 @@ static struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
  * IIO device.
  */
 int devm_iio_dmaengine_buffer_setup(struct device *dev,
-	struct iio_dev *indio_dev, const char *channel,
-	const struct iio_dma_buffer_ops *ops,
+	struct iio_dev *indio_dev, enum iio_buffer_direction direction,
+	const char *channel, const struct iio_dma_buffer_ops *ops,
 	void *driver_data)
 {
 	struct iio_buffer *buffer;
 
 	buffer = devm_iio_dmaengine_buffer_alloc(indio_dev->dev.parent,
-						 channel, ops, driver_data);
+						 direction, channel, ops,
+						 driver_data);
 	if (IS_ERR(buffer))
 		return PTR_ERR(buffer);
 
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index 1eec7efe44cf..9a99c74fab16 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -112,6 +112,8 @@ struct iio_dma_buffer_queue {
 
 	void *driver_data;
 
+	__poll_t poll_wakeup_flags;
+
 	unsigned int num_blocks;
 	struct iio_dma_buffer_block **blocks;
 	unsigned int max_offset;
@@ -145,6 +147,10 @@ int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
 int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
 int iio_dma_buffer_request_update(struct iio_buffer *buffer);
 
+int iio_dma_buffer_write(struct iio_buffer *buf, size_t n,
+	const char __user *user_buffer);
+size_t iio_dma_buffer_space_available(struct iio_buffer *buf);
+
 int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
 	struct device *dma_dev, const struct iio_dma_buffer_ops *ops,
 	void *driver_data);
diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h
index 1fca8cdbf14e..1d70d35946b1 100644
--- a/include/linux/iio/buffer-dmaengine.h
+++ b/include/linux/iio/buffer-dmaengine.h
@@ -7,13 +7,15 @@
 #ifndef __IIO_DMAENGINE_H__
 #define __IIO_DMAENGINE_H__
 
+#include <linux/iio/buffer.h>
+
 struct iio_dev;
 struct iio_dma_buffer_ops;
 struct device;
 
 int devm_iio_dmaengine_buffer_setup(struct device *dev,
-	struct iio_dev *indio_dev, const char *channel,
-	const struct iio_dma_buffer_ops *ops,
+	struct iio_dev *indio_dev, enum iio_buffer_direction direction,
+	const char *channel, const struct iio_dma_buffer_ops *ops,
 	void *driver_data);
 
 #endif
-- 
2.27.0
