[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20230416161024.68c761b2@jic23-huawei>
Date: Sun, 16 Apr 2023 16:10:24 +0100
From: Jonathan Cameron <jic23@...nel.org>
To: Paul Cercueil <paul@...pouillou.net>
Cc: Lars-Peter Clausen <lars@...afoo.de>,
Vinod Koul <vkoul@...nel.org>,
Michael Hennerich <Michael.Hennerich@...log.com>,
Nuno Sá <noname.nuno@...il.com>,
Sumit Semwal <sumit.semwal@...aro.org>,
Christian König <christian.koenig@....com>,
linux-kernel@...r.kernel.org, dmaengine@...r.kernel.org,
linux-iio@...r.kernel.org, linux-media@...r.kernel.org,
dri-devel@...ts.freedesktop.org, linaro-mm-sig@...ts.linaro.org
Subject: Re: [PATCH v3 09/11] iio: buffer-dma: Enable support for DMABUFs
On Mon, 3 Apr 2023 17:47:58 +0200
Paul Cercueil <paul@...pouillou.net> wrote:
> Implement iio_dma_buffer_attach_dmabuf(), iio_dma_buffer_detach_dmabuf()
> and iio_dma_buffer_transfer_dmabuf(), which can then be used by the IIO
> DMA buffer implementations.
>
> Signed-off-by: Paul Cercueil <paul@...pouillou.net>
Hi Paul,
A few superficial comments.
Jonathan
>
> ---
> v3: Update code to provide the functions that will be used as callbacks
> for the new IOCTLs.
> ---
> drivers/iio/buffer/industrialio-buffer-dma.c | 157 +++++++++++++++++--
> include/linux/iio/buffer-dma.h | 24 +++
> 2 files changed, 168 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
> index e14814e0d4c8..422bd784fd1e 100644
> --- a/drivers/iio/buffer/industrialio-buffer-dma.c
> +++ b/drivers/iio/buffer/industrialio-buffer-dma.c
...
> @@ -412,8 +448,12 @@ static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
>
> block->state = IIO_BLOCK_STATE_ACTIVE;
> iio_buffer_block_get(block);
> +
Trivial, but I'd rather not see unrelated white space changes in a patch
doing anything else.
> ret = queue->ops->submit(queue, block);
> if (ret) {
> + if (!block->fileio)
> + iio_buffer_signal_dmabuf_done(block->attach, ret);
> +
> /*
> * This is a bit of a problem and there is not much we can do
> * other then wait for the buffer to be disabled and re-enabled
> @@ -645,6 +685,97 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
> }
> EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);
...
> +int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer,
> + struct iio_dma_buffer_block *block,
> + struct sg_table *sgt,
> + size_t size, bool cyclic)
> +{
> + struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
> + int ret = 0;
No need to init.
> +
> + mutex_lock(&queue->lock);
> + ret = iio_dma_can_enqueue_block(block);
> + if (ret < 0)
> + goto out_mutex_unlock;
> +
> + block->bytes_used = size;
> + block->cyclic = cyclic;
> + block->sg_table = sgt;
> +
> + iio_dma_buffer_enqueue(queue, block);
> +
> +out_mutex_unlock:
> + mutex_unlock(&queue->lock);
> + return ret;
> +}
> +EXPORT_SYMBOL_GPL(iio_dma_buffer_enqueue_dmabuf);
Obviously an unrelated activity but good to namespace these
in a future patch set.
> +
> /**
> * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
> * @buffer: Buffer to set the bytes-per-datum for
> diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
> index 490b93f76fa8..e5e5817e99db 100644
> --- a/include/linux/iio/buffer-dma.h
> +++ b/include/linux/iio/buffer-dma.h
> /**
> * enum iio_block_state - State of a struct iio_dma_buffer_block
> @@ -41,6 +43,7 @@ enum iio_block_state {
> * @queue: Parent DMA buffer queue
> * @kref: kref used to manage the lifetime of block
> * @state: Current state of the block
> + * @fileio: True if this buffer is used for fileio mode
Docs need update for the other two new elements.
> */
> struct iio_dma_buffer_block {
> /* May only be accessed by the owner of the block */
> @@ -63,6 +66,11 @@ struct iio_dma_buffer_block {
> * queue->list_lock if the block is not owned by the core.
> */
> enum iio_block_state state;
> +
> + bool fileio;
> +
> + struct dma_buf_attachment *attach;
> + struct sg_table *sg_table;
> };
Powered by blists - more mailing lists