Message-ID: <20230416161527.6e7021f1@jic23-huawei>
Date: Sun, 16 Apr 2023 16:15:27 +0100
From: Jonathan Cameron <jic23@...nel.org>
To: Paul Cercueil <paul@...pouillou.net>
Cc: Lars-Peter Clausen <lars@...afoo.de>,
Vinod Koul <vkoul@...nel.org>,
Michael Hennerich <Michael.Hennerich@...log.com>,
Nuno Sá <noname.nuno@...il.com>,
Sumit Semwal <sumit.semwal@...aro.org>,
Christian König <christian.koenig@....com>,
linux-kernel@...r.kernel.org, dmaengine@...r.kernel.org,
linux-iio@...r.kernel.org, linux-media@...r.kernel.org,
dri-devel@...ts.freedesktop.org, linaro-mm-sig@...ts.linaro.org
Subject: Re: [PATCH v3 10/11] iio: buffer-dmaengine: Support new DMABUF
based userspace API
On Mon, 3 Apr 2023 17:49:54 +0200
Paul Cercueil <paul@...pouillou.net> wrote:
> Use the functions provided by the buffer-dma core to implement the
> DMABUF userspace API in the buffer-dmaengine IIO buffer implementation.
>
> Since we want to be able to transfer an arbitrary number of bytes and
> not necessarily the full DMABUF, the associated scatterlist is converted
> to an array of DMA addresses + lengths, which is then passed to
> dmaengine_prep_slave_dma_array().
>
> Signed-off-by: Paul Cercueil <paul@...pouillou.net>
A few things inline.
Thanks,
Jonathan
>
> ---
> v3: Use the new dmaengine_prep_slave_dma_array(), and adapt the code to
> work with the new functions introduced in industrialio-buffer-dma.c.
> ---
> .../buffer/industrialio-buffer-dmaengine.c | 69 ++++++++++++++++---
> include/linux/iio/buffer-dma.h | 2 +
> 2 files changed, 60 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
> index 866c8b84bb24..faed9c2b089c 100644
> --- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
> +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
> @@ -65,25 +65,68 @@ static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
> iio_buffer_to_dmaengine_buffer(&queue->buffer);
> struct dma_async_tx_descriptor *desc;
> enum dma_transfer_direction dma_dir;
> + unsigned int i, nents, *lenghts;
> + struct scatterlist *sgl;
> + unsigned long flags;
> + dma_addr_t *addrs;
> size_t max_size;
> dma_cookie_t cookie;
> + size_t len_total;
>
> - max_size = min(block->size, dmaengine_buffer->max_size);
> - max_size = round_down(max_size, dmaengine_buffer->align);
> + if (!block->bytes_used)
> + return -EINVAL;
>
> - if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
> - block->bytes_used = max_size;
> + if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
> dma_dir = DMA_DEV_TO_MEM;
> - } else {
> + else
> dma_dir = DMA_MEM_TO_DEV;
> - }
>
> - if (!block->bytes_used || block->bytes_used > max_size)
> - return -EINVAL;
Ah, this is dropping the code I moaned about earlier. I'll probably
forget, though, so maybe add a note to that patch saying the code goes
away later anyway, so I don't keep moaning about it in future versions.
> + if (block->sg_table) {
> + sgl = block->sg_table->sgl;
> + nents = sg_nents_for_len(sgl, block->bytes_used);
> +
> + addrs = kmalloc_array(nents, sizeof(*addrs), GFP_KERNEL);
> + if (!addrs)
> + return -ENOMEM;
> +
> + lenghts = kmalloc_array(nents, sizeof(*lenghts), GFP_KERNEL);
lengths?
> + if (!lenghts) {
> + kfree(addrs);
> + return -ENOMEM;
> + }
> +
> + len_total = block->bytes_used;
>
> - desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
> - block->phys_addr, block->bytes_used, dma_dir,
> - DMA_PREP_INTERRUPT);
> + for (i = 0; i < nents; i++) {
> + addrs[i] = sg_dma_address(sgl);
> + lenghts[i] = min(sg_dma_len(sgl), len_total);
> + len_total -= lenghts[i];
> +
> + sgl = sg_next(sgl);
> + }
> +
> + flags = block->cyclic ? DMA_PREP_REPEAT : DMA_PREP_INTERRUPT;
> +
> + desc = dmaengine_prep_slave_dma_array(dmaengine_buffer->chan,
> + addrs, lenghts, nents,
> + dma_dir, flags);
> + kfree(addrs);
> + kfree(lenghts);
> + } else {
> + max_size = min(block->size, dmaengine_buffer->max_size);
> + max_size = round_down(max_size, dmaengine_buffer->align);
> +
> + if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
> + block->bytes_used = max_size;
> +
> + if (block->bytes_used > max_size)
> + return -EINVAL;
> +
> + desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
> + block->phys_addr,
> + block->bytes_used, dma_dir,
> + DMA_PREP_INTERRUPT);
> + }
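FWIW, the scatterlist-to-array conversion the commit message describes
boils down to something like the untested sketch below. The helper name
is illustrative only, and it assumes the dmaengine_prep_slave_dma_array()
signature introduced earlier in this series:

static struct dma_async_tx_descriptor *
prep_sg_as_array(struct dma_chan *chan, struct scatterlist *sgl,
		 size_t bytes_used, enum dma_transfer_direction dir,
		 unsigned long flags)
{
	struct dma_async_tx_descriptor *desc = NULL;
	size_t len_total = bytes_used;
	unsigned int *lengths;
	dma_addr_t *addrs;
	int i, nents;

	nents = sg_nents_for_len(sgl, bytes_used);
	if (nents < 0)
		return NULL;

	addrs = kmalloc_array(nents, sizeof(*addrs), GFP_KERNEL);
	lengths = kmalloc_array(nents, sizeof(*lengths), GFP_KERNEL);
	if (!addrs || !lengths)
		goto out_free;

	/* Cap each entry so the entries sum to exactly bytes_used. */
	for (i = 0; i < nents; i++, sgl = sg_next(sgl)) {
		addrs[i] = sg_dma_address(sgl);
		lengths[i] = min_t(size_t, sg_dma_len(sgl), len_total);
		len_total -= lengths[i];
	}

	desc = dmaengine_prep_slave_dma_array(chan, addrs, lengths, nents,
					      dir, flags);
out_free:
	kfree(addrs);
	kfree(lengths);
	return desc;
}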
> diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
> index e5e5817e99db..48f7ffaf0867 100644
> --- a/include/linux/iio/buffer-dma.h
> +++ b/include/linux/iio/buffer-dma.h
> @@ -43,6 +43,7 @@ enum iio_block_state {
> * @queue: Parent DMA buffer queue
> * @kref: kref used to manage the lifetime of block
> * @state: Current state of the block
> + * @cyclic: True if this is a cyclic buffer
> * @fileio: True if this buffer is used for fileio mode
I might have commented on it earlier (I've lost track), but @attach
should be documented as well. Worth sanity checking by either building
with W=1 or running kernel-doc over the files and fixing the warnings.
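E.g. something like the line below; the description is only my guess at
the intent, so adjust as needed:

 * @attach: DMA-BUF attachment this block was created from, or NULL
 *	in fileio mode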
> */
> struct iio_dma_buffer_block {
> @@ -67,6 +68,7 @@ struct iio_dma_buffer_block {
> */
> enum iio_block_state state;
>
> + bool cyclic;
> bool fileio;
>
> struct dma_buf_attachment *attach;