Message-ID: <219abc43b4fdd4a13b307ed2efaa0e6869e68e3f.camel@gmail.com>
Date: Fri, 22 Dec 2023 09:56:39 +0100
From: Nuno Sá <noname.nuno@...il.com>
To: Jonathan Cameron <jic23@...nel.org>, Paul Cercueil <paul@...pouillou.net>
Cc: Lars-Peter Clausen <lars@...afoo.de>, Sumit Semwal
 <sumit.semwal@...aro.org>,  Christian König
 <christian.koenig@....com>, Vinod Koul <vkoul@...nel.org>, Jonathan Corbet
 <corbet@....net>, linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org, 
 dmaengine@...r.kernel.org, linux-iio@...r.kernel.org, 
 linux-media@...r.kernel.org, dri-devel@...ts.freedesktop.org, 
 linaro-mm-sig@...ts.linaro.org, Michael Hennerich
 <Michael.Hennerich@...log.com>
Subject: Re: [PATCH v5 6/8] iio: buffer-dma: Enable support for DMABUFs

On Thu, 2023-12-21 at 16:04 +0000, Jonathan Cameron wrote:
> On Tue, 19 Dec 2023 18:50:07 +0100
> Paul Cercueil <paul@...pouillou.net> wrote:
> 
> > Implement iio_dma_buffer_attach_dmabuf(), iio_dma_buffer_detach_dmabuf()
> > and iio_dma_buffer_transfer_dmabuf(), which can then be used by the IIO
> > DMA buffer implementations.
> > 
> > Signed-off-by: Paul Cercueil <paul@...pouillou.net>
> > 
> Hi Paul,
> 
> A few comments in here. Mostly places where the cleanup.h guard() stuff
> can simplify error paths.
> 
> Overall this looks reasonable to me.
> 
> Jonathan
> 
> > ---
> > v3: Update code to provide the functions that will be used as callbacks
> >     for the new IOCTLs.
> > ---
> >  drivers/iio/buffer/industrialio-buffer-dma.c | 157 +++++++++++++++++--
> >  include/linux/iio/buffer-dma.h               |  26 +++
> >  2 files changed, 170 insertions(+), 13 deletions(-)
> > 
> > diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
> > index 5610ba67925e..adb64f975064 100644
> > --- a/drivers/iio/buffer/industrialio-buffer-dma.c
> > +++ b/drivers/iio/buffer/industrialio-buffer-dma.c
> > @@ -14,6 +14,7 @@
> >  #include <linux/poll.h>
> >  #include <linux/iio/buffer_impl.h>
> >  #include <linux/iio/buffer-dma.h>
> > +#include <linux/dma-buf.h>
> >  #include <linux/dma-mapping.h>
> >  #include <linux/sizes.h>
> >  
> > @@ -94,14 +95,24 @@ static void iio_buffer_block_release(struct kref *kref)
> >  {
> >         struct iio_dma_buffer_block *block = container_of(kref,
> >                 struct iio_dma_buffer_block, kref);
> > +       struct iio_dma_buffer_queue *queue = block->queue;
> >  
> > -       WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);
> > +       WARN_ON(block->fileio && block->state != IIO_BLOCK_STATE_DEAD);
> >  
> > -       dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
> > -                                       block->vaddr, block->phys_addr);
> > +       mutex_lock(&queue->lock);
> >  
> > -       iio_buffer_put(&block->queue->buffer);
> > +       if (block->fileio) {
> > +               dma_free_coherent(queue->dev, PAGE_ALIGN(block->size),
> > +                                 block->vaddr, block->phys_addr);
> > +               queue->num_fileio_blocks--;
> > +       }
> > +
> > +       queue->num_blocks--;
> >         kfree(block);
> > +
> > +       mutex_unlock(&queue->lock);
> > +
> > +       iio_buffer_put(&queue->buffer);
> >  }
> >  
> >  static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
> > @@ -163,7 +174,7 @@ static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
> >  }
> >  
> >  static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
> > -       struct iio_dma_buffer_queue *queue, size_t size)
> > +       struct iio_dma_buffer_queue *queue, size_t size, bool fileio)
> >  {
> >         struct iio_dma_buffer_block *block;
> >  
> > @@ -171,13 +182,16 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
> >         if (!block)
> >                 return NULL;
> >  
> > -       block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
> > -               &block->phys_addr, GFP_KERNEL);
> > -       if (!block->vaddr) {
> > -               kfree(block);
> > -               return NULL;
> > +       if (fileio) {
> > +               block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
> > +                                                 &block->phys_addr, GFP_KERNEL);
> > +               if (!block->vaddr) {
> > +                       kfree(block);
> > +                       return NULL;
> > +               }
> >         }
> >  
> > +       block->fileio = fileio;
> >         block->size = size;
> >         block->state = IIO_BLOCK_STATE_DONE;
> >         block->queue = queue;
> > @@ -186,6 +200,9 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
> >  
> >         iio_buffer_get(&queue->buffer);
> >  
> > +       queue->num_blocks++;
> > +       queue->num_fileio_blocks += fileio;
> Adding a boolean is non-intuitive.
> 
>         if (fileio)
>                 queue->num_fileio_blocks++;
> 
> is probably easier to read, and the compiler should be able to figure out how
> to optimise the condition away.
> 
> > +
> >         return block;
> >  }
> >  
> > @@ -211,6 +228,9 @@ void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
> >         _iio_dma_buffer_block_done(block);
> >         spin_unlock_irqrestore(&queue->list_lock, flags);
> >  
> > +       if (!block->fileio)
> > +               iio_buffer_signal_dmabuf_done(block->attach, 0);
> > +
> >         iio_buffer_block_put_atomic(block);
> >         wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
> >  }
> > @@ -237,10 +257,14 @@ void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
> >                 list_del(&block->head);
> >                 block->bytes_used = 0;
> >                 _iio_dma_buffer_block_done(block);
> > +
> > +               if (!block->fileio)
> > +                       iio_buffer_signal_dmabuf_done(block->attach, -EINTR);
> >                 iio_buffer_block_put_atomic(block);
> >         }
> >         spin_unlock_irqrestore(&queue->list_lock, flags);
> >  
> > +       queue->fileio.enabled = false;
> 
> While this obviously doesn't need to be conditional (it can already be false),
> it might be easier to follow the code flow if it didn't check whether we were
> doing fileio or not before disabling it.
> 
> >         wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
> >  }
> >  EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
> > @@ -261,6 +285,12 @@ static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
> >         }
> >  }
> >  
> > +static bool iio_dma_buffer_fileio_mode(struct iio_dma_buffer_queue *queue)
> > +{
> > +       return queue->fileio.enabled ||
> > +               queue->num_blocks == queue->num_fileio_blocks;
> This is a little odd, so it would be good to have a comment on why this
> condition is needed. Or rename the function to imply it's checking whether
> fileio is enabled, or can be enabled.
> 
> At first glance I expected a function with this name to just be an accessor
> function, e.g.
>         return queue->fileio.enabled;
> 
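
For illustration, the check could be documented (and renamed) along these
lines; this is only a sketch with a hypothetical name, not part of the actual
patch:

	/*
	 * Fileio mode is usable if it is already enabled, or if no DMABUF
	 * blocks exist yet: num_blocks == num_fileio_blocks also covers the
	 * case where no blocks have been allocated at all.
	 */
	static bool iio_dma_buffer_can_use_fileio(struct iio_dma_buffer_queue *queue)
	{
		return queue->fileio.enabled ||
			queue->num_blocks == queue->num_fileio_blocks;
	}
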
> > +}
> > +
> >  /**
> >   * iio_dma_buffer_request_update() - DMA buffer request_update callback
> >   * @buffer: The buffer which to request an update
> > @@ -287,6 +317,12 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
> >  
> >         mutex_lock(&queue->lock);
> >  
> > +       queue->fileio.enabled = iio_dma_buffer_fileio_mode(queue);
> > +
> > +       /* If DMABUFs were created, disable fileio interface */
> > +       if (!queue->fileio.enabled)
> > +               goto out_unlock;
> > +
> >         /* Allocations are page aligned */
> >         if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
> >                 try_reuse = true;
> > @@ -317,7 +353,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
> >                         block = queue->fileio.blocks[i];
> >                         if (block->state == IIO_BLOCK_STATE_DEAD) {
> >                                 /* Could not reuse it */
> > -                               iio_buffer_block_put(block);
> > +                               iio_buffer_block_put_atomic(block);
> >                                 block = NULL;
> >                         } else {
> >                                 block->size = size;
> > @@ -327,7 +363,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
> >                 }
> >  
> >                 if (!block) {
> > -                       block = iio_dma_buffer_alloc_block(queue, size);
> > +                       block = iio_dma_buffer_alloc_block(queue, size, true);
> >                         if (!block) {
> >                                 ret = -ENOMEM;
> >                                 goto out_unlock;
> > @@ -363,7 +399,7 @@ static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue)
> >         for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
> >                 if (!queue->fileio.blocks[i])
> >                         continue;
> > -               iio_buffer_block_put(queue->fileio.blocks[i]);
> > +               iio_buffer_block_put_atomic(queue->fileio.blocks[i]);
> 
> For these cases that switched to the atomic variant, it might be worth calling
> out why they need to be atomic.
> 
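
For reference, the likely reason: iio_buffer_block_release() now takes
queue->lock, so dropping the last reference with a plain iio_buffer_block_put()
from a path that already holds queue->lock (as request_update and fileio_free
do) would deadlock. The _atomic variant defers the final release to process
context instead. A minimal sketch of that deferral pattern, with hypothetical
names, loosely modelled on the existing put_atomic machinery
(iio_buffer_block_release() is the normal release shown at the top of this
patch):

	static LIST_HEAD(dead_blocks);
	static DEFINE_SPINLOCK(dead_blocks_lock);

	static void dead_blocks_cleanup(struct work_struct *work)
	{
		struct iio_dma_buffer_block *block, *tmp;
		LIST_HEAD(list);

		spin_lock_irq(&dead_blocks_lock);
		list_splice_tail_init(&dead_blocks, &list);
		spin_unlock_irq(&dead_blocks_lock);

		/* process context, no locks held: safe to take queue->lock */
		list_for_each_entry_safe(block, tmp, &list, head)
			iio_buffer_block_release(&block->kref);
	}
	static DECLARE_WORK(dead_blocks_work, dead_blocks_cleanup);

	static void iio_buffer_block_release_atomic(struct kref *kref)
	{
		struct iio_dma_buffer_block *block =
			container_of(kref, struct iio_dma_buffer_block, kref);
		unsigned long flags;

		/* push the real release out to a workqueue */
		spin_lock_irqsave(&dead_blocks_lock, flags);
		list_add_tail(&block->head, &dead_blocks);
		spin_unlock_irqrestore(&dead_blocks_lock, flags);

		schedule_work(&dead_blocks_work);
	}
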
> >                 queue->fileio.blocks[i] = NULL;
> >         }
> >         queue->fileio.active_block = NULL;
> > @@ -384,8 +420,12 @@ static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
> >  
> >         block->state = IIO_BLOCK_STATE_ACTIVE;
> >         iio_buffer_block_get(block);
> > +
> >         ret = queue->ops->submit(queue, block);
> >         if (ret) {
> > +               if (!block->fileio)
> > +                       iio_buffer_signal_dmabuf_done(block->attach, ret);
> > +
> >                 /*
> >                  * This is a bit of a problem and there is not much we can do
> >                  * other then wait for the buffer to be disabled and re-enabled
> > @@ -588,6 +628,97 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
> >  }
> >  EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);
> >  
> > +struct iio_dma_buffer_block *
> > +iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
> > +                            struct dma_buf_attachment *attach)
> > +{
> > +       struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
> > +       struct iio_dma_buffer_block *block;
> > +       int err;
> > +
> > +       mutex_lock(&queue->lock);
> 
>         guard(mutex)(&queue->lock);
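
For readers unfamiliar with the cleanup.h helpers suggested here, a minimal
sketch of the pattern (hypothetical function; guard(mutex) comes from
<linux/mutex.h> via <linux/cleanup.h>):

	static int example_op(struct iio_dma_buffer_queue *queue)
	{
		/* the lock is dropped automatically on every return path */
		guard(mutex)(&queue->lock);

		if (!queue->fileio.enabled)
			return -EBUSY;	/* no explicit mutex_unlock() needed */

		return 0;
	}

scoped_guard(mutex, &queue->lock) { ... } is the companion form for critical
sections narrower than the whole function.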

I'm also a big fan of this new stuff, but shouldn't we have a prep patch making the
transition to it? Otherwise we'll end up with a mix of styles. I'm happy to clean
things up with follow-up patches (even some coding style could be improved IIRC),
but typically that's not how we handle things like this, I believe...

- Nuno Sá
> 
