Message-Id: <20200302041213.27662-11-senozhatsky@chromium.org>
Date: Mon, 2 Mar 2020 13:12:12 +0900
From: Sergey Senozhatsky <senozhatsky@...omium.org>
To: Hans Verkuil <hans.verkuil@...co.com>,
Tomasz Figa <tfiga@...omium.org>
Cc: Mauro Carvalho Chehab <mchehab@...nel.org>,
Kyungmin Park <kyungmin.park@...sung.com>,
Marek Szyprowski <m.szyprowski@...sung.com>,
Sakari Ailus <sakari.ailus@....fi>,
Laurent Pinchart <laurent.pinchart@...asonboard.com>,
Pawel Osciak <posciak@...omium.org>,
linux-media@...r.kernel.org, linux-kernel@...r.kernel.org,
Sergey Senozhatsky <senozhatsky@...omium.org>
Subject: [PATCHv4 10/11] videobuf2: add begin/end cpu_access callbacks to dma-sg
Provide begin_cpu_access() and end_cpu_access() dma_buf_ops
callbacks for cache synchronisation on exported buffers.
V4L2_FLAG_MEMORY_NON_CONSISTENT has no effect on dma-sg buffers: dma-sg
allocates memory directly from the page allocator, so there is no
memory consistency guarantee to begin with.
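
For reference, a minimal userspace sketch (not part of this patch) of
how an importer would bracket CPU access to the exported buffer with
DMA_BUF_IOCTL_SYNC, which is what ends up invoking the new
begin_cpu_access()/end_cpu_access() callbacks; dmabuf_fd and len are
placeholder values:

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

static int cpu_read_buffer(int dmabuf_fd, size_t len)
{
	struct dma_buf_sync sync = { 0 };
	void *mem;

	mem = mmap(NULL, len, PROT_READ, MAP_SHARED, dmabuf_fd, 0);
	if (mem == MAP_FAILED)
		return -1;

	/* Make CPU caches coherent before reading (begin_cpu_access). */
	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ;
	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);

	/* ... read the frame data through 'mem' ... */

	/* Hand the buffer back to the device (end_cpu_access). */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);

	munmap(mem, len);
	return 0;
}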
Signed-off-by: Sergey Senozhatsky <senozhatsky@...omium.org>
---
.../media/common/videobuf2/videobuf2-dma-sg.c | 28 +++++++++++++++++++
1 file changed, 28 insertions(+)
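
An illustrative kernel-side sketch (not part of the diff below) of how
an importing driver would reach these callbacks:
dma_buf_begin_cpu_access()/dma_buf_end_cpu_access() dispatch to the
begin_cpu_access/end_cpu_access ops added here:

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

static int importer_cpu_touch(struct dma_buf *dbuf)
{
	int ret;

	/* Sync the buffer for CPU reads; invokes the exporter's begin_cpu_access. */
	ret = dma_buf_begin_cpu_access(dbuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* ... CPU inspects the buffer contents here ... */

	/* Return ownership to the device; invokes end_cpu_access. */
	return dma_buf_end_cpu_access(dbuf, DMA_FROM_DEVICE);
}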
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 6db60e9d5183..ddc67c9aaedb 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -120,6 +120,12 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
buf->num_pages = size >> PAGE_SHIFT;
buf->dma_sgt = &buf->sg_table;
+ /*
+ * NOTE: dma-sg allocates memory using the page allocator directly, so
+ * there is no memory consistency guarantee, hence dma-sg ignores DMA
+ * attributes passed from the upper layer. That means that
+ * V4L2_FLAG_MEMORY_NON_CONSISTENT has no effect on dma-sg buffers.
+ */
buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
GFP_KERNEL | __GFP_ZERO);
if (!buf->pages)
@@ -470,6 +476,26 @@ static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
vb2_dma_sg_put(dbuf->priv);
}
+static int vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
+ enum dma_data_direction direction)
+{
+ struct vb2_dma_sg_buf *buf = dbuf->priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+ return 0;
+}
+
+static int vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
+ enum dma_data_direction direction)
+{
+ struct vb2_dma_sg_buf *buf = dbuf->priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+ return 0;
+}
+
static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
struct vb2_dma_sg_buf *buf = dbuf->priv;
@@ -488,6 +514,8 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
.detach = vb2_dma_sg_dmabuf_ops_detach,
.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
+ .begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
+ .end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
.vmap = vb2_dma_sg_dmabuf_ops_vmap,
.mmap = vb2_dma_sg_dmabuf_ops_mmap,
.release = vb2_dma_sg_dmabuf_ops_release,
--
2.25.0.265.gbab2e86ba0-goog