Message-Id: <20190612012219.21652-1-nicoleotsuka@gmail.com>
Date: Tue, 11 Jun 2019 18:22:19 -0700
From: Nicolin Chen <nicoleotsuka@...il.com>
To: sumit.semwal@...aro.org, christian.koenig@....com
Cc: linux-media@...r.kernel.org, dri-devel@...ts.freedesktop.org,
linaro-mm-sig@...ts.linaro.org, linux-kernel@...r.kernel.org,
daniel.vetter@...ll.ch
Subject: [PATCH] dma-buf: refcount the attachment for cache_sgt_mapping

Commit f13e143e7444 ("dma-buf: start caching of sg_table objects v2")
added support for caching the sgt pointer in the attachment so that
users can reuse it without mapping the buffer again. However, the
caching rarely takes effect, because most dma-buf callers do attach()
and map_attachment() back-to-back, so the mapping is always created on
a freshly allocated attachment; drm_prime.c, for example:
	drm_gem_prime_import_dev() {
		attach = dma_buf_attach() {
			/* Allocating a new attach */
			attach = kzalloc();
			/* .... */
			return attach;
		}

		dma_buf_map_attachment(attach, direction) {
			/* attach->sgt is always NULL here, as attach is new */
			if (attach->sgt) {
				/* Reuse attach->sgt */
			}
			/* Otherwise, map it */
			attach->sgt = map();
		}
	}
So, for the cache_sgt_mapping use case, a caller would need to get the
same attachment pointer back in order to reuse its cached sgt. This
patch adds a refcount to the attachment and lets dma_buf_attach()
search the existing attachments for one whose dev pointer matches.
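
For illustration, a rough sketch of the flow after this patch when two
importers attach with the same struct device (simplified pseudo-code in
the style above; attach_a/attach_b and dir are placeholder names, and
unmap/error handling is omitted):

	/* Importer A */
	attach_a = dma_buf_attach(dmabuf, dev);        /* allocates, refcount = 1 */
	sgt_a = dma_buf_map_attachment(attach_a, dir); /* maps, caches in attach_a->sgt */

	/* Importer B, same device */
	attach_b = dma_buf_attach(dmabuf, dev);        /* finds attach_a, refcount = 2 */
	sgt_b = dma_buf_map_attachment(attach_b, dir); /* attach_b == attach_a, reuses cached sgt */

	dma_buf_detach(dmabuf, attach_b);              /* refcount = 1, mapping stays cached */
	dma_buf_detach(dmabuf, attach_a);              /* refcount = 0, unmaps and frees */
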
Signed-off-by: Nicolin Chen <nicoleotsuka@...il.com>
---
drivers/dma-buf/dma-buf.c | 23 +++++++++++++++++++++++
include/linux/dma-buf.h | 2 ++
2 files changed, 25 insertions(+)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index f4104a21b069..d0260553a31c 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -559,6 +559,21 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 	if (WARN_ON(!dmabuf || !dev))
 		return ERR_PTR(-EINVAL);
 
+	/* cache_sgt_mapping requires to reuse the same attachment pointer */
+	if (dmabuf->ops->cache_sgt_mapping) {
+		mutex_lock(&dmabuf->lock);
+
+		/* Search for existing attachment and increase its refcount */
+		list_for_each_entry(attach, &dmabuf->attachments, node) {
+			if (dev != attach->dev)
+				continue;
+			atomic_inc_not_zero(&attach->refcount);
+			goto unlock_attach;
+		}
+
+		mutex_unlock(&dmabuf->lock);
+	}
+
 	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
 	if (!attach)
 		return ERR_PTR(-ENOMEM);
@@ -575,6 +590,9 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 	}
 	list_add(&attach->node, &dmabuf->attachments);
 
+	atomic_set(&attach->refcount, 1);
+
+unlock_attach:
 	mutex_unlock(&dmabuf->lock);
 
 	return attach;
@@ -599,6 +617,11 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 	if (WARN_ON(!dmabuf || !attach))
 		return;
 
+	/* Decrease the refcount for cache_sgt_mapping use cases */
+	if (dmabuf->ops->cache_sgt_mapping &&
+	    atomic_dec_return(&attach->refcount))
+		return;
+
 	if (attach->sgt)
 		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
 
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 8a327566d7f4..65f12212ca2e 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -333,6 +333,7 @@ struct dma_buf {
  * @dev: device attached to the buffer.
  * @node: list of dma_buf_attachment.
  * @sgt: cached mapping.
+ * @refcount: refcount of the attachment for the same device.
  * @dir: direction of cached mapping.
  * @priv: exporter specific attachment data.
  *
@@ -350,6 +351,7 @@ struct dma_buf_attachment {
 	struct device *dev;
 	struct list_head node;
 	struct sg_table *sgt;
+	atomic_t refcount;
 	enum dma_data_direction dir;
 	void *priv;
 };
--
2.17.1