Message-ID: <20210810020254.103134-1-guangming.cao@mediatek.com>
Date: Tue, 10 Aug 2021 10:02:54 +0800
From: <guangming.cao@...iatek.com>
To: Sumit Semwal <sumit.semwal@...aro.org>,
Benjamin Gaignard <benjamin.gaignard@...aro.org>,
Liam Mark <lmark@...eaurora.org>,
Laura Abbott <labbott@...hat.com>,
Brian Starkey <Brian.Starkey@....com>,
John Stultz <john.stultz@...aro.org>,
Christian König <christian.koenig@....com>,
Matthias Brugger <matthias.bgg@...il.com>,
"open list:DMA-BUF HEAPS FRAMEWORK" <linux-media@...r.kernel.org>,
"open list:DMA-BUF HEAPS FRAMEWORK" <dri-devel@...ts.freedesktop.org>,
"moderated list:DMA-BUF HEAPS FRAMEWORK"
<linaro-mm-sig@...ts.linaro.org>,
open list <linux-kernel@...r.kernel.org>,
"moderated list:ARM/Mediatek SoC support"
<linux-arm-kernel@...ts.infradead.org>,
"moderated list:ARM/Mediatek SoC support"
<linux-mediatek@...ts.infradead.org>
CC: <wsd_upstream@...iatek.com>,
Guangming Cao <Guangming.Cao@...iatek.com>
Subject: [PATCH] dma_heap: enable map_attrs when (un)map_attachment
From: Guangming Cao <Guangming.Cao@...iatek.com>
For dma-heap importers, there is currently no way to skip the CPU
cache sync when a heap (un)maps an iova, although the same can be
done at allocation time by passing DMA_ATTR_SKIP_CPU_SYNC to
dma_alloc_attrs().

To keep the two paths consistent, honor the attachment's
dma_map_attrs in the dma-heap (un)map_attachment callbacks.
Signed-off-by: Guangming Cao <Guangming.Cao@...iatek.com>
---
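Illustrative importer-side sketch (not part of this patch): a minimal
example of how a driver might request that the heap skip the cache sync,
assuming the proposed dma_map_attrs field on struct dma_buf_attachment.
The helper name map_without_cpu_sync() and the dev/dmabuf parameters are
hypothetical placeholders.

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

/*
 * Hypothetical importer helper: attach to a heap-exported dma-buf and
 * map it without the implicit CPU cache sync, relying on the proposed
 * attachment->dma_map_attrs being honored by the heap's map_dma_buf.
 */
static struct sg_table *map_without_cpu_sync(struct device *dev,
					     struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* Proposed field; only meaningful with this patch applied. */
	attach->dma_map_attrs = DMA_ATTR_SKIP_CPU_SYNC;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		dma_buf_detach(dmabuf, attach);

	return sgt;
}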
 drivers/dma-buf/heaps/cma_heap.c    | 6 ++++--
 drivers/dma-buf/heaps/system_heap.c | 6 ++++--
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 0c05b79870f9..2c9feb3bfc3e 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -99,9 +99,10 @@ static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachme
 {
 	struct dma_heap_attachment *a = attachment->priv;
 	struct sg_table *table = &a->table;
+	int attrs = attachment->dma_map_attrs;
 	int ret;
 
-	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+	ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
 	if (ret)
 		return ERR_PTR(-ENOMEM);
 	a->mapped = true;
@@ -113,9 +114,10 @@ static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
 				   enum dma_data_direction direction)
 {
 	struct dma_heap_attachment *a = attachment->priv;
+	int attrs = attachment->dma_map_attrs;
 
 	a->mapped = false;
-	dma_unmap_sgtable(attachment->dev, table, direction, 0);
+	dma_unmap_sgtable(attachment->dev, table, direction, attrs);
 }
 
 static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 23a7e74ef966..fc7b1e02988e 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -130,9 +130,10 @@ static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attac
 {
 	struct dma_heap_attachment *a = attachment->priv;
 	struct sg_table *table = a->table;
+	int attrs = attachment->dma_map_attrs;
 	int ret;
 
-	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+	ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -145,9 +146,10 @@ static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
 				       enum dma_data_direction direction)
 {
 	struct dma_heap_attachment *a = attachment->priv;
+	int attrs = attachment->dma_map_attrs;
 
 	a->mapped = false;
-	dma_unmap_sgtable(attachment->dev, table, direction, 0);
+	dma_unmap_sgtable(attachment->dev, table, direction, attrs);
 }
 
 static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
--
2.17.1