Message-Id: <20210120210937.15069-4-john.stultz@linaro.org>
Date:   Wed, 20 Jan 2021 21:09:37 +0000
From:   John Stultz <john.stultz@...aro.org>
To:     lkml <linux-kernel@...r.kernel.org>
Cc:     Bing Song <bing.song@....com>, Daniel Vetter <daniel@...ll.ch>,
        Sumit Semwal <sumit.semwal@...aro.org>,
        Liam Mark <lmark@...eaurora.org>,
        Laura Abbott <labbott@...nel.org>,
        Brian Starkey <Brian.Starkey@....com>,
        Hridya Valsaraju <hridya@...gle.com>,
        Suren Baghdasaryan <surenb@...gle.com>,
        Sandeep Patil <sspatil@...gle.com>,
        Daniel Mentz <danielmentz@...gle.com>,
        Chris Goldsworthy <cgoldswo@...eaurora.org>,
        Ørjan Eide <orjan.eide@....com>,
        Robin Murphy <robin.murphy@....com>,
        Ezequiel Garcia <ezequiel@...labora.com>,
        Simon Ser <contact@...rsion.fr>,
        James Jones <jajones@...dia.com>, linux-media@...r.kernel.org,
        dri-devel@...ts.freedesktop.org,
        John Stultz <john.stultz@...aro.org>
Subject: [PATCH 3/3] dma-buf: cma_heap: Add a cma-uncached heap re-using the cma heap

From: Bing Song <bing.song@....com>

This adds a heap that allocates CMA buffers mapped as write-combined,
so they are not cached by the CPU.
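
For context, a minimal userspace sketch of allocating from the new heap.
The heap name "reserved-uncached" below is hypothetical; the actual node
under /dev/dma_heap/ is "<cma-region-name>-uncached". The ioctl and the
allocation struct are the existing dma-heap UAPI from <linux/dma-heap.h>:

  #include <fcntl.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <unistd.h>
  #include <linux/dma-heap.h>

  int main(void)
  {
      struct dma_heap_allocation_data alloc = {
          .len = 4096,
          .fd_flags = O_RDWR | O_CLOEXEC,
      };
      void *p;

      /* "reserved-uncached" stands in for "<cma-name>-uncached" */
      int heap_fd = open("/dev/dma_heap/reserved-uncached",
                         O_RDONLY | O_CLOEXEC);
      if (heap_fd < 0)
          return 1;

      /* Standard dma-heap allocation ioctl; returns a dma-buf fd in alloc.fd */
      if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0)
          return 1;

      /* CPU mappings of this buffer are write-combined (uncached) */
      p = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED,
               alloc.fd, 0);
      if (p == MAP_FAILED)
          return 1;

      memset(p, 0, alloc.len);
      munmap(p, alloc.len);
      close(alloc.fd);
      close(heap_fd);
      return 0;
  }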

Cc: Daniel Vetter <daniel@...ll.ch>
Cc: Sumit Semwal <sumit.semwal@...aro.org>
Cc: Liam Mark <lmark@...eaurora.org>
Cc: Laura Abbott <labbott@...nel.org>
Cc: Brian Starkey <Brian.Starkey@....com>
Cc: Hridya Valsaraju <hridya@...gle.com>
Cc: Suren Baghdasaryan <surenb@...gle.com>
Cc: Sandeep Patil <sspatil@...gle.com>
Cc: Daniel Mentz <danielmentz@...gle.com>
Cc: Chris Goldsworthy <cgoldswo@...eaurora.org>
Cc: Ørjan Eide <orjan.eide@....com>
Cc: Robin Murphy <robin.murphy@....com>
Cc: Ezequiel Garcia <ezequiel@...labora.com>
Cc: Simon Ser <contact@...rsion.fr>
Cc: James Jones <jajones@...dia.com>
Cc: linux-media@...r.kernel.org
Cc: dri-devel@...ts.freedesktop.org
Signed-off-by: Bing Song <bing.song@....com>
Signed-off-by: John Stultz <john.stultz@...aro.org>
---
 drivers/dma-buf/heaps/cma_heap.c | 123 ++++++++++++++++++++++++++++----
 1 file changed, 109 insertions(+), 14 deletions(-)

diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 364fc2f3e499..1b8c6eb0a8ea 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -38,6 +38,7 @@ struct cma_heap_buffer {
 	pgoff_t pagecount;
 	int vmap_cnt;
 	void *vaddr;
+	bool uncached;
 };
 
 struct dma_heap_attachment {
@@ -45,6 +46,7 @@ struct dma_heap_attachment {
 	struct sg_table table;
 	struct list_head list;
 	bool mapped;
+	bool uncached;
 };
 
 static int cma_heap_attach(struct dma_buf *dmabuf,
@@ -70,6 +72,7 @@ static int cma_heap_attach(struct dma_buf *dmabuf,
 	a->dev = attachment->dev;
 	INIT_LIST_HEAD(&a->list);
 	a->mapped = false;
+	a->uncached = buffer->uncached;
 
 	attachment->priv = a;
 
@@ -99,8 +102,12 @@ static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachme
 {
 	struct dma_heap_attachment *a = attachment->priv;
 	struct sg_table *table = &a->table;
+	int attr = 0;
 	int ret;
 
+	if (a->uncached)
+		attr = DMA_ATTR_SKIP_CPU_SYNC;
+
-	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+	ret = dma_map_sgtable(attachment->dev, table, direction, attr);
 	if (ret)
 		return ERR_PTR(-ENOMEM);
@@ -113,7 +120,10 @@ static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
 				   enum dma_data_direction direction)
 {
 	struct dma_heap_attachment *a = attachment->priv;
+	int attr = 0;
 
+	if (a->uncached)
+		attr = DMA_ATTR_SKIP_CPU_SYNC;
 	a->mapped = false;
-	dma_unmap_sgtable(attachment->dev, table, direction, 0);
+	dma_unmap_sgtable(attachment->dev, table, direction, attr);
 }
@@ -128,10 +138,12 @@ static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
 
 	mutex_lock(&buffer->lock);
-	list_for_each_entry(a, &buffer->attachments, list) {
-		if (!a->mapped)
-			continue;
-		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
+	if (!buffer->uncached) {
+		list_for_each_entry(a, &buffer->attachments, list) {
+			if (!a->mapped)
+				continue;
+			dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
+		}
 	}
 	mutex_unlock(&buffer->lock);
 
@@ -148,10 +160,12 @@ static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
 
 	mutex_lock(&buffer->lock);
-	list_for_each_entry(a, &buffer->attachments, list) {
-		if (!a->mapped)
-			continue;
-		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
+	if (!buffer->uncached) {
+		list_for_each_entry(a, &buffer->attachments, list) {
+			if (!a->mapped)
+				continue;
+			dma_sync_sgtable_for_device(a->dev, &a->table, direction);
+		}
 	}
 	mutex_unlock(&buffer->lock);
 
@@ -183,6 +197,9 @@ static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
 		return -EINVAL;
 
+	if (buffer->uncached)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
 	vma->vm_ops = &dma_heap_vm_ops;
 	vma->vm_private_data = buffer;
 
@@ -191,9 +208,13 @@ static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 
 static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
 {
+	pgprot_t pgprot = PAGE_KERNEL;
 	void *vaddr;
 
-	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
+	if (buffer->uncached)
+		pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, pgprot);
 	if (!vaddr)
 		return ERR_PTR(-ENOMEM);
 
@@ -271,10 +292,11 @@ static const struct dma_buf_ops cma_heap_buf_ops = {
 	.release = cma_heap_dma_buf_release,
 };
 
-static int cma_heap_allocate(struct dma_heap *heap,
+static int cma_heap_do_allocate(struct dma_heap *heap,
 				  unsigned long len,
 				  unsigned long fd_flags,
-				  unsigned long heap_flags)
+				  unsigned long heap_flags,
+				  bool uncached)
 {
 	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
 	struct cma_heap_buffer *buffer;
@@ -283,8 +305,9 @@ static int cma_heap_allocate(struct dma_heap *heap,
 	pgoff_t pagecount = size >> PAGE_SHIFT;
 	unsigned long align = get_order(size);
 	struct page *cma_pages;
+	struct sg_table table;
 	struct dma_buf *dmabuf;
-	int ret = -ENOMEM;
+	int ret = -ENOMEM, ret_sg_table;
 	pgoff_t pg;
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
@@ -294,6 +317,7 @@ static int cma_heap_allocate(struct dma_heap *heap,
 	INIT_LIST_HEAD(&buffer->attachments);
 	mutex_init(&buffer->lock);
 	buffer->len = size;
+	buffer->uncached = uncached;
 
 	if (align > CONFIG_CMA_ALIGNMENT)
 		align = CONFIG_CMA_ALIGNMENT;
@@ -356,6 +380,18 @@ static int cma_heap_allocate(struct dma_heap *heap,
 		return ret;
 	}
 
+	if (buffer->uncached) {
+		ret_sg_table = sg_alloc_table(&table, 1, GFP_KERNEL);
+		if (ret_sg_table)
+			return ret_sg_table;
+
+		sg_set_page(table.sgl, cma_pages, size, 0);
+
+		dma_map_sgtable(dma_heap_get_dev(heap), &table, DMA_BIDIRECTIONAL, 0);
+		dma_unmap_sgtable(dma_heap_get_dev(heap), &table, DMA_BIDIRECTIONAL, 0);
+		sg_free_table(&table);
+	}
+
 	return ret;
 
 free_pages:
@@ -368,14 +404,45 @@ static int cma_heap_allocate(struct dma_heap *heap,
 	return ret;
 }
 
+static int cma_heap_allocate(struct dma_heap *heap,
+				  unsigned long len,
+				  unsigned long fd_flags,
+				  unsigned long heap_flags)
+{
+	return cma_heap_do_allocate(heap, len, fd_flags, heap_flags, false);
+}
+
+static int cma_uncached_heap_allocate(struct dma_heap *heap,
+				  unsigned long len,
+				  unsigned long fd_flags,
+				  unsigned long heap_flags)
+{
+	return cma_heap_do_allocate(heap, len, fd_flags, heap_flags, true);
+}
+
+/* Dummy allocate op used until dma_coerce_mask_and_coherent() has been called */
+static int cma_uncached_heap_not_initialized(struct dma_heap *heap,
+						unsigned long len,
+						unsigned long fd_flags,
+						unsigned long heap_flags)
+{
+	return -EBUSY;
+}
+
 static const struct dma_heap_ops cma_heap_ops = {
 	.allocate = cma_heap_allocate,
 };
 
+static struct dma_heap_ops cma_uncached_heap_ops = {
+	.allocate = cma_uncached_heap_not_initialized,
+};
+
 static int __add_cma_heap(struct cma *cma, void *data)
 {
 	struct cma_heap *cma_heap;
 	struct dma_heap_export_info exp_info;
+	const char *postfixed = "-uncached";
+	char *cma_name;
 
 	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
 	if (!cma_heap)
@@ -394,6 +461,34 @@ static int __add_cma_heap(struct cma *cma, void *data)
 		return ret;
 	}
 
+	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
+	if (!cma_heap)
+		return -ENOMEM;
+	cma_heap->cma = cma;
+
+	cma_name = kzalloc(strlen(cma_get_name(cma)) + strlen(postfixed) + 1, GFP_KERNEL);
+	if (!cma_name) {
+		kfree(cma_heap);
+		return -ENOMEM;
+	}
+
+	exp_info.name = strcat(strcpy(cma_name, cma_get_name(cma)), postfixed);
+	exp_info.ops = &cma_uncached_heap_ops;
+	exp_info.priv = cma_heap;
+
+	cma_heap->heap = dma_heap_add(&exp_info);
+	if (IS_ERR(cma_heap->heap)) {
+		int ret = PTR_ERR(cma_heap->heap);
+
+		kfree(cma_heap);
+		kfree(cma_name);
+		return ret;
+	}
+
+	dma_coerce_mask_and_coherent(dma_heap_get_dev(cma_heap->heap), DMA_BIT_MASK(64));
+	mb(); /* make sure we only set allocate after dma_mask is set */
+	cma_uncached_heap_ops.allocate = cma_uncached_heap_allocate;
+
 	return 0;
 }
 
-- 
2.17.1
