Message-Id: <20190823234747.106510-5-john.stultz@linaro.org>
Date: Fri, 23 Aug 2019 23:47:46 +0000
From: John Stultz <john.stultz@...aro.org>
To: lkml <linux-kernel@...r.kernel.org>
Cc: John Stultz <john.stultz@...aro.org>,
Laura Abbott <labbott@...hat.com>,
Benjamin Gaignard <benjamin.gaignard@...aro.org>,
Sumit Semwal <sumit.semwal@...aro.org>,
Liam Mark <lmark@...eaurora.org>,
Pratik Patel <pratikp@...eaurora.org>,
Brian Starkey <Brian.Starkey@....com>,
Vincent Donnefort <Vincent.Donnefort@....com>,
Sudipto Paul <Sudipto.Paul@....com>,
"Andrew F . Davis" <afd@...com>,
Christoph Hellwig <hch@...radead.org>,
Chenbo Feng <fengc@...gle.com>,
Alistair Strachan <astrachan@...gle.com>,
Hridya Valsaraju <hridya@...gle.com>,
dri-devel@...ts.freedesktop.org
Subject: [PATCH v8 4/5] dma-buf: heaps: Add CMA heap to dmabuf heaps

This adds a CMA heap, which allows userspace to allocate
a dma-buf of contiguous memory out of a CMA region.
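
For illustration, a minimal userspace sketch of allocating from this
heap. It assumes the dma-heap UAPI added in patch 1 of this series
(<linux/dma-heap.h> with struct dma_heap_allocation_data and
DMA_HEAP_IOCTL_ALLOC); the "reserved" node name is only an example
and depends on how the CMA region was declared on your system:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dma-heap.h>

int main(void)
{
	struct dma_heap_allocation_data data = {
		.len = 1024 * 1024,		/* 1 MiB; the heap page-aligns this */
		.fd_flags = O_RDWR | O_CLOEXEC,	/* flags for the returned dmabuf fd */
	};
	int heap_fd, ret;

	heap_fd = open("/dev/dma_heap/reserved", O_RDONLY);
	if (heap_fd < 0) {
		perror("open");
		return 1;
	}

	ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
	if (ret < 0) {
		perror("DMA_HEAP_IOCTL_ALLOC");
		close(heap_fd);
		return 1;
	}

	printf("allocated dmabuf fd %u\n", data.fd);
	/* data.fd can now be mmap()ed or shared with a device driver */
	close(data.fd);
	close(heap_fd);
	return 0;
}
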
This code is an evolution of the Android ION implementation, so
thanks to its original author and maintainers:
Benjamin Gaignard, Laura Abbott, and others!
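
Since registration walks every CMA area present at boot
(cma_for_each_area() in the diff below), each region appears as its
own node under /dev/dma_heap/. A small sketch for listing the
available heaps, again assuming the chardev layout from patch 1:

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	struct dirent *ent;
	DIR *d = opendir("/dev/dma_heap");

	if (!d) {
		perror("opendir /dev/dma_heap");
		return 1;
	}
	while ((ent = readdir(d)) != NULL) {
		if (ent->d_name[0] != '.')	/* skip dot entries */
			printf("heap: %s\n", ent->d_name);
	}
	closedir(d);
	return 0;
}
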
Cc: Laura Abbott <labbott@...hat.com>
Cc: Benjamin Gaignard <benjamin.gaignard@...aro.org>
Cc: Sumit Semwal <sumit.semwal@...aro.org>
Cc: Liam Mark <lmark@...eaurora.org>
Cc: Pratik Patel <pratikp@...eaurora.org>
Cc: Brian Starkey <Brian.Starkey@....com>
Cc: Vincent Donnefort <Vincent.Donnefort@....com>
Cc: Sudipto Paul <Sudipto.Paul@....com>
Cc: Andrew F. Davis <afd@...com>
Cc: Christoph Hellwig <hch@...radead.org>
Cc: Chenbo Feng <fengc@...gle.com>
Cc: Alistair Strachan <astrachan@...gle.com>
Cc: Hridya Valsaraju <hridya@...gle.com>
Cc: dri-devel@...ts.freedesktop.org
Reviewed-by: Benjamin Gaignard <benjamin.gaignard@...aro.org>
Signed-off-by: John Stultz <john.stultz@...aro.org>
---
v2:
* Switch allocate to return dmabuf fd
* Simplify init code
* Checkpatch fixups
v3:
* Switch to inline function for to_cma_heap()
* Minor cleanups suggested by Brian
* Fold in new registration style from Andrew
* Folded in changes from Andrew to use simplified page list
from the heap helpers
v4:
* Use the fd_flags when creating dmabuf fd (Suggested by
Benjamin)
* Use precalculated pagecount (Suggested by Andrew)
v6:
* Changed variable names to improve clarity, as suggested
by Brian
v7:
* Use newly lower-cased init_heap_helper_buffer helper
* Use new dmabuf export helper
v8:
* Make struct dma_heap_ops const (Suggested by Christoph)
* Condense dma_heap_buffer and heap_helper_buffer (suggested by
Christoph)
* Checkpatch whitespace fixups
---
drivers/dma-buf/heaps/Kconfig | 8 ++
drivers/dma-buf/heaps/Makefile | 1 +
drivers/dma-buf/heaps/cma_heap.c | 164 +++++++++++++++++++++++++++++++
3 files changed, 173 insertions(+)
create mode 100644 drivers/dma-buf/heaps/cma_heap.c

diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index 205052744169..a5eef06c4226 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -4,3 +4,11 @@ config DMABUF_HEAPS_SYSTEM
help
Choose this option to enable the system dmabuf heap. The system heap
is backed by pages from the buddy allocator. If in doubt, say Y.
+
+config DMABUF_HEAPS_CMA
+ bool "DMA-BUF CMA Heap"
+ depends on DMABUF_HEAPS && DMA_CMA
+ help
+ Choose this option to enable dma-buf CMA heap. This heap is backed
+ by the Contiguous Memory Allocator (CMA). If your system has these
+ regions, you should say Y here.
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
index d1808eca2581..6e54cdec3da0 100644
--- a/drivers/dma-buf/heaps/Makefile
+++ b/drivers/dma-buf/heaps/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += heap-helpers.o
obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
+obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
new file mode 100644
index 000000000000..b8f67b7c6a5c
--- /dev/null
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMABUF CMA heap exporter
+ *
+ * Copyright (C) 2012, 2019 Linaro Ltd.
+ * Author: <benjamin.gaignard@...aro.org> for ST-Ericsson.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/cma.h>
+#include <linux/scatterlist.h>
+#include <linux/highmem.h>
+
+#include "heap-helpers.h"
+
+struct cma_heap {
+ struct dma_heap *heap;
+ struct cma *cma;
+};
+
+static void cma_heap_free(struct heap_helper_buffer *buffer)
+{
+ struct cma_heap *cma_heap = dma_heap_get_data(buffer->heap);
+ unsigned long nr_pages = buffer->pagecount;
+ struct page *cma_pages = buffer->priv_virt;
+
+ /* free page list */
+ kfree(buffer->pages);
+ /* release memory */
+ cma_release(cma_heap->cma, cma_pages, nr_pages);
+ kfree(buffer);
+}
+
+/* dmabuf heap CMA operations functions */
+static int cma_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
+{
+ struct cma_heap *cma_heap = dma_heap_get_data(heap);
+ struct heap_helper_buffer *helper_buffer;
+ struct page *cma_pages;
+ size_t size = PAGE_ALIGN(len);
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ unsigned long align = get_order(size);
+ struct dma_buf *dmabuf;
+ int ret = -ENOMEM;
+ pgoff_t pg;
+
+ if (align > CONFIG_CMA_ALIGNMENT)
+ align = CONFIG_CMA_ALIGNMENT;
+
+ helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
+ if (!helper_buffer)
+ return -ENOMEM;
+
+ init_heap_helper_buffer(helper_buffer, cma_heap_free);
+ helper_buffer->flags = heap_flags;
+ helper_buffer->heap = heap;
+ helper_buffer->size = len;
+
+ cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
+ if (!cma_pages)
+ goto free_buf;
+
+ if (PageHighMem(cma_pages)) {
+ unsigned long nr_clear_pages = nr_pages;
+ struct page *page = cma_pages;
+
+ while (nr_clear_pages > 0) {
+ void *vaddr = kmap_atomic(page);
+
+ memset(vaddr, 0, PAGE_SIZE);
+ kunmap_atomic(vaddr);
+ page++;
+ nr_clear_pages--;
+ }
+ } else {
+ memset(page_address(cma_pages), 0, size);
+ }
+
+ helper_buffer->pagecount = nr_pages;
+ helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
+ sizeof(*helper_buffer->pages),
+ GFP_KERNEL);
+ if (!helper_buffer->pages) {
+ ret = -ENOMEM;
+ goto free_cma;
+ }
+
+ for (pg = 0; pg < helper_buffer->pagecount; pg++) {
+ helper_buffer->pages[pg] = &cma_pages[pg];
+ if (!helper_buffer->pages[pg])
+ goto free_pages;
+ }
+
+ /* create the dmabuf */
+ dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+ if (IS_ERR(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
+ goto free_pages;
+ }
+
+ helper_buffer->dmabuf = dmabuf;
+ helper_buffer->priv_virt = cma_pages;
+
+ ret = dma_buf_fd(dmabuf, fd_flags);
+ if (ret < 0) {
+ dma_buf_put(dmabuf);
+ /* just return, as put will call release and that will free */
+ return ret;
+ }
+
+ return ret;
+
+free_pages:
+ kfree(helper_buffer->pages);
+free_cma:
+ cma_release(cma_heap->cma, cma_pages, nr_pages);
+free_buf:
+ kfree(helper_buffer);
+ return ret;
+}
+
+static const struct dma_heap_ops cma_heap_ops = {
+ .allocate = cma_heap_allocate,
+};
+
+static int __add_cma_heap(struct cma *cma, void *data)
+{
+ struct cma_heap *cma_heap;
+ struct dma_heap_export_info exp_info;
+
+ cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
+ if (!cma_heap)
+ return -ENOMEM;
+ cma_heap->cma = cma;
+
+ exp_info.name = cma_get_name(cma);
+ exp_info.ops = &cma_heap_ops;
+ exp_info.priv = cma_heap;
+
+ cma_heap->heap = dma_heap_add(&exp_info);
+ if (IS_ERR(cma_heap->heap)) {
+ int ret = PTR_ERR(cma_heap->heap);
+
+ kfree(cma_heap);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int add_cma_heaps(void)
+{
+ cma_for_each_area(__add_cma_heap, NULL);
+ return 0;
+}
+device_initcall(add_cma_heaps);
--
2.17.1