Message-Id: <20220917072356.2255620-3-jiho.chu@samsung.com>
Date:   Sat, 17 Sep 2022 16:23:45 +0900
From:   Jiho Chu <jiho.chu@...sung.com>
To:     gregkh@...uxfoundation.org, arnd@...db.de, ogabbay@...nel.org,
        krzysztof.kozlowski@...aro.org, broonie@...nel.org
Cc:     linux-kernel@...r.kernel.org, yelini.jeong@...sung.com,
        myungjoo.ham@...sung.com, jiho.chu@...sung.com
Subject: [PATCH v2 02/13] trinity: Add memory module

Add the DMA memory modules for the trinity driver.

The dma module handles DMA buffers through the generic DMA APIs,
together with a reserved memory area for the NPU device. The reserved
area holds the IDU binary, statistics, and profiling data. A separate
hwmem module handles user requests for memory allocation, which is
used for model data or input data.
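
For reference, a rough usage sketch from the driver's point of view
(error handling omitted; the variable names are only for illustration):

	/* probe: claim the reserved-memory region for the device */
	ret = trinity_dma_init(dev);

	/* allocate a buffer for model/input data; returns a dma-buf fd */
	fd = trinity_hwmem_alloc(dev, size, TRINITY_HWMEM_DMA_IOMMU);

	/* the fd can be mmap'ed by user space or imported back here */
	ret = trinity_hwmem_import_dmabuf_begin(dev, fd, &import_info);
	...
	trinity_hwmem_import_dmabuf_end(&import_info);

	trinity_hwmem_free(dev, fd);

	/* remove: release the reserved-memory region */
	trinity_dma_exit(dev);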

Signed-off-by: Jiho Chu <jiho.chu@...sung.com>
Signed-off-by: Yelin Jeong <yelini.jeong@...sung.com>
Signed-off-by: Dongju Chae <dongju.chae@...sung.com>
Signed-off-by: MyungJoo Ham <myungjoo.ham@...sung.com>
---
 drivers/misc/trinity/Makefile        |   1 +
 drivers/misc/trinity/trinity.c       |   1 +
 drivers/misc/trinity/trinity_dma.c   |  83 ++++++
 drivers/misc/trinity/trinity_dma.h   |  87 ++++++
 drivers/misc/trinity/trinity_hwmem.c | 380 +++++++++++++++++++++++++++
 drivers/misc/trinity/trinity_hwmem.h |  81 ++++++
 6 files changed, 633 insertions(+)
 create mode 100644 drivers/misc/trinity/trinity_dma.c
 create mode 100644 drivers/misc/trinity/trinity_dma.h
 create mode 100644 drivers/misc/trinity/trinity_hwmem.c
 create mode 100644 drivers/misc/trinity/trinity_hwmem.h

diff --git a/drivers/misc/trinity/Makefile b/drivers/misc/trinity/Makefile
index a8e5697d6d85..5d2b75112482 100644
--- a/drivers/misc/trinity/Makefile
+++ b/drivers/misc/trinity/Makefile
@@ -3,5 +3,6 @@
 obj-$(CONFIG_TRINITY_VISION2) += trinity_vision2.o
 
 trinity-y := trinity.o
+trinity-y += trinity_dma.o trinity_hwmem.o
 
 trinity_vision2-objs := $(trinity-y) trinity_vision2_drv.o
diff --git a/drivers/misc/trinity/trinity.c b/drivers/misc/trinity/trinity.c
index 1704eecfc439..3a492eef011f 100644
--- a/drivers/misc/trinity/trinity.c
+++ b/drivers/misc/trinity/trinity.c
@@ -11,6 +11,7 @@
  */
 
 #include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
 
 #include "trinity_common.h"
 
diff --git a/drivers/misc/trinity/trinity_dma.c b/drivers/misc/trinity/trinity_dma.c
new file mode 100644
index 000000000000..c93a9187ee9d
--- /dev/null
+++ b/drivers/misc/trinity/trinity_dma.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DMA memory for Trinity device drivers
+ *
+ * Copyright (C) 2022 Samsung Electronics
+ * Copyright (C) 2022 Jiho Chu <jiho.chu@...sung.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/of_reserved_mem.h>
+
+#include "trinity_dma.h"
+
+int trinity_dma_init(struct device *dev)
+{
+	return of_reserved_mem_device_init(dev);
+}
+
+void trinity_dma_exit(struct device *dev)
+{
+	of_reserved_mem_device_release(dev);
+}
+
+int trinity_dma_alloc(struct device *dev, size_t size, struct trinity_dma *dma)
+{
+	void *addr;
+	dma_addr_t dma_handle;
+	size_t aligned_size;
+
+	aligned_size = PAGE_ALIGN(size);
+	addr = dma_alloc_noncoherent(dev, aligned_size, &dma_handle,
+				DMA_BIDIRECTIONAL, GFP_ATOMIC);
+
+	if (!addr) {
+		dev_err(dev, "Failed to allocate DMA memory\n");
+		return -ENOMEM;
+	}
+
+	dma->addr = addr;
+	dma->dma_handle = dma_handle;
+	dma->size = aligned_size;
+
+	memset(addr, 0, aligned_size);
+	return 0;
+}
+
+void trinity_dma_free(struct device *dev, struct trinity_dma *dma)
+{
+	dma_free_noncoherent(dev, dma->size, dma->addr, dma->dma_handle,
+			DMA_BIDIRECTIONAL);
+}
+
+int trinity_dma_alloc_coherent(struct device *dev, size_t size, struct trinity_dma *dma)
+{
+	void *addr;
+	dma_addr_t dma_handle;
+	size_t aligned_size;
+
+	aligned_size = PAGE_ALIGN(size);
+	addr = dma_alloc_wc(dev, aligned_size, &dma_handle, GFP_KERNEL);
+	if (!addr) {
+		dev_err(dev, "Failed to allocate DMA memory\n");
+		return -ENOMEM;
+	}
+
+	dma->addr = addr;
+	dma->dma_handle = dma_handle;
+	dma->size = aligned_size;
+
+	memset(addr, 0, aligned_size);
+	return 0;
+}
+
+void trinity_dma_free_coherent(struct device *dev, struct trinity_dma *dma)
+{
+	dma_free_wc(dev, dma->size, dma->addr, dma->dma_handle);
+}
+
+int trinity_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *addr,
+			dma_addr_t dma_handle, size_t size)
+{
+	return dma_mmap_wc(dev, vma, addr, dma_handle, size);
+}
diff --git a/drivers/misc/trinity/trinity_dma.h b/drivers/misc/trinity/trinity_dma.h
new file mode 100644
index 000000000000..a7824196ff15
--- /dev/null
+++ b/drivers/misc/trinity/trinity_dma.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * DMA memory for Trinity device drivers
+ *
+ * Copyright (C) 2022 Samsung Electronics
+ * Copyright (C) 2022 Jiho Chu <jiho.chu@...sung.com>
+ */
+
+#ifndef __DRIVERS_MISC_TRINITY_DMA_H__
+#define __DRIVERS_MISC_TRINITY_DMA_H__
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm_types.h>
+
+/**
+ * struct trinity_dma - A data structure for DMA information
+ *
+ * @addr: A virtual address of memory
+ * @dma_handle: a handle for DMA
+ * @size: size of DMA memory
+ */
+struct trinity_dma {
+	void *addr;
+	dma_addr_t dma_handle;
+	size_t size;
+};
+
+/**
+ * trinity_dma_init() - Initialize DMA memory
+ * @dev: A pointer to the instance of the device
+ *
+ * Return: 0 on success. Otherwise, returns negative error.
+ */
+int trinity_dma_init(struct device *dev);
+
+/**
+ * trinity_dma_exit() - Release the reserved DMA memory
+ * @dev: A pointer to the instance of the device
+ */
+void trinity_dma_exit(struct device *dev);
+
+/**
+ * trinity_dma_alloc() - Allocate DMA memory
+ * @dev: A pointer to the instance of the device
+ * @size: size of DMA memory
+ * @dma: A structure of DMA information
+ *
+ * Return: 0 on success. Otherwise, returns negative error.
+ */
+int trinity_dma_alloc(struct device *dev, size_t size, struct trinity_dma *dma);
+
+/**
+ * trinity_dma_free() - Free DMA memory
+ * @dev: A pointer to the instance of the device
+ * @dma: A structure of DMA information
+ */
+void trinity_dma_free(struct device *dev, struct trinity_dma *dma);
+
+/**
+ * trinity_dma_alloc_coherent() - Allocate coherent DMA memory
+ * @dev: A pointer to the instance of the device
+ * @size: size of DMA memory
+ * @dma: A structure of DMA information
+ *
+ * Return: 0 on success. Otherwise, returns negative error.
+ */
+int trinity_dma_alloc_coherent(struct device *dev, size_t size, struct trinity_dma *dma);
+
+/**
+ * trinity_dma_free_coherent() - Free coherent DMA memory
+ * @dev: A pointer to the instance of the device
+ * @dma: A structure of DMA information
+ */
+void trinity_dma_free_coherent(struct device *dev, struct trinity_dma *dma);
+
+/**
+ * trinity_dma_mmap() - mmap for DMA memory
+ * @dev: A pointer to the instance of the device
+ * @vma: A struct for virtual memory area
+ * @addr: virtual address of memory
+ * @dma_handle: a handle for dma
+ * @size: size of mapped DMA memory
+ *
+ * Return: 0 on success. Otherwise, returns negative error.
+ */
+int trinity_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *addr,
+		dma_addr_t dma_handle, size_t size);
+
+#endif /* __DRIVERS_MISC_TRINITY_DMA_H__ */
diff --git a/drivers/misc/trinity/trinity_hwmem.c b/drivers/misc/trinity/trinity_hwmem.c
new file mode 100644
index 000000000000..afc2cc9193c6
--- /dev/null
+++ b/drivers/misc/trinity/trinity_hwmem.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DMA memory buffers for Trinity device driver
+ *
+ * Copyright (C) 2020-2022 Samsung Electronics
+ * Copyright (C) 2020 Dongju Chae <dongju.chae@...sung.com>
+ * Copyright (C) 2020 Wook Song <wook16.song@...sung.com>
+ * Copyright (C) 2022 MyungJoo Ham <myungjoo.ham@...sung.com>
+ * Copyright (C) 2022 Yelin Jeong <yelini.jeong@...sung.com>
+ * Copyright (C) 2022 Jiho Chu <jiho.chu@...sung.com>
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/version.h>
+
+#include "trinity_dma.h"
+#include "trinity_hwmem.h"
+
+#define dbuf_to_trnt_hwmem(d) ((struct trinity_hwmem *)(d)->priv)
+#define vma_to_trnt_hwmem(v) ((struct trinity_hwmem *)(v)->vm_private_data)
+
+/**
+ * struct trinity_hwmem - A data structure for Trinity DMA buffer management
+ * @dev: A pointer to device which this hwmem belongs to.
+ * @dbuf: The dma_buf instance.
+ * @refcnt: Reference counts.
+ * @direction: A variable indicating the DMA data direction in allocating this
+ *		dma_buf.
+ * @type: The requested &enum trinity_hwmem_type of this buffer.
+ * @attrs: Attributes used in allocating this dma_buf.
+ * @req_size: The size of the DMA buffer that the user requested to allocate.
+ * @alc_size: The size of the DMA buffer which is actually allocated.
+ * @is_cont: True if the buffer was requested as a contiguous
+ *		(TRINITY_HWMEM_DMA_CONT) allocation.
+ * @addr: The DMA (physical) address of this dma_buf.
+ * @cookie: The kernel virtual address returned by the DMA allocator.
+ */
+struct trinity_hwmem {
+	struct device *dev;
+	struct dma_buf *dbuf;
+	struct kref refcnt;
+
+	enum dma_data_direction direction;
+	enum trinity_hwmem_type type;
+
+	unsigned long attrs;
+	size_t req_size;
+	size_t alc_size;
+
+	bool is_cont;
+	dma_addr_t addr;
+	void *cookie;
+};
+
+static void __trinity_hwmem_free(struct kref *refcnt)
+{
+	struct trinity_hwmem *mem =
+		container_of(refcnt, struct trinity_hwmem, refcnt);
+	/*
+	 * When the dmabuf reference counter becomes zero,
+	 * trinity_hwmem_dbuf_ops_release() is triggered.
+	 */
+	dma_buf_put(mem->dbuf);
+}
+
+static void __trinity_hwmem_put(struct trinity_hwmem *mem)
+{
+	kref_put(&mem->refcnt, __trinity_hwmem_free);
+}
+
+static void __trinity_hwmem_put_dmabuf(struct dma_buf *dbuf)
+{
+	__trinity_hwmem_put(dbuf_to_trnt_hwmem(dbuf));
+}
+
+static struct trinity_hwmem *__trinity_hwmem_get(struct trinity_hwmem *mem)
+{
+	kref_get(&mem->refcnt);
+
+	return mem;
+}
+
+static void trinity_hwmem_dbuf_ops_detach(struct dma_buf *dbuf,
+					  struct dma_buf_attachment *attachment)
+{
+	struct trinity_hwmem *mem = dbuf_to_trnt_hwmem(dbuf);
+
+	/* Decrease ref count of the backing storage */
+	__trinity_hwmem_put(mem);
+}
+
+static int trinity_hwmem_dbuf_ops_attach(struct dma_buf *dbuf,
+					 struct dma_buf_attachment *attachment)
+{
+	struct trinity_hwmem *mem = dbuf_to_trnt_hwmem(dbuf);
+
+	/* Increase ref count of the backing storage */
+	mem = __trinity_hwmem_get(mem);
+	attachment->priv = mem;
+
+	return 0;
+}
+
+static struct sg_table *
+trinity_hwmem_dbuf_ops_map_dma_buf(struct dma_buf_attachment *attachment,
+				   enum dma_data_direction dir)
+{
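+	/*
+	 * Mapping through attachments is not supported by this exporter;
+	 * importers access the buffer via vmap()/mmap() instead.
+	 */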
+	return NULL;
+}
+
+static void
+trinity_hwmem_dbuf_ops_unmap_dma_buf(struct dma_buf_attachment *attachment,
+				     struct sg_table *sgt,
+				     enum dma_data_direction dir)
+{
+}
+
+static void trinity_hwmem_vm_ops_open(struct vm_area_struct *vma)
+{
+	struct trinity_hwmem *mem = vma_to_trnt_hwmem(vma);
+
+	__trinity_hwmem_get(mem);
+}
+
+static void trinity_hwmem_vm_ops_close(struct vm_area_struct *vma)
+{
+	struct trinity_hwmem *mem = vma_to_trnt_hwmem(vma);
+
+	__trinity_hwmem_put(mem);
+}
+
+static const struct vm_operations_struct trinity_hwmem_vm_ops = {
+	.open = trinity_hwmem_vm_ops_open,
+	.close = trinity_hwmem_vm_ops_close,
+};
+
+static int32_t trinity_hwmem_dbuf_ops_mmap(struct dma_buf *dbuf,
+					   struct vm_area_struct *vma)
+{
+	struct trinity_hwmem *mem;
+	int32_t ret;
+
+	if (!dbuf)
+		return -EINVAL;
+
+	mem = dbuf_to_trnt_hwmem(dbuf);
+	if (!mem)
+		return -EINVAL;
+
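+	/* Always map the whole buffer from its start; ignore any user offset */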
+	vma->vm_pgoff = 0;
+
+	ret = trinity_dma_mmap(mem->dev, vma, mem->cookie, mem->addr, mem->alc_size);
+	if (ret)
+		return ret;
+
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_private_data = mem;
+	vma->vm_ops = &trinity_hwmem_vm_ops;
+
+	vma->vm_ops->open(vma);
+
+	return 0;
+}
+
+static void trinity_hwmem_dbuf_ops_release(struct dma_buf *dbuf)
+{
+	struct trinity_dma dma_mem;
+	struct trinity_hwmem *mem = dbuf_to_trnt_hwmem(dbuf);
+
+	dma_mem.addr = mem->cookie;
+	dma_mem.dma_handle = mem->addr;
+	dma_mem.size = mem->alc_size;
+
+	if (mem->type == TRINITY_HWMEM_DMA_CONT)
+		trinity_dma_free_coherent(mem->dev, &dma_mem);
+	else
+		trinity_dma_free(mem->dev, &dma_mem);
+
+	put_device(mem->dev);
+
+	mem->dbuf->priv = NULL;
+
+	kfree(mem);
+}
+
+static int trinity_hwmem_dbuf_ops_vmap(struct dma_buf *dbuf,
+				       struct iosys_map *map)
+{
+	struct trinity_hwmem *mem;
+
+	if (!dbuf)
+		return -EINVAL;
+
+	mem = dbuf_to_trnt_hwmem(dbuf);
+	if (!mem)
+		return -ENOENT;
+
+	iosys_map_set_vaddr(map, mem->cookie);
+
+	return 0;
+}
+
+static const struct dma_buf_ops trinity_hwmem_dbuf_ops = {
+	.vmap = trinity_hwmem_dbuf_ops_vmap,
+	.attach = trinity_hwmem_dbuf_ops_attach,
+	.detach = trinity_hwmem_dbuf_ops_detach,
+	.map_dma_buf = trinity_hwmem_dbuf_ops_map_dma_buf,
+	.unmap_dma_buf = trinity_hwmem_dbuf_ops_unmap_dma_buf,
+	.release = trinity_hwmem_dbuf_ops_release,
+	.mmap = trinity_hwmem_dbuf_ops_mmap,
+};
+
+static void *__trinity_hwmem_alloc(struct device *dev, const size_t size,
+				   const enum dma_data_direction dir,
+				   const enum trinity_hwmem_type type)
+{
+	size_t aligned_size = ALIGN(size, PAGE_SIZE);
+	struct trinity_hwmem *mem;
+	struct trinity_dma dma_mem;
+	int ret;
+
+	if (WARN_ON(!dev))
+		return ERR_PTR(-EINVAL);
+
+	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem)
+		return ERR_PTR(-ENOMEM);
+
+	mem->dev = get_device(dev);
+	mem->req_size = size;
+	mem->alc_size = aligned_size;
+	mem->direction = dir;
+	mem->type = type;
+	mem->is_cont = (type == TRINITY_HWMEM_DMA_CONT);
+
+	mem->attrs |= DMA_ATTR_WRITE_COMBINE;
+	mem->attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+
+	if (mem->is_cont)
+		ret = trinity_dma_alloc_coherent(dev, aligned_size, &dma_mem);
+	else
+		ret = trinity_dma_alloc(dev, aligned_size, &dma_mem);
+
+	if (ret < 0) {
+		dev_err(mem->dev, "Unable to allocate memory: %d\n", ret);
+		put_device(mem->dev);
+		kfree(mem);
+		return ERR_PTR(ret);
+	}
+
+	mem->addr = dma_mem.dma_handle;
+	mem->cookie = dma_mem.addr;
+
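+	/* Initial reference; exporting the dma_buf will take a second one */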
+	kref_init(&mem->refcnt);
+
+	return mem;
+}
+
+static struct dma_buf *__trinity_hwmem_get_dmabuf(struct trinity_hwmem *mem,
+						  unsigned long flags)
+{
+	DEFINE_DMA_BUF_EXPORT_INFO(einfo);
+	struct dma_buf *dbuf;
+
+	einfo.ops = &trinity_hwmem_dbuf_ops;
+	einfo.size = mem->alc_size;
+	einfo.flags = flags;
+	einfo.priv = (void *)mem;
+
+	dbuf = dma_buf_export(&einfo);
+	if (IS_ERR(dbuf))
+		return dbuf;
+
+	/* Increase ref count of the backing storage */
+	dbuf->priv = __trinity_hwmem_get(mem);
+	mem->dbuf = dbuf;
+
+	return dbuf;
+}
+
+int32_t trinity_hwmem_alloc(struct device *dev, const size_t size,
+			    enum trinity_hwmem_type type)
+{
+	struct trinity_hwmem *mem;
+	struct dma_buf *dbuf;
+	int32_t ret;
+
+	mem = __trinity_hwmem_alloc(dev, size, DMA_BIDIRECTIONAL, type);
+	if (IS_ERR(mem))
+		return PTR_ERR(mem);
+
+	dbuf = __trinity_hwmem_get_dmabuf(mem, O_CLOEXEC | O_RDWR);
+	if (IS_ERR(dbuf)) {
+		ret = PTR_ERR(dbuf);
+		goto err_put_mem;
+	}
+
+	ret = dma_buf_fd(dbuf, O_CLOEXEC);
+	if (ret < 0)
+		goto err_put_mem;
+
+	return ret;
+
+err_put_mem:
+	__trinity_hwmem_put(mem);
+
+	return ret;
+}
+
+int32_t trinity_hwmem_free(struct device *dev, const int32_t fd)
+{
+	struct dma_buf *dbuf;
+	struct trinity_hwmem *mem;
+
+	dbuf = dma_buf_get(fd);
+	if (IS_ERR(dbuf)) {
+		dev_err(dev, "failed to get the dma_buf for fd %d: %ld\n",
+			fd, PTR_ERR(dbuf));
+
+		return PTR_ERR(dbuf);
+	}
+
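+	/*
+	 * Drop the reference taken when the dma_buf was exported and the
+	 * initial reference from allocation; dma_buf_put() below releases
+	 * the reference taken by dma_buf_get() above.
+	 */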
+	mem = dbuf_to_trnt_hwmem(dbuf);
+	__trinity_hwmem_put_dmabuf(dbuf);
+	__trinity_hwmem_put(mem);
+
+	dma_buf_put(dbuf);
+	return 0;
+}
+
+int32_t
+trinity_hwmem_import_dmabuf_begin(struct device *dev, const int32_t dbuf_fd,
+				  struct trinity_hwmem_import *import_info)
+{
+	struct dma_buf_attachment *attachment;
+	struct dma_buf *buf;
+	struct trinity_hwmem *mem;
+	struct iosys_map map;
+	int32_t ret;
+
+	if (!import_info)
+		return -EINVAL;
+
+	buf = dma_buf_get(dbuf_fd);
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
+
+	attachment = dma_buf_attach(buf, dev);
+	if (IS_ERR(attachment)) {
+		ret = PTR_ERR(attachment);
+		goto err_dbuf_put;
+	}
+
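+	/* The attach callback stored the exporter's hwmem in attachment->priv */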
+	mem = attachment->priv;
+	import_info->dma_addr = mem->addr;
+	ret = dma_buf_vmap(buf, &map);
+	if (ret)
+		goto err_dbuf_detach;
+
+	import_info->addr = map.vaddr;
+	import_info->attachment = attachment;
+	import_info->buf = buf;
+
+	return 0;
+
+err_dbuf_detach:
+	dma_buf_detach(buf, attachment);
+err_dbuf_put:
+	dma_buf_put(buf);
+
+	return ret;
+}
+
+void trinity_hwmem_import_dmabuf_end(struct trinity_hwmem_import *import_info)
+{
+	struct iosys_map map;
+
+	if (!import_info || !import_info->buf)
+		return;
+
+	iosys_map_set_vaddr(&map, import_info->addr);
+	dma_buf_vunmap(import_info->buf, &map);
+	dma_buf_detach(import_info->buf, import_info->attachment);
+	dma_buf_put(import_info->buf);
+}
diff --git a/drivers/misc/trinity/trinity_hwmem.h b/drivers/misc/trinity/trinity_hwmem.h
new file mode 100644
index 000000000000..b75f3f14c537
--- /dev/null
+++ b/drivers/misc/trinity/trinity_hwmem.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * DMA memory buffers for Trinity device driver
+ *
+ * Copyright (C) 2020-2022 Samsung Electronics
+ * Copyright (C) 2020 Dongju Chae <dongju.chae@...sung.com>
+ * Copyright (C) 2020 Wook Song <wook16.song@...sung.com>
+ * Copyright (C) 2022 MyungJoo Ham <myungjoo.ham@...sung.com>
+ * Copyright (C) 2022 Yelin Jeong <yelini.jeong@...sung.com>
+ * Copyright (C) 2022 Jiho Chu <jiho.chu@...sung.com>
+ */
+
+#ifndef __DRIVERS_MISC_TRINITY_HWMEM_H__
+#define __DRIVERS_MISC_TRINITY_HWMEM_H__
+
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/kref.h>
+#include <uapi/misc/trinity.h>
+
+/**
+ * struct trinity_hwmem_import - A data structure to maintain imported hwmem
+ *   (that is Trinity DMA buffer).
+ * @dma_addr: The physical DMA address of this DMA buffer.
+ * @addr: A virtual address of this DMA buffer.
+ * @attachment: A pointer to &struct dma_buf_attachment.
+ * @buf: &struct dma_buf that this hwmem wrapped.
+ */
+struct trinity_hwmem_import {
+	dma_addr_t dma_addr;
+	void *addr;
+	struct dma_buf_attachment *attachment;
+	struct dma_buf *buf;
+};
+
+/**
+ * trinity_hwmem_import_dmabuf_begin() - Defines the beginning of a section to
+ *    import a given DMA buffer file descriptor.
+ * @dev: A pointer to the instance of the device to be attached the DMA buffer
+ * @dbuf_fd: The file descriptor of the DMA buffer to be imported.
+ * @import_info: If importing is successful, information such as the DMA
+ *    address, the virtual address which is mapped to the DMA address,
+ *    the &struct dma_buf_attachment, and the &struct dma_buf corresponding
+ *    to the file descriptor will be passed using this parameter.
+ *
+ * Return: 0 on success. Otherwise, returns negative error.
+ */
+int32_t
+trinity_hwmem_import_dmabuf_begin(struct device *dev, const int32_t dbuf_fd,
+				  struct trinity_hwmem_import *import_info);
+
+/**
+ * trinity_hwmem_import_dmabuf_end() - Defines the end of the section related
+ *    to the given pointer to &struct trinity_hwmem_import.
+ * @import_info: Importing information related to the section to be ended.
+ */
+void trinity_hwmem_import_dmabuf_end(struct trinity_hwmem_import *import_info);
+
+/**
+ * trinity_hwmem_alloc() - Allocate Hardware memory according to type
+ * @dev: A pointer to the instance of the device to be attached the DMA buffer
+ * @size: Requested memory size
+ * @type: Requested memory type. It will try to allocate from reserved memory first.
+ *
+ * Return: a file descriptor for the dma buffer on success.
+ *         Otherwise, returns negative error.
+ */
+int32_t trinity_hwmem_alloc(struct device *dev, const size_t size,
+			    enum trinity_hwmem_type type);
+
+/**
+ * trinity_hwmem_free() - Free Hardware memory
+ * @dev: A pointer to the instance of the device to be attached the DMA buffer
+ * @fd: A file descriptor for the allocated memory
+ *
+ * Return: 0 on success. Otherwise, returns negative error.
+ */
+int32_t trinity_hwmem_free(struct device *dev, const int32_t fd);
+
+#endif /* __DRIVERS_MISC_TRINITY_HWMEM_H__ */
-- 
2.25.1
