Message-ID: <20240515112308.10171-10-yong.wu@mediatek.com>
Date: Wed, 15 May 2024 19:23:08 +0800
From: Yong Wu <yong.wu@...iatek.com>
To: Rob Herring <robh+dt@...nel.org>, Matthias Brugger
<matthias.bgg@...il.com>, <christian.koenig@....com>, Sumit Semwal
<sumit.semwal@...aro.org>, Andrew Morton <akpm@...ux-foundation.org>
CC: Krzysztof Kozlowski <krzysztof.kozlowski+dt@...aro.org>, Conor Dooley
<conor+dt@...nel.org>, Benjamin Gaignard <benjamin.gaignard@...labora.com>,
Brian Starkey <Brian.Starkey@....com>, John Stultz <jstultz@...gle.com>,
<tjmercier@...gle.com>, AngeloGioacchino Del Regno
<angelogioacchino.delregno@...labora.com>, Yong Wu <yong.wu@...iatek.com>,
<devicetree@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<linux-media@...r.kernel.org>, <dri-devel@...ts.freedesktop.org>,
<linaro-mm-sig@...ts.linaro.org>, <linux-arm-kernel@...ts.infradead.org>,
<linux-mediatek@...ts.infradead.org>, Robin Murphy <robin.murphy@....com>,
Vijayanand Jitta <quic_vjitta@...cinc.com>, Joakim Bech
<joakim.bech@...aro.org>, Jeffrey Kardatzke <jkardatzke@...gle.com>, Pavel
Machek <pavel@....cz>, Simon Ser <contact@...rsion.fr>, Pekka Paalanen
<ppaalanen@...il.com>, <willy@...radead.org>, Logan Gunthorpe
<logang@...tatee.com>, Daniel Vetter <daniel@...ll.ch>,
<jianjiao.zeng@...iatek.com>, <kuohong.wang@...iatek.com>,
<youlin.pei@...iatek.com>
Subject: [PATCH v5 9/9] dma_buf: heaps: restricted_heap_mtk: Add a new CMA heap
Create a new MediaTek CMA heap from the CMA reserved buffer.

In this heap, when the first buffer is allocated, use cma_alloc to claim
the whole CMA range, then send that range to the TEE to protect and
manage it. For later allocations, we just increase cma_used_size. When
SVP is done, cma_release releases the buffer so the kernel may reuse it.

For the "CMA" restricted heap, "struct cma *cma" is a common property,
not MediaTek-specific, so put it into "struct restricted_heap" instead of
our private data.
Signed-off-by: Yong Wu <yong.wu@...iatek.com>
---
drivers/dma-buf/heaps/Kconfig | 2 +-
drivers/dma-buf/heaps/restricted_heap.h | 4 +
drivers/dma-buf/heaps/restricted_heap_mtk.c | 121 +++++++++++++++++++-
3 files changed, 123 insertions(+), 4 deletions(-)
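Note for reviewers: userspace is expected to allocate from this heap through
the standard dma-heap ioctl. Below is a minimal, untested sketch of such a
client, assuming the heap node appears as /dev/dma_heap/restricted_mtk_cma
(derived from the heap .name added in this patch); error reporting is trimmed.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int main(void)
{
	struct dma_heap_allocation_data data;
	int heap_fd;

	/* The heap name comes from the restricted_heap .name below. */
	heap_fd = open("/dev/dma_heap/restricted_mtk_cma", O_RDWR | O_CLOEXEC);
	if (heap_fd < 0)
		return 1;

	memset(&data, 0, sizeof(data));
	data.len = 4UL * 1024 * 1024;		/* 4MB restricted buffer */
	data.fd_flags = O_RDWR | O_CLOEXEC;

	if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) < 0) {
		close(heap_fd);
		return 1;
	}

	/*
	 * data.fd is now a dma-buf fd backed by TEE-protected CMA memory;
	 * it can be passed on to importers (codec, DRM, etc.).
	 */
	close(data.fd);
	close(heap_fd);
	return 0;
}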
diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index 84f748fb2856..58903bc62ac8 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -24,7 +24,7 @@ config DMABUF_HEAPS_RESTRICTED
config DMABUF_HEAPS_RESTRICTED_MTK
bool "MediaTek DMA-BUF Restricted Heap"
- depends on DMABUF_HEAPS_RESTRICTED && TEE=y
+ depends on DMABUF_HEAPS_RESTRICTED && DMA_CMA && TEE=y
help
Enable restricted dma-buf heaps for MediaTek platform. This heap is backed by
TEE client interfaces. If in doubt, say N.
diff --git a/drivers/dma-buf/heaps/restricted_heap.h b/drivers/dma-buf/heaps/restricted_heap.h
index 8cb9211093c5..7dec4b8a471b 100644
--- a/drivers/dma-buf/heaps/restricted_heap.h
+++ b/drivers/dma-buf/heaps/restricted_heap.h
@@ -23,6 +23,10 @@ struct restricted_heap {
const struct restricted_heap_ops *ops;
+ struct cma *cma;
+ unsigned long cma_paddr;
+ unsigned long cma_size;
+
void *priv_data;
};
diff --git a/drivers/dma-buf/heaps/restricted_heap_mtk.c b/drivers/dma-buf/heaps/restricted_heap_mtk.c
index e571eae719e0..6d8119828485 100644
--- a/drivers/dma-buf/heaps/restricted_heap_mtk.c
+++ b/drivers/dma-buf/heaps/restricted_heap_mtk.c
@@ -6,9 +6,11 @@
*/
#define pr_fmt(fmt) "rheap_mtk: " fmt
+#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/of_reserved_mem.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uuid.h>
@@ -25,6 +27,13 @@ enum mtk_secure_mem_type {
* management is inside the TEE.
*/
MTK_SECURE_MEMORY_TYPE_CM_TZ = 1,
+ /*
+ * MediaTek dynamic chunk memory carved out from CMA.
+ * In the normal case the CMA can be used by the kernel; when SVP starts,
+ * we allocate this whole CMA and pass its PA and size into the TEE to
+ * protect it, then the detailed memory management is also inside the TEE.
+ */
+ MTK_SECURE_MEMORY_TYPE_CM_CMA = 2,
};
/* This structure also is synchronized with tee, thus not use the phys_addr_t */
@@ -40,7 +49,8 @@ enum mtk_secure_buffer_tee_cmd {
* [in] value[0].a: The buffer size.
* value[0].b: alignment.
* [in] value[1].a: enum mtk_secure_mem_type.
- * [inout]
+ * [inout] [in] value[2].a: The PA base in the CMA case.
+ * value[2].b: The buffer size in the CMA case.
* [out] value[2].a: entry number of memory block.
* If this is 1, it means the memory is continuous.
* value[2].b: buffer PA base.
@@ -73,6 +83,9 @@ struct mtk_restricted_heap_data {
const enum mtk_secure_mem_type mem_type;
+ struct page *cma_page;
+ unsigned long cma_used_size;
+ struct mutex lock; /* lock for cma_used_size */
};
static int mtk_tee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
@@ -173,6 +186,10 @@ static int mtk_tee_restrict_memory(struct restricted_heap *rheap, struct restric
params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
params[1].u.value.a = data->mem_type;
params[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
+ if (rheap->cma && data->mem_type == MTK_SECURE_MEMORY_TYPE_CM_CMA) {
+ params[2].u.value.a = rheap->cma_paddr;
+ params[2].u.value.b = rheap->cma_size;
+ }
params[3].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
ret = mtk_tee_service_call(data->tee_ctx, data->tee_session,
MTK_TZCMD_SECMEM_ZALLOC, params);
@@ -265,6 +282,48 @@ mtk_restricted_memory_free(struct restricted_heap *rheap, struct restricted_buff
{
}
+static int mtk_restricted_memory_cma_allocate(struct restricted_heap *rheap,
+ struct restricted_buffer *buf)
+{
+ struct mtk_restricted_heap_data *data = rheap->priv_data;
+ int ret = 0;
+ /*
+ * Allocate the CMA only when allocating a buffer for the first time, and
+ * just increase cma_used_size for later allocations. The actual memory
+ * allocation is done within the TEE.
+ */
+ mutex_lock(&data->lock);
+ if (!data->cma_used_size) {
+ data->cma_page = cma_alloc(rheap->cma, rheap->cma_size >> PAGE_SHIFT,
+ get_order(PAGE_SIZE), false);
+ if (!data->cma_page) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ } else if (data->cma_used_size + buf->size > rheap->cma_size) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ data->cma_used_size += buf->size;
+
+out_unlock:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static void mtk_restricted_memory_cma_free(struct restricted_heap *rheap,
+ struct restricted_buffer *buf)
+{
+ struct mtk_restricted_heap_data *data = rheap->priv_data;
+
+ mutex_lock(&data->lock);
+ data->cma_used_size -= buf->size;
+ if (!data->cma_used_size)
+ cma_release(rheap->cma, data->cma_page,
+ rheap->cma_size >> PAGE_SHIFT);
+ mutex_unlock(&data->lock);
+}
+
static int mtk_restricted_heap_init(struct restricted_heap *rheap)
{
struct mtk_restricted_heap_data *data = rheap->priv_data;
@@ -286,21 +345,77 @@ static struct mtk_restricted_heap_data mtk_restricted_heap_data = {
.mem_type = MTK_SECURE_MEMORY_TYPE_CM_TZ,
};
+static const struct restricted_heap_ops mtk_restricted_heap_ops_cma = {
+ .heap_init = mtk_restricted_heap_init,
+ .alloc = mtk_restricted_memory_cma_allocate,
+ .free = mtk_restricted_memory_cma_free,
+ .restrict_buf = mtk_tee_restrict_memory,
+ .unrestrict_buf = mtk_tee_unrestrict_memory,
+};
+
+static struct mtk_restricted_heap_data mtk_restricted_heap_data_cma = {
+ .mem_type = MTK_SECURE_MEMORY_TYPE_CM_CMA,
+};
+
static struct restricted_heap mtk_restricted_heaps[] = {
{
.name = "restricted_mtk_cm",
.ops = &mtk_restricted_heap_ops,
.priv_data = &mtk_restricted_heap_data,
},
+ {
+ .name = "restricted_mtk_cma",
+ .ops = &mtk_restricted_heap_ops_cma,
+ .priv_data = &mtk_restricted_heap_data_cma,
+ },
};
+static int __init mtk_restricted_cma_init(struct reserved_mem *rmem)
+{
+ struct restricted_heap *rheap = mtk_restricted_heaps, *rheap_cma = NULL;
+ struct mtk_restricted_heap_data *data;
+ struct cma *cma;
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(mtk_restricted_heaps); i++, rheap++) {
+ data = rheap->priv_data;
+ if (data->mem_type == MTK_SECURE_MEMORY_TYPE_CM_CMA) {
+ rheap_cma = rheap;
+ break;
+ }
+ }
+ if (!rheap_cma)
+ return -EINVAL;
+
+ ret = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name,
+ &cma);
+ if (ret) {
+ pr_err("%s: %s set up CMA fail. ret %d.\n", __func__, rmem->name, ret);
+ return ret;
+ }
+
+ rheap_cma->cma = cma;
+ rheap_cma->cma_paddr = rmem->base;
+ rheap_cma->cma_size = rmem->size;
+ return 0;
+}
+
+RESERVEDMEM_OF_DECLARE(restricted_cma, "mediatek,dynamic-restricted-region",
+ mtk_restricted_cma_init);
+
static int mtk_restricted_heap_initialize(void)
{
struct restricted_heap *rheap = mtk_restricted_heaps;
+ struct mtk_restricted_heap_data *data;
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(mtk_restricted_heaps); i++, rheap++)
- restricted_heap_add(rheap);
+ for (i = 0; i < ARRAY_SIZE(mtk_restricted_heaps); i++, rheap++) {
+ data = rheap->priv_data;
+ if (data->mem_type == MTK_SECURE_MEMORY_TYPE_CM_CMA && !rheap->cma)
+ continue;
+ if (!restricted_heap_add(rheap))
+ mutex_init(&data->lock);
+ }
return 0;
}
module_init(mtk_restricted_heap_initialize);
--
2.25.1