Message-ID: <20240112092014.23999-7-yong.wu@mediatek.com>
Date: Fri, 12 Jan 2024 17:20:13 +0800
From: Yong Wu <yong.wu@...iatek.com>
To: Rob Herring <robh+dt@...nel.org>, Matthias Brugger
<matthias.bgg@...il.com>, <christian.koenig@....com>, Sumit Semwal
<sumit.semwal@...aro.org>
CC: Krzysztof Kozlowski <krzysztof.kozlowski+dt@...aro.org>, Conor Dooley
<conor+dt@...nel.org>, Benjamin Gaignard <benjamin.gaignard@...labora.com>,
Brian Starkey <Brian.Starkey@....com>, John Stultz <jstultz@...gle.com>,
<tjmercier@...gle.com>, AngeloGioacchino Del Regno
<angelogioacchino.delregno@...labora.com>, Yong Wu <yong.wu@...iatek.com>,
<devicetree@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<linux-media@...r.kernel.org>, <dri-devel@...ts.freedesktop.org>,
<linaro-mm-sig@...ts.linaro.org>, <linux-arm-kernel@...ts.infradead.org>,
<linux-mediatek@...ts.infradead.org>, Robin Murphy <robin.murphy@....com>,
Vijayanand Jitta <quic_vjitta@...cinc.com>, Joakim Bech
<joakim.bech@...aro.org>, Jeffrey Kardatzke <jkardatzke@...gle.com>, Pavel
Machek <pavel@....cz>, Simon Ser <contact@...rsion.fr>, Pekka Paalanen
<ppaalanen@...il.com>, <jianjiao.zeng@...iatek.com>,
<kuohong.wang@...iatek.com>, <youlin.pei@...iatek.com>
Subject: [PATCH v4 6/7] dma-buf: heaps: restricted_heap_mtk: Add TEE memory service call
Add the TEE service call for the MediaTek heap. We have only a limited
number of hardware entries with which to protect memory, so we cannot
protect memory arbitrarily; our secure memory management actually lives
inside OP-TEE. The kernel only tells the TEE the required size, and the
TEE returns a "secure handle"/"secure address". To make the name more
general, we call it "restricted_addr" here. The restricted_addr is a
reference to a secure buffer within the TEE. We store it in
sg_dma_address; please see the comment in the code.
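For reference, a minimal, hypothetical sketch of how an importer could
pick the restricted address out of the mapped sg_table. The function name
and the lack of real error handling are illustrative only; the dma-buf
attach/map calls themselves are the standard kernel API:

  #include <linux/dma-buf.h>
  #include <linux/dma-direction.h>
  #include <linux/err.h>
  #include <linux/scatterlist.h>

  /* Hypothetical importer helper: "dev" and "dmabuf" come from the caller. */
  static u64 example_get_restricted_addr(struct device *dev, struct dma_buf *dmabuf)
  {
  	struct dma_buf_attachment *attach;
  	struct sg_table *sgt;
  	u64 restricted_addr;

  	attach = dma_buf_attach(dmabuf, dev);
  	if (IS_ERR(attach))
  		return 0;

  	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
  	if (IS_ERR(sgt)) {
  		dma_buf_detach(dmabuf, attach);
  		return 0;
  	}

  	/* sg_dma_address() carries the restricted address, not a CPU/IOVA address. */
  	restricted_addr = sg_dma_address(sgt->sgl);

  	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
  	dma_buf_detach(dmabuf, attach);
  	return restricted_addr;
  }

The importer would then pass this value to the trusted or secure world to
resolve the real buffer, rather than programming it into a DMA engine.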
Signed-off-by: Yong Wu <yong.wu@...iatek.com>
---
drivers/dma-buf/heaps/restricted_heap.c | 17 ++++
drivers/dma-buf/heaps/restricted_heap.h | 3 +
drivers/dma-buf/heaps/restricted_heap_mtk.c | 93 +++++++++++++++++++++
3 files changed, 113 insertions(+)
diff --git a/drivers/dma-buf/heaps/restricted_heap.c b/drivers/dma-buf/heaps/restricted_heap.c
index 4e9869ab4a85..148dbf5662c2 100644
--- a/drivers/dma-buf/heaps/restricted_heap.c
+++ b/drivers/dma-buf/heaps/restricted_heap.c
@@ -96,8 +96,23 @@ static struct sg_table *
restricted_heap_map_dma_buf(struct dma_buf_attachment *attachment, enum dma_data_direction direct)
{
struct restricted_heap_attachment *a = attachment->priv;
+ struct dma_buf *dmabuf = attachment->dmabuf;
+ struct restricted_buffer *restricted_buf = dmabuf->priv;
struct sg_table *table = a->table;
+ /*
+ * Technically, dma_address refers to the address used by the HW. But for a restricted
+ * buffer we don't know its dma_address in the kernel; instead, we may know its restricted
+ * address, which refers to the real buffer in the trusted or secure world. Use this field
+ * to carry the restricted address, and the user will use it to obtain the real address in
+ * the trusted or secure world.
+ *
+ * Note: CONFIG_DMA_API_DEBUG requires this to be aligned with PAGE_SIZE.
+ */
+ if (restricted_buf->restricted_addr) {
+ sg_dma_address(table->sgl) = restricted_buf->restricted_addr;
+ sg_dma_len(table->sgl) = restricted_buf->size;
+ }
return table;
}
@@ -108,6 +123,8 @@ restricted_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_t
struct restricted_heap_attachment *a = attachment->priv;
WARN_ON(a->table != table);
+ sg_dma_address(table->sgl) = 0;
+ sg_dma_len(table->sgl) = 0;
}
static int
diff --git a/drivers/dma-buf/heaps/restricted_heap.h b/drivers/dma-buf/heaps/restricted_heap.h
index cf5865f829fc..6c93f6d257dc 100644
--- a/drivers/dma-buf/heaps/restricted_heap.h
+++ b/drivers/dma-buf/heaps/restricted_heap.h
@@ -11,6 +11,9 @@
struct restricted_buffer {
struct dma_heap *heap;
size_t size;
+
+ /* A reference to a buffer in the trusted or secure world. */
+ u64 restricted_addr;
};
struct restricted_heap {
diff --git a/drivers/dma-buf/heaps/restricted_heap_mtk.c b/drivers/dma-buf/heaps/restricted_heap_mtk.c
index a5f5365059cd..902add95bb7e 100644
--- a/drivers/dma-buf/heaps/restricted_heap_mtk.c
+++ b/drivers/dma-buf/heaps/restricted_heap_mtk.c
@@ -25,6 +25,27 @@ enum mtk_secure_mem_type {
MTK_SECURE_MEMORY_TYPE_CM_TZ = 1,
};
+enum mtk_secure_buffer_tee_cmd {
+ /*
+ * Allocate zeroed secure memory from the TEE.
+ *
+ * [in] value[0].a: The buffer size.
+ * value[0].b: alignment.
+ * [in] value[1].a: enum mtk_secure_mem_type.
+ * [out] value[3].a: The secure handle.
+ */
+ MTK_TZCMD_SECMEM_ZALLOC = 0x10000, /* MTK TEE Command ID Base */
+
+ /*
+ * Free secure memory.
+ *
+ * [in] value[0].a: The secure handle of this buffer; it is value[3].a of
+ * MTK_TZCMD_SECMEM_ZALLOC.
+ * [out] value[1].a: The return value: 0 means success, non-zero means failure.
+ */
+ MTK_TZCMD_SECMEM_FREE = 0x10001,
+};
+
struct mtk_restricted_heap_data {
struct tee_context *tee_ctx;
u32 tee_session;
@@ -74,6 +95,74 @@ static int mtk_tee_session_init(struct mtk_restricted_heap_data *data)
return ret;
}
+static int mtk_tee_service_call(struct tee_context *tee_ctx, u32 session,
+ unsigned int command, struct tee_param *params)
+{
+ struct tee_ioctl_invoke_arg arg = {0};
+ int ret;
+
+ arg.num_params = TEE_PARAM_NUM;
+ arg.session = session;
+ arg.func = command;
+
+ ret = tee_client_invoke_func(tee_ctx, &arg, params);
+ if (ret < 0 || arg.ret) {
+ pr_err("%s: cmd %d ret %d:%x.\n", __func__, command, ret, arg.ret);
+ ret = -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static int mtk_tee_restrict_memory(struct restricted_heap *heap, struct restricted_buffer *buf)
+{
+ struct mtk_restricted_heap_data *data = heap->priv_data;
+ struct tee_param params[TEE_PARAM_NUM] = {0};
+ int ret;
+
+ params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ params[0].u.value.a = buf->size;
+ params[0].u.value.b = PAGE_SIZE;
+ params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ params[1].u.value.a = data->mem_type;
+ params[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ params[3].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
+ ret = mtk_tee_service_call(data->tee_ctx, data->tee_session,
+ MTK_TZCMD_SECMEM_ZALLOC, params);
+ if (ret)
+ return -ENOMEM;
+
+ buf->restricted_addr = params[3].u.value.a;
+ return 0;
+}
+
+static void mtk_tee_unrestrict_memory(struct restricted_heap *heap, struct restricted_buffer *buf)
+{
+ struct mtk_restricted_heap_data *data = heap->priv_data;
+ struct tee_param params[TEE_PARAM_NUM] = {0};
+
+ params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ params[0].u.value.a = buf->restricted_addr;
+ params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
+
+ mtk_tee_service_call(data->tee_ctx, data->tee_session,
+ MTK_TZCMD_SECMEM_FREE, params);
+ if (params[1].u.value.a)
+ pr_err("%s, Unrestrict buffer(0x%llx) fail(%lld) from TEE.\n",
+ heap->name, buf->restricted_addr, params[1].u.value.a);
+}
+
+static int
+mtk_restricted_memory_allocate(struct restricted_heap *heap, struct restricted_buffer *buf)
+{
+ /* The memory allocation is done inside the TEE. */
+ return 0;
+}
+
+static void
+mtk_restricted_memory_free(struct restricted_heap *heap, struct restricted_buffer *buf)
+{
+}
+
static int mtk_restricted_heap_init(struct restricted_heap *heap)
{
struct mtk_restricted_heap_data *data = heap->priv_data;
@@ -85,6 +174,10 @@ static int mtk_restricted_heap_init(struct restricted_heap *heap)
static const struct restricted_heap_ops mtk_restricted_heap_ops = {
.heap_init = mtk_restricted_heap_init,
+ .memory_alloc = mtk_restricted_memory_allocate,
+ .memory_free = mtk_restricted_memory_free,
+ .memory_restrict = mtk_tee_restrict_memory,
+ .memory_unrestrict = mtk_tee_unrestrict_memory,
};
static struct mtk_restricted_heap_data mtk_restricted_heap_data = {
--
2.25.1