[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20241121042602.32730-4-jason-jh.lin@mediatek.com>
Date: Thu, 21 Nov 2024 12:25:57 +0800
From: Jason-JH.Lin <jason-jh.lin@...iatek.com>
To: Jassi Brar <jassisinghbrar@...il.com>, Chun-Kuang Hu
<chunkuang.hu@...nel.org>, AngeloGioacchino Del Regno
<angelogioacchino.delregno@...labora.com>, Rob Herring <robh@...nel.org>,
Krzysztof Kozlowski <krzk+dt@...nel.org>, Conor Dooley <conor+dt@...nel.org>,
Matthias Brugger <matthias.bgg@...il.com>, Mauro Carvalho Chehab
<mchehab@...nel.org>
CC: David Airlie <airlied@...il.com>, Simona Vetter <simona@...ll.ch>, Moudy
Ho <moudy.ho@...iatek.com>, <linux-kernel@...r.kernel.org>,
<devicetree@...r.kernel.org>, <dri-devel@...ts.freedesktop.org>,
<linux-mediatek@...ts.infradead.org>, <linux-arm-kernel@...ts.infradead.org>,
<linux-media@...r.kernel.org>, "Jason-JH . Lin" <jason-jh.lin@...iatek.com>,
Singo Chang <singo.chang@...iatek.com>, Nancy Lin <nancy.lin@...iatek.com>,
<Project_Global_Chrome_Upstream_Group@...iatek.com>
Subject: [PATCH 3/8] mailbox: mtk-cmdq: Add driver data to support for MT8196
MT8196 has 4 hardware changes compared with the previous SoC,
which correspond to the 4 new driver data:
1. mminfra_offset: For GCE data plane control
Since GCE has been moved into mminfra, GCE needs to append the
mminfra offset to the DRAM address when accessing the DRAM.
2. gce_vm: For GCE hardware virtualization
Currently, the first version of the mt8196 mailbox controller only
requires setting the VM-related registers to enable the permissions
of a host VM.
3. dma_mask_bit: For dma address bit control
In order to avoid the hardware limitations of MT8196 accessing DRAM,
GCE needs to configure the DMA address to be less than 35 bits.
4. subsys_num: For subsys ID validation
In previous SoCs, most hardware supported the 5-bit GCE subsys ID.
When GCE executed instructions, the corresponding hareware register
could be found through the subsys ID.
However, on MT8196 most hardware does not support the subsys ID, so
the subsys ID will be set to an invalid value that exceeds the
maximum supported subsys ID.
The subsys_num field, which holds the number of supported subsys IDs,
is used to determine whether a given subsys ID is valid, so that
mtk-cmdq-helper knows how to program the command.
Signed-off-by: Jason-JH.Lin <jason-jh.lin@...iatek.com>
---
drivers/mailbox/mtk-cmdq-mailbox.c | 107 +++++++++++++++++++++--
include/linux/mailbox/mtk-cmdq-mailbox.h | 3 +
2 files changed, 102 insertions(+), 8 deletions(-)
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 4bff73532085..37ff34a11749 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -43,6 +43,17 @@
#define GCE_CTRL_BY_SW GENMASK(2, 0)
#define GCE_DDR_EN GENMASK(18, 16)
+#define GCE_VM_ID_MAP0 0x5018
+#define GCE_VM_MAP0_ALL_HOST GENMASK(29, 0)
+#define GCE_VM_ID_MAP1 0x501c
+#define GCE_VM_MAP1_ALL_HOST GENMASK(29, 0)
+#define GCE_VM_ID_MAP2 0x5020
+#define GCE_VM_MAP2_ALL_HOST GENMASK(29, 0)
+#define GCE_VM_ID_MAP3 0x5024
+#define GCE_VM_MAP3_ALL_HOST GENMASK(5, 0)
+#define GCE_VM_CPR_GSIZE 0x50c4
+#define GCE_VM_CPR_GSIZE_HSOT GENMASK(3, 0)
+
#define CMDQ_THR_ACTIVE_SLOT_CYCLES 0x3200
#define CMDQ_THR_ENABLED 0x1
#define CMDQ_THR_DISABLED 0x0
@@ -87,11 +98,25 @@ struct cmdq {
struct gce_plat {
u32 thread_nr;
u8 shift;
+ u64 mminfra_offset;
bool control_by_sw;
bool sw_ddr_en;
+ bool gce_vm;
+ u32 dma_mask_bit;
+ u8 subsys_num;
u32 gce_num;
};
+/*
+ * cmdq_reg_shift_addr() - encode a DRAM address into GCE register/instruction
+ * form: add the SoC's mminfra offset (0 on SoCs without mminfra) and shift
+ * right by the platform PA shift.  Used when programming CMDQ_THR_CURR_ADDR/
+ * CMDQ_THR_END_ADDR and when patching jump instructions.
+ */
+static inline u32 cmdq_reg_shift_addr(u32 addr, const struct gce_plat *pdata)
+{
+ return ((addr + pdata->mminfra_offset) >> pdata->shift);
+}
+
+/*
+ * cmdq_reg_revert_addr() - inverse of cmdq_reg_shift_addr(): decode a GCE
+ * register value back into a DRAM address by shifting left and stripping
+ * the mminfra offset.
+ * NOTE(review): the left shift is done in u32, so the top 'shift' bits of
+ * the decoded address are lost — same as the open-coded readl() << shift it
+ * replaces; confirm GCE buffers are always allocated below that limit.
+ */
+static inline u32 cmdq_reg_revert_addr(u32 addr, const struct gce_plat *pdata)
+{
+ return ((addr << pdata->shift) - pdata->mminfra_offset);
+}
+
static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
{
WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
@@ -112,6 +137,38 @@ u8 cmdq_get_shift_pa(struct mbox_chan *chan)
}
EXPORT_SYMBOL(cmdq_get_shift_pa);
+/*
+ * cmdq_get_offset_pa() - return the platform's mminfra DRAM offset.
+ * 0 on SoCs where GCE sits outside mminfra.  Mailbox clients add this
+ * offset to DRAM addresses they embed in command buffers so that mminfra
+ * recognizes the access as a DRAM transaction.
+ */
+u64 cmdq_get_offset_pa(struct mbox_chan *chan)
+{
+ struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
+
+ return cmdq->pdata->mminfra_offset;
+}
+EXPORT_SYMBOL(cmdq_get_offset_pa);
+
+/*
+ * cmdq_subsys_is_valid() - check whether @subsys is a hardware-supported
+ * subsys ID on this SoC, i.e. below the platform's subsys_num bound.
+ * Lets mtk-cmdq-helper decide between subsys-addressed and full-PA
+ * instruction encoding.
+ */
+bool cmdq_subsys_is_valid(struct mbox_chan *chan, u8 subsys)
+{
+ struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
+
+ return (subsys < cmdq->pdata->subsys_num);
+}
+EXPORT_SYMBOL(cmdq_subsys_is_valid);
+
+/*
+ * cmdq_addr_need_offset() - tell whether @addr must have the mminfra
+ * offset appended before GCE can access it as DRAM.  Always false on
+ * SoCs without an mminfra offset.
+ */
+bool cmdq_addr_need_offset(struct mbox_chan *chan, u32 addr)
+{
+ struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
+
+ if (cmdq->pdata->mminfra_offset == 0)
+ return false;
+
+ /*
+ * mminfra treats any address greater than or equal to mminfra_offset
+ * as a transaction targeting DRAM, so the caller must append
+ * mminfra_offset to such addresses before handing them to GCE.
+ */
+ return (addr >= cmdq->pdata->mminfra_offset);
+}
+EXPORT_SYMBOL(cmdq_addr_need_offset);
+
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
u32 status;
@@ -143,6 +200,17 @@ static void cmdq_init(struct cmdq *cmdq)
u32 gctl_regval = 0;
WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
+
+ if (cmdq->pdata->gce_vm) {
+ /* config cpr size for host vm */
+ writel(GCE_VM_CPR_GSIZE_HSOT, cmdq->base + GCE_VM_CPR_GSIZE);
+ /* config CPR_GSIZE before setting VM_ID_MAP to avoid data leakage */
+ writel(GCE_VM_MAP0_ALL_HOST, cmdq->base + GCE_VM_ID_MAP0);
+ writel(GCE_VM_MAP1_ALL_HOST, cmdq->base + GCE_VM_ID_MAP1);
+ writel(GCE_VM_MAP2_ALL_HOST, cmdq->base + GCE_VM_ID_MAP2);
+ writel(GCE_VM_MAP3_ALL_HOST, cmdq->base + GCE_VM_ID_MAP3);
+ }
+
if (cmdq->pdata->control_by_sw)
gctl_regval = GCE_CTRL_BY_SW;
if (cmdq->pdata->sw_ddr_en)
@@ -199,7 +267,7 @@ static void cmdq_task_insert_into_thread(struct cmdq_task *task)
prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
(u64)CMDQ_JUMP_BY_PA << 32 |
- (task->pa_base >> task->cmdq->pdata->shift);
+ cmdq_reg_shift_addr(task->pa_base, task->cmdq->pdata);
dma_sync_single_for_device(dev, prev_task->pa_base,
prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
@@ -264,7 +332,8 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
else
return;
-	curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->pdata->shift;
+	/* The register holds the shifted/offset form; decode back to a real PA. */
+	curr_pa = cmdq_reg_revert_addr(readl(thread->base + CMDQ_THR_CURR_ADDR), cmdq->pdata);
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
list_entry) {
@@ -416,9 +484,9 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
*/
WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
- writel(task->pa_base >> cmdq->pdata->shift,
+ writel(cmdq_reg_shift_addr(task->pa_base, cmdq->pdata),
thread->base + CMDQ_THR_CURR_ADDR);
- writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
+ writel(cmdq_reg_shift_addr(task->pa_base + pkt->cmd_buf_size, cmdq->pdata),
thread->base + CMDQ_THR_END_ADDR);
writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
@@ -426,10 +494,10 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
} else {
WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
- curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
- cmdq->pdata->shift;
- end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
- cmdq->pdata->shift;
+ curr_pa = cmdq_reg_revert_addr(readl(thread->base + CMDQ_THR_CURR_ADDR),
+ cmdq->pdata);
+ end_pa = cmdq_reg_revert_addr(readl(thread->base + CMDQ_THR_END_ADDR),
+ cmdq->pdata);
/* check boundary */
if (curr_pa == end_pa - CMDQ_INST_SIZE ||
curr_pa == end_pa) {
@@ -663,6 +731,9 @@ static int cmdq_probe(struct platform_device *pdev)
if (err)
return err;
+ if (cmdq->pdata->dma_mask_bit)
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(cmdq->pdata->dma_mask_bit));
+
cmdq->mbox.dev = dev;
cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
sizeof(*cmdq->mbox.chans), GFP_KERNEL);
@@ -736,6 +807,7 @@ static const struct gce_plat gce_plat_mt6779 = {
.thread_nr = 24,
.shift = 3,
.control_by_sw = false,
+ .subsys_num = 30,
.gce_num = 1
};
@@ -743,6 +815,7 @@ static const struct gce_plat gce_plat_mt8173 = {
.thread_nr = 16,
.shift = 0,
.control_by_sw = false,
+ .subsys_num = 4,
.gce_num = 1
};
@@ -750,6 +823,7 @@ static const struct gce_plat gce_plat_mt8183 = {
.thread_nr = 24,
.shift = 0,
.control_by_sw = false,
+ .subsys_num = 30,
.gce_num = 1
};
@@ -758,6 +832,7 @@ static const struct gce_plat gce_plat_mt8186 = {
.shift = 3,
.control_by_sw = true,
.sw_ddr_en = true,
+ .subsys_num = 30,
.gce_num = 1
};
@@ -765,6 +840,7 @@ static const struct gce_plat gce_plat_mt8188 = {
.thread_nr = 32,
.shift = 3,
.control_by_sw = true,
+ .subsys_num = 26,
.gce_num = 2
};
@@ -772,6 +848,7 @@ static const struct gce_plat gce_plat_mt8192 = {
.thread_nr = 24,
.shift = 3,
.control_by_sw = true,
+ .subsys_num = 30,
.gce_num = 1
};
@@ -779,6 +856,19 @@ static const struct gce_plat gce_plat_mt8195 = {
.thread_nr = 24,
.shift = 3,
.control_by_sw = true,
+ .subsys_num = 26,
+ .gce_num = 2
+};
+
+static const struct gce_plat gce_plat_mt8196 = {
+ .thread_nr = 32,
+ .shift = 3,
+ .mminfra_offset = 0x80000000, /* 2GB */
+ .control_by_sw = true,
+ .sw_ddr_en = true,
+ .gce_vm = true,
+ .dma_mask_bit = 35,
+ .subsys_num = 30,
.gce_num = 2
};
@@ -790,6 +880,7 @@ static const struct of_device_id cmdq_of_ids[] = {
{.compatible = "mediatek,mt8188-gce", .data = (void *)&gce_plat_mt8188},
{.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_mt8192},
{.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_mt8195},
+ {.compatible = "mediatek,mt8196-gce", .data = (void *)&gce_plat_mt8196},
{}
};
MODULE_DEVICE_TABLE(of, cmdq_of_ids);
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
index a8f0070c7aa9..f30023db4dbf 100644
--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -79,5 +79,8 @@ struct cmdq_pkt {
};
u8 cmdq_get_shift_pa(struct mbox_chan *chan);
+u64 cmdq_get_offset_pa(struct mbox_chan *chan);
+bool cmdq_subsys_is_valid(struct mbox_chan *chan, u8 subsys);
+bool cmdq_addr_need_offset(struct mbox_chan *chan, u32 addr);
#endif /* __MTK_CMDQ_MAILBOX_H__ */
--
2.43.0
Powered by blists - more mailing lists