Message-ID: <d2e6147dc342867aba18f71869c9b4d4230f6ccc.camel@mediatek.com>
Date: Mon, 15 Aug 2022 13:24:40 +0800
From: CK Hu <ck.hu@...iatek.com>
To: Yongqiang Niu <yongqiang.niu@...iatek.com>,
Chun-Kuang Hu <chunkuang.hu@...nel.org>
CC: Jassi Brar <jassisinghbrar@...il.com>,
Matthias Brugger <matthias.bgg@...il.com>,
<linux-kernel@...r.kernel.org>,
<linux-arm-kernel@...ts.infradead.org>,
<linux-mediatek@...ts.infradead.org>,
<Project_Global_Chrome_Upstream_Group@...iatek.com>,
Hsin-Yi Wang <hsinyi@...omium.org>,
Yongqiang Niu <yongqiang.niu@...iatek.corp-partner.google.com>,
Allen-kh Cheng <allen-kh.cheng@...iatek.corp-partner.google.com>
Subject: Re: [PATCH v1, 1/1] mailbox: mtk-cmdq: fix gce timeout issue
Hi, Yongqiang:
On Mon, 2022-08-15 at 11:07 +0800, Yongqiang Niu wrote:
> From: Yongqiang Niu <yongqiang.niu@...iatek.corp-partner.google.com>
>
> 1. Enable gce ddr enable (gce register offset 0x48, bits 16 to 18) while
> gce is working, and disable gce ddr enable when the gce job is done.
Why is gce ddr enable needed? I think gce works fine without this. Please
describe this in more detail.
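
For reference, this is my reading of the register write in cmdq_clk_enable(),
as a minimal sketch with named fields, assuming the layout described in point
1 of your commit message. The names GCE_DDR_EN and GCE_CTRL_BY_SW and the
helper below are only an illustration, not existing driver code:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/io.h>

#define GCE_GCTL_VALUE	0x48			/* already defined in the driver */
#define GCE_DDR_EN	GENMASK(18, 16)		/* "gce ddr enable", bits 16..18 */
#define GCE_CTRL_BY_SW	GENMASK(2, 0)		/* low bits the driver writes today */

static void cmdq_gctl_ddr_set(struct cmdq *cmdq, bool enable)
{
	u32 val = FIELD_PREP(GCE_CTRL_BY_SW, 0x7);	/* keep 0x7 in bits 0..2 */

	if (enable)	/* first user: also set bits 16..18, i.e. the patch's (0x7 << 16) + 0x7 */
		val |= FIELD_PREP(GCE_DDR_EN, 0x7);

	writel(val, cmdq->base + GCE_GCTL_VALUE);
}

If the hardware really needs these bits, named fields like this would also be
easier to review than the magic numbers.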
Regards,
CK
> 2. Split out cmdq clk enable/disable APIs, and control gce ddr
> enable/disable inside them, so that the register is protected when cmdq
> is used by both display and mdp.
>
> Signed-off-by: Yongqiang Niu <yongqiang.niu@...iatek.corp-partner.google.com>
> Signed-off-by: Allen-kh Cheng <allen-kh.cheng@...iatek.corp-partner.google.com>
> ---
> drivers/mailbox/mtk-cmdq-mailbox.c | 57 ++++++++++++++++++++++++++----
> 1 file changed, 51 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
> index 2578e5aaa935..64a47470f062 100644
> --- a/drivers/mailbox/mtk-cmdq-mailbox.c
> +++ b/drivers/mailbox/mtk-cmdq-mailbox.c
> @@ -81,6 +81,8 @@ struct cmdq {
> u8 shift_pa;
> bool control_by_sw;
> u32 gce_num;
> + atomic_t usage;
> + spinlock_t lock;
> };
>
> struct gce_plat {
> @@ -90,6 +92,46 @@ struct gce_plat {
> u32 gce_num;
> };
>
> +static s32 cmdq_clk_enable(struct cmdq *cmdq)
> +{
> + s32 usage, ret;
> + unsigned long flags;
> +
> + spin_lock_irqsave(&cmdq->lock, flags);
> +
> + usage = atomic_inc_return(&cmdq->usage);
> +
> + ret = clk_bulk_enable(cmdq->gce_num, cmdq->clocks);
> + if (usage <=0 || ret < 0) {
> + dev_err(cmdq->mbox.dev, "ref count %d ret %d suspend
> %d\n",
> + usage, ret, cmdq->suspended);
> + } else if (usage == 1) {
> + if (cmdq->control_by_sw)
> + writel((0x7 << 16) + 0x7, cmdq->base + GCE_GCTL_VALUE);
> + }
> +
> + spin_unlock_irqrestore(&cmdq->lock, flags);
> +
> + return ret;
> +}
> +
> +static void cmdq_clk_disable(struct cmdq *cmdq)
> +{
> + s32 usage;
> +
> + usage = atomic_dec_return(&cmdq->usage);
> +
> + if (usage < 0) {
> + dev_err(cmdq->mbox.dev, "ref count %d suspend %d\n",
> + usage, cmdq->suspended);
> + } else if (usage == 0) {
> + if (cmdq->control_by_sw)
> + writel(0x7, cmdq->base + GCE_GCTL_VALUE);
> + }
> +
> + clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
> +}
> +
> u8 cmdq_get_shift_pa(struct mbox_chan *chan)
> {
> struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
> @@ -271,7 +313,8 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
>
> if (list_empty(&thread->task_busy_list)) {
> cmdq_thread_disable(cmdq, thread);
> - clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
> +
> + cmdq_clk_disable(cmdq);
> }
> }
>
> @@ -360,8 +403,7 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
> task->pkt = pkt;
>
> if (list_empty(&thread->task_busy_list)) {
> - WARN_ON(clk_bulk_enable(cmdq->gce_num, cmdq->clocks));
> -
> + WARN_ON(cmdq_clk_enable(cmdq) < 0);
> /*
> * The thread reset will clear thread related register to 0,
> * including pc, end, priority, irq, suspend and enable. Thus
> @@ -433,7 +475,7 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
> }
>
> cmdq_thread_disable(cmdq, thread);
> - clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
> + cmdq_clk_disable(cmdq);
>
> done:
> /*
> @@ -479,7 +521,8 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
>
> cmdq_thread_resume(thread);
> cmdq_thread_disable(cmdq, thread);
> - clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
> +
> + cmdq_clk_disable(cmdq);
>
> out:
> spin_unlock_irqrestore(&thread->chan->lock, flags);
> @@ -490,7 +533,8 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
> spin_unlock_irqrestore(&thread->chan->lock, flags);
> if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
> enable, enable == 0, 1, timeout)) {
> - dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x
> done\n",
> + dev_err(cmdq->mbox.dev,
> + "Fail to wait GCE thread 0x%x done\n",
> (u32)(thread->base - cmdq->base));
>
> return -EFAULT;
> @@ -626,6 +670,7 @@ static int cmdq_probe(struct platform_device *pdev)
>
> WARN_ON(clk_bulk_prepare(cmdq->gce_num, cmdq->clocks));
>
> + spin_lock_init(&cmdq->lock);
> cmdq_init(cmdq);
>
> return 0;
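
Regarding point 2 of the commit message (protecting the register when display
and mdp both use cmdq), my understanding of the intended call pattern is
something like the sketch below; the two callers are hypothetical, only the
atomic usage count and the helpers are from the patch. Only the first enable
and the last disable touch GCE_GCTL_VALUE:

	/* display client: usage 0 -> 1, clocks on, ddr enable bits written */
	WARN_ON(cmdq_clk_enable(cmdq) < 0);
	/* mdp client: usage 1 -> 2, clock refcount only, no register write */
	WARN_ON(cmdq_clk_enable(cmdq) < 0);
	/* display client done: usage 2 -> 1, no register write */
	cmdq_clk_disable(cmdq);
	/* mdp client done: usage 1 -> 0, ddr enable bits cleared, clocks off */
	cmdq_clk_disable(cmdq);

If that is the scenario, please state it explicitly in the commit message.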