Message-ID: <1581931765.12547.0.camel@mtksdaap41>
Date: Mon, 17 Feb 2020 17:29:25 +0800
From: CK Hu <ck.hu@...iatek.com>
To: Bibby Hsieh <bibby.hsieh@...iatek.com>
CC: Jassi Brar <jassisinghbrar@...il.com>,
Matthias Brugger <matthias.bgg@...il.com>,
Rob Herring <robh+dt@...nel.org>, <devicetree@...r.kernel.org>,
<linux-kernel@...r.kernel.org>,
<linux-arm-kernel@...ts.infradead.org>,
<linux-mediatek@...ts.infradead.org>,
<srv_heupstream@...iatek.com>,
Nicolas Boichat <drinkcat@...omium.org>,
Dennis-YC Hsieh <dennis-yc.hsieh@...iatek.com>,
Houlong Wei <houlong.wei@...iatek.com>
Subject: Re: [PATCH v1 2/3] mailbox: mediatek: implement flush function
Hi, Bibby:
On Mon, 2020-02-17 at 17:05 +0800, Bibby Hsieh wrote:
> A client driver that needs to reorganize its command buffer can use
> this function to flush the commands already sent on the channel.
> If the channel has not started executing yet (usually because it is
> waiting for an event), this function aborts the queued commands directly.
>
Reviewed-by: CK Hu <ck.hu@...iatek.com>
> Signed-off-by: Bibby Hsieh <bibby.hsieh@...iatek.com>
> ---
> drivers/mailbox/mtk-cmdq-mailbox.c | 52 ++++++++++++++++++++++++++++++
> 1 file changed, 52 insertions(+)
>
> diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
> index 9a6ce9f5a7db..0da5e2dc2c0e 100644
> --- a/drivers/mailbox/mtk-cmdq-mailbox.c
> +++ b/drivers/mailbox/mtk-cmdq-mailbox.c
> @@ -432,10 +432,62 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
> {
> }
>
> +static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
> +{
> + struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
> + struct cmdq_task_cb *cb;
> + struct cmdq_cb_data data;
> + struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
> + struct cmdq_task *task, *tmp;
> + unsigned long flags;
> + u32 enable;
> +
> + spin_lock_irqsave(&thread->chan->lock, flags);
> + if (list_empty(&thread->task_busy_list))
> + goto out;
> +
> + WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
> + if (!cmdq_thread_is_in_wfe(thread))
> + goto wait;
> +
> + list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
> + list_entry) {
> + cb = &task->pkt->async_cb;
> + if (cb->cb) {
> + data.sta = CMDQ_CB_ERROR;
> + data.data = cb->data;
> + cb->cb(data);
> + }
> + list_del(&task->list_entry);
> + kfree(task);
> + }
> +
> + cmdq_thread_resume(thread);
> + cmdq_thread_disable(cmdq, thread);
> + clk_disable(cmdq->clock);
> +
> +out:
> + spin_unlock_irqrestore(&thread->chan->lock, flags);
> + return 0;
> +
> +wait:
> + cmdq_thread_resume(thread);
> + spin_unlock_irqrestore(&thread->chan->lock, flags);
> + if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
> + enable, enable == 0, 1, timeout)) {
> + dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
> + (u32)(thread->base - cmdq->base));
> +
> + return -EFAULT;
> + }
> + return 0;
> +}
> +
> static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
> .send_data = cmdq_mbox_send_data,
> .startup = cmdq_mbox_startup,
> .shutdown = cmdq_mbox_shutdown,
> + .flush = cmdq_mbox_flush,
> };
>
> static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
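Just for reference, a client would normally reach this new .flush callback
through mbox_flush() in the mailbox framework. A minimal sketch of that path
follows; the helper name cmdq_client_abort() and the 2000 us timeout are only
illustrative, not part of this patch:

#include <linux/mailbox_client.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>

/*
 * Illustrative client-side helper: queue a packet, then flush the
 * channel so the controller either drains it or aborts it.
 */
static int cmdq_client_abort(struct mbox_chan *chan, struct cmdq_pkt *pkt)
{
	int ret;

	/* Queue the packet as usual ... */
	ret = mbox_send_message(chan, pkt);
	if (ret < 0)
		return ret;

	/*
	 * ... then flush: with this patch, cmdq_mbox_flush() either waits
	 * for the GCE thread to drain (timeout is passed to
	 * readl_poll_timeout_atomic(), so microseconds here) or, if the
	 * thread is parked in WFE, removes the queued tasks and invokes
	 * their callbacks with CMDQ_CB_ERROR.
	 */
	return mbox_flush(chan, 2000);
}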