Message-ID: <cc9f6d43-0655-482e-384d-e75230e951cf@quicinc.com>
Date: Tue, 1 Nov 2022 10:44:59 -0700
From: Elliot Berman <quic_eberman@...cinc.com>
To: Pavan Kondeti <quic_pkondeti@...cinc.com>
CC: Bjorn Andersson <quic_bjorande@...cinc.com>,
Jassi Brar <jassisinghbrar@...il.com>,
Murali Nalajala <quic_mnalajal@...cinc.com>,
Trilok Soni <quic_tsoni@...cinc.com>,
Srivatsa Vaddagiri <quic_svaddagi@...cinc.com>,
Carl van Schaik <quic_cvanscha@...cinc.com>,
Prakruthi Deepak Heragu <quic_pheragu@...cinc.com>,
Andy Gross <agross@...nel.org>,
Dmitry Baryshkov <dmitry.baryshkov@...aro.org>,
<linux-arm-kernel@...ts.infradead.org>,
Mark Rutland <mark.rutland@....com>,
Lorenzo Pieralisi <lorenzo.pieralisi@....com>,
Sudeep Holla <sudeep.holla@....com>,
Marc Zyngier <maz@...nel.org>,
Rob Herring <robh+dt@...nel.org>,
Krzysztof Kozlowski <krzysztof.kozlowski+dt@...aro.org>,
Jonathan Corbet <corbet@....net>,
"Will Deacon" <will@...nel.org>,
Catalin Marinas <catalin.marinas@....com>,
"Arnd Bergmann" <arnd@...db.de>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Srinivas Kandagatla <srinivas.kandagatla@...aro.org>,
Amol Maheshwari <amahesh@....qualcomm.com>,
Kalle Valo <kvalo@...nel.org>, <devicetree@...r.kernel.org>,
<linux-doc@...r.kernel.org>, <linux-arm-msm@...r.kernel.org>,
<linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v6 09/21] mailbox: Add Gunyah message queue mailbox
On 10/27/2022 6:55 AM, Pavan Kondeti wrote:
> Hi Elliot,
>
> On Wed, Oct 26, 2022 at 11:58:34AM -0700, Elliot Berman wrote:
>> Gunyah message queues are a unidirectional inter-VM pipe for messages up
>> to 1024 bytes. This driver supports pairing a receiver message queue and
>> a transmitter message queue to expose a single mailbox channel.
>>
>> Signed-off-by: Elliot Berman <quic_eberman@...cinc.com>
>
> <snip>
>
>> +static irqreturn_t gh_msgq_tx_irq_handler(int irq, void *data)
>> +{
>> + struct gh_msgq *msgq = data;
>> +
>> + mbox_chan_txdone(gh_msgq_chan(msgq), 0);
>> +
>> + return IRQ_HANDLED;
>> +}
>> +
>> +static void gh_msgq_txdone_tasklet(unsigned long data)
>> +{
>> + struct gh_msgq *msgq = (struct gh_msgq *)data;
>> +
>> + mbox_chan_txdone(gh_msgq_chan(msgq), msgq->last_status);
>> +}
>> +
>> +static int gh_msgq_send_data(struct mbox_chan *chan, void *data)
>> +{
>> + struct gh_msgq *msgq = mbox_chan_to_msgq(chan);
>> + struct gh_msgq_tx_data *msgq_data = data;
>> + u64 tx_flags = 0;
>> + unsigned long ret;
>> + bool ready;
>> +
>> + if (msgq_data->push)
>> + tx_flags |= GH_HYPERCALL_MSGQ_TX_FLAGS_PUSH;
>> +
>> + ret = gh_hypercall_msgq_send(msgq->tx_ghrsc->capid, msgq_data->length,
>> + (uintptr_t)msgq_data->data, tx_flags, &ready);
>> +
>> + /*
>> + * Unlikely because Linux tracks the state of the msgq and should not
>> + * try to send a message when the msgq is full.
>> + */
>> + if (unlikely(ret == GH_ERROR_MSGQUEUE_FULL))
>> + return -EAGAIN;
>> +
>> + /*
>> + * Propagate all other errors to the client. If we returned an error
>> + * to the mailbox framework, no further messages could be sent and
>> + * nobody would know to retry this message.
>> + */
>> + msgq->last_status = gh_remap_error(ret);
>> +
>> + /*
>> + * The message was successfully sent, but the message queue isn't
>> + * ready to accept more messages because it's now full. The mailbox
>> + * framework requires that we report a message as transmitted only
>> + * when we're ready to transmit another one. We'll get that signal in
>> + * the form of a tx IRQ once the other side starts to drain the msgq.
>> + */
>> + if (ret == GH_ERROR_OK && !ready)
>> + return 0;
>> +
>> + /*
>> + * We can send more messages. The mailbox framework requires that tx
>> + * done happen asynchronously from sending the message, but Gunyah
>> + * message queues tell us right away on the hypercall return whether
>> + * we can send more messages. To work around this, defer the txdone
>> + * to a tasklet.
>> + */
>> + tasklet_schedule(&msgq->txdone_tasklet);
>> +
>
> Nice comments.
>
> irq_work would be a better choice.
>
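Done, switched to irq_work. Roughly like this (untested sketch; it
assumes a "struct irq_work txdone_work" member is added to struct
gh_msgq):

#include <linux/irq_work.h>

static void gh_msgq_txdone(struct irq_work *work)
{
	struct gh_msgq *msgq = container_of(work, struct gh_msgq,
					    txdone_work);

	mbox_chan_txdone(gh_msgq_chan(msgq), msgq->last_status);
}

/* in gh_msgq_init(): */
	init_irq_work(&msgq->txdone_work, gh_msgq_txdone);

/* in gh_msgq_send_data(), replacing tasklet_schedule(): */
	irq_work_queue(&msgq->txdone_work);
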
>> + return 0;
>> +}
>> +
>> +struct mbox_chan_ops gh_msgq_ops = {
>> + .send_data = gh_msgq_send_data,
>> +};
>> +
>> +/**
>> + * gh_msgq_init() - Initialize a Gunyah message queue with an mbox_client
>> + * @parent: optional, device parent used for the mailbox controller
>> + * @msgq: Pointer to the gh_msgq to initialize
>> + * @cl: A mailbox client to bind to the mailbox channel that the message queue creates
>> + * @tx_ghrsc: optional, the transmission side of the message queue
>> + * @rx_ghrsc: optional, the receiving side of the message queue
>> + *
>> + * At least one of tx_ghrsc and rx_ghrsc must be non-NULL. Most message queue use cases come with
>> + * a pair of message queues to facilitate bidirectional communication. When tx_ghrsc is set,
>> + * the client can send messages with mbox_send_message(gh_msgq_chan(msgq), msg). When rx_ghrsc
>> + * is set, the mbox_client should register an .rx_callback() and the message queue driver will
>> + * push all available messages upon receiving the RX ready interrupt. The messages should be
>> + * consumed or copied by the client right away because the gh_msgq_rx_data will be replaced/destroyed
>> + * after the callback.
>> + *
>> + * Return: 0 on success, negative otherwise
>> + */
>> +int gh_msgq_init(struct device *parent, struct gh_msgq *msgq, struct mbox_client *cl,
>> + struct gunyah_resource *tx_ghrsc, struct gunyah_resource *rx_ghrsc)
>> +{
>> + int ret;
>> +
>> + /* Must have at least one of tx_ghrsc or rx_ghrsc, and any provided resource must be the right type */
>> + if ((!tx_ghrsc && !rx_ghrsc) ||
>> + (tx_ghrsc && tx_ghrsc->type != GUNYAH_RESOURCE_TYPE_MSGQ_TX) ||
>> + (rx_ghrsc && rx_ghrsc->type != GUNYAH_RESOURCE_TYPE_MSGQ_RX))
>> + return -EINVAL;
>> +
>> + msgq->tx_ghrsc = tx_ghrsc;
>> + msgq->rx_ghrsc = rx_ghrsc;
>> +
>> + msgq->mbox.dev = parent;
>> + msgq->mbox.ops = &gh_msgq_ops;
>> + msgq->mbox.chans = kcalloc(1, sizeof(*msgq->mbox.chans), GFP_KERNEL);
>
> Error handling missing.
>
> minor nit pick:
>
> If you initialize num_chans to 1 before, then you can use that as the first
> argument to kcalloc() which makes it more readable since you opted for kcalloc()
> instead of kzalloc() there.
>
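Added the missing error handling and took the num_chans suggestion,
something like (untested sketch):

	msgq->mbox.num_chans = 1;
	msgq->mbox.chans = kcalloc(msgq->mbox.num_chans,
				   sizeof(*msgq->mbox.chans), GFP_KERNEL);
	if (!msgq->mbox.chans)
		return -ENOMEM;
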
>> + msgq->mbox.num_chans = 1;
>> + msgq->mbox.txdone_irq = true;
>> +
>> + if (gh_msgq_has_tx(msgq)) {
>> + ret = request_irq(msgq->tx_ghrsc->irq, gh_msgq_tx_irq_handler, 0, "gh_msgq_tx",
>> + msgq);
>> + if (ret)
>> + goto err_chans;
>> + }
>> +
>> + if (gh_msgq_has_rx(msgq)) {
>> + ret = request_threaded_irq(msgq->rx_ghrsc->irq, NULL, gh_msgq_rx_irq_handler,
>> + IRQF_ONESHOT, "gh_msgq_rx", msgq);
>> + if (ret)
>> + goto err_tx_irq;
>> + }
>> +
>> + tasklet_init(&msgq->txdone_tasklet, gh_msgq_txdone_tasklet, (unsigned long)msgq);
>
> If you wish to use tasklets, use tasklet_setup().
>
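This becomes moot with the switch to irq_work, but for reference the
tasklet_setup() form would be (untested sketch):

static void gh_msgq_txdone_tasklet(struct tasklet_struct *tasklet)
{
	struct gh_msgq *msgq = container_of(tasklet, struct gh_msgq,
					    txdone_tasklet);

	mbox_chan_txdone(gh_msgq_chan(msgq), msgq->last_status);
}

	tasklet_setup(&msgq->txdone_tasklet, gh_msgq_txdone_tasklet);
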
>> +
>> + ret = mbox_controller_register(&msgq->mbox);
>> + if (ret)
>> + goto err_rx_irq;
>> +
>> + ret = mbox_bind_client(gh_msgq_chan(msgq), cl);
>> + if (ret)
>> + goto err_mbox;
>> +
>> + return 0;
>> +err_mbox:
>> + mbox_controller_unregister(&msgq->mbox);
>> +err_rx_irq:
>> + if (gh_msgq_has_rx(msgq))
>> + free_irq(msgq->rx_ghrsc->irq, msgq);
>> +err_tx_irq:
>> + if (gh_msgq_has_tx(msgq))
>> + free_irq(msgq->tx_ghrsc->irq, msgq);
>> +err_chans:
>> + kfree(msgq->mbox.chans);
>> + return ret;
>> +}
>> +EXPORT_SYMBOL_GPL(gh_msgq_init);
>> +
>> +void gh_msgq_remove(struct gh_msgq *msgq)
>> +{
>> + if (gh_msgq_has_rx(msgq))
>> + free_irq(msgq->rx_ghrsc->irq, msgq);
>> +
>> + if (gh_msgq_has_tx(msgq))
>> + free_irq(msgq->tx_ghrsc->irq, msgq);
>> +
>> + kfree(msgq->mbox.chans);
>> +}
>> +EXPORT_SYMBOL_GPL(gh_msgq_remove);
>> +
>
> Is gh_msgq_remove() supposed to undo every thing done in gh_msgq_init()?
> ex: mbox controller and channel are not unregistered.
>
Ah, thanks for catching that; updated this along with the rest of your
comments.
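
For gh_msgq_remove(), the teardown now mirrors gh_msgq_init(), roughly
(untested sketch):

void gh_msgq_remove(struct gh_msgq *msgq)
{
	mbox_free_channel(gh_msgq_chan(msgq));
	mbox_controller_unregister(&msgq->mbox);

	if (gh_msgq_has_rx(msgq))
		free_irq(msgq->rx_ghrsc->irq, msgq);

	if (gh_msgq_has_tx(msgq))
		free_irq(msgq->tx_ghrsc->irq, msgq);

	kfree(msgq->mbox.chans);
}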