Message-ID: <CAMZdPi-a7W5xYTxKQE7a5wQEh1EfsDrvCjupwc25kK-iaJUPTw@mail.gmail.com>
Date: Tue, 3 Nov 2020 10:17:16 +0100
From: Loic Poulain <loic.poulain@...aro.org>
To: Jakub Kicinski <kuba@...nel.org>
Cc: David Miller <davem@...emloft.net>,
Network Development <netdev@...r.kernel.org>,
linux-arm-msm <linux-arm-msm@...r.kernel.org>,
Bhaumik Bhatt <bbhatt@...eaurora.org>,
Willem de Bruijn <willemdebruijn.kernel@...il.com>,
Jeffrey Hugo <jhugo@...eaurora.org>,
Manivannan Sadhasivam <manivannan.sadhasivam@...aro.org>,
Hemant Kumar <hemantk@...eaurora.org>
Subject: Re: [PATCH v9 2/2] net: Add mhi-net driver
Hi Jakub,
On Mon, 2 Nov 2020 at 23:40, Jakub Kicinski <kuba@...nel.org> wrote:
>
> On Fri, 30 Oct 2020 11:48:15 +0100 Loic Poulain wrote:
> > This patch adds a new network driver implementing MHI transport for
> > network packets. Packets can be in any format, though QMAP (rmnet)
> > is the usual protocol (flow control + PDN mux).
> >
> > It supports two MHI devices: IP_HW0, which is the path to the IPA
> > (IP accelerator) on Qualcomm modems, and IP_SW0, which is the
> > software-driven IP path (to the modem CPU).
> >
> > Signed-off-by: Loic Poulain <loic.poulain@...aro.org>
> > Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@...aro.org>
>
> > +static int mhi_ndo_stop(struct net_device *ndev)
> > +{
> > + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
> > +
> > + netif_stop_queue(ndev);
> > + netif_carrier_off(ndev);
> > + cancel_delayed_work_sync(&mhi_netdev->rx_refill);
>
> Where do you free the allocated skbs? Does
> mhi_unprepare_from_transfer() do that?
When a buffer is queued, it is owned by the device until the transfer
callback (ul_cb/dl_cb) is called. mhi_unprepare_from_transfer() resets
the MHI channels, which in turn releases the buffers: for each buffer,
the MHI core calls the mhi-net transfer callback with -ENOTCONN status,
and we free the skb from there.
>
> The skbs should be freed somehow in .ndo_stop().
The skbs are released in remove() (mhi_unprepare_from_transfer). I do
not do prepare/unprepare in ndo_open/ndo_stop because the channels need
to be started for the whole life of the interface: starting them sets
up a kind of internal routing on the device/modem side. Indeed, if the
channels are not started, configuring the modem (via out-of-band QMI,
AT commands, etc.) is not possible.
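For reference, a minimal sketch of the remove() path I am describing
(simplified, not the exact patch code):

static void mhi_net_remove(struct mhi_device *mhi_dev)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);

	unregister_netdev(mhi_netdev->ndev);

	/* Reset the UL/DL channels: the MHI core completes every buffer
	 * still queued by calling the ul/dl transfer callbacks with
	 * -ENOTCONN, and the callbacks free the skbs.
	 */
	mhi_unprepare_from_transfer(mhi_netdev->mdev);

	free_netdev(mhi_netdev->ndev);
}

The matching mhi_prepare_for_transfer() call lives in probe(), so the
channels stay up as long as the interface exists.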
>
> > + return 0;
> > +}
> > +
> > +static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
> > +{
> > + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
> > + struct mhi_device *mdev = mhi_netdev->mdev;
> > + int err;
> > +
> > + err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
> > + if (unlikely(err)) {
> > + net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
> > + ndev->name, err);
> > +
> > + u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
> > + u64_stats_inc(&mhi_netdev->stats.tx_dropped);
> > + u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
> > +
> > + /* drop the packet */
> > + kfree_skb(skb);
>
> dev_kfree_skb_any()
>
> > + }
> > +
> > + if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
> > + netif_stop_queue(ndev);
> > +
> > + return NETDEV_TX_OK;
> > +}
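For reference, the dev_kfree_skb_any() change requested above would make
the drop path look roughly like this (sketch):

		u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
		u64_stats_inc(&mhi_netdev->stats.tx_dropped);
		u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

		/* ndo_start_xmit can run with hard irqs disabled (e.g. via
		 * netpoll), so drop the packet with the any-context variant.
		 */
		dev_kfree_skb_any(skb);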
>
> > +static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
> > + struct mhi_result *mhi_res)
> > +{
> > + struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
> > + struct sk_buff *skb = mhi_res->buf_addr;
> > + int remaining;
> > +
> > + remaining = atomic_dec_return(&mhi_netdev->stats.rx_queued);
> > +
> > + if (unlikely(mhi_res->transaction_status)) {
> > + u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
> > + u64_stats_inc(&mhi_netdev->stats.rx_errors);
> > + u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
> > +
> > + kfree_skb(skb);
>
> Are you sure this never runs with irqs disabled or from irq context?
>
> Otherwise dev_kfree_skb_any().
Yes, I will fix that.
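For reference, the fix is simply to use the any-context variant in that
error path (sketch):

		/* the completion may run with irqs disabled or in irq context */
		dev_kfree_skb_any(skb);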
>
> > +
> > + /* MHI layer resetting the DL channel */
> > + if (mhi_res->transaction_status == -ENOTCONN)
> > + return;
> > + } else {
> > + u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
> > + u64_stats_inc(&mhi_netdev->stats.rx_packets);
> > + u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
> > + u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
> > +
> > + skb->protocol = htons(ETH_P_MAP);
> > + skb_put(skb, mhi_res->bytes_xferd);
> > + netif_rx(skb);
> > + }
> > +
> > + /* Refill if RX buffers queue becomes low */
> > + if (remaining <= mhi_netdev->rx_queue_sz / 2)
> > + schedule_delayed_work(&mhi_netdev->rx_refill, 0);
> > +}
> > +
> > +static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
> > + struct mhi_result *mhi_res)
> > +{
> > + struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
> > + struct net_device *ndev = mhi_netdev->ndev;
> > + struct sk_buff *skb = mhi_res->buf_addr;
> > +
> > + /* Hardware has consumed the buffer, so free the skb (which is not
> > + * freed by the MHI stack) and perform accounting.
> > + */
> > + consume_skb(skb);
>
> ditto
>
> > + u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
> > + if (unlikely(mhi_res->transaction_status)) {
> > + u64_stats_inc(&mhi_netdev->stats.tx_errors);
> > +
> > + /* MHI layer resetting the UL channel */
> > + if (mhi_res->transaction_status == -ENOTCONN)
> > + return;
>
> u64_stats_update_end()
>
> > + } else {
> > + u64_stats_inc(&mhi_netdev->stats.tx_packets);
> > + u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
> > + }
> > + u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
> > +
> > + if (netif_queue_stopped(ndev))
> > + netif_wake_queue(ndev);
> > +}
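For reference, a sketch of the error path with the missing
u64_stats_update_end() added before the early return:

	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	if (unlikely(mhi_res->transaction_status)) {
		u64_stats_inc(&mhi_netdev->stats.tx_errors);

		/* MHI layer resetting the UL channel */
		if (mhi_res->transaction_status == -ENOTCONN) {
			u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
			return;
		}
	} else {
		u64_stats_inc(&mhi_netdev->stats.tx_packets);
		u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
	}
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);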
> > +
> > +static void mhi_net_rx_refill_work(struct work_struct *work)
> > +{
> > + struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
> > + rx_refill.work);
> > + struct net_device *ndev = mhi_netdev->ndev;
> > + struct mhi_device *mdev = mhi_netdev->mdev;
> > + int size = READ_ONCE(ndev->mtu);
> > + struct sk_buff *skb;
> > + int err;
> > +
> > + do {
>
> should this be a while(), not a do {} while() loop now?
>
> > + skb = netdev_alloc_skb(ndev, size);
> > + if (unlikely(!skb))
> > + break;
> > +
> > + err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
> > + if (unlikely(err)) {
> > + net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
> > + ndev->name, err);
> > + kfree_skb(skb);
> > + break;
> > + }
> > +
> > + /* Do not hog the CPU if rx buffers are consumed faster than
> > + * queued (unlikely).
> > + */
> > + cond_resched();
> > + } while (atomic_inc_return(&mhi_netdev->stats.rx_queued) < mhi_netdev->rx_queue_sz);
> > +
> > + /* If we're still starved of rx buffers, reschedule later */
> > + if (unlikely(!atomic_read(&mhi_netdev->stats.rx_queued)))
> > + schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
> > +}
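For reference, a while() variant as suggested would check the fill level
before allocating anything (one possible shape, not the final code):

	while (atomic_read(&mhi_netdev->stats.rx_queued) < mhi_netdev->rx_queue_sz) {
		skb = netdev_alloc_skb(ndev, size);
		if (unlikely(!skb))
			break;

		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
		if (unlikely(err)) {
			net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
					    ndev->name, err);
			kfree_skb(skb);
			break;
		}

		atomic_inc(&mhi_netdev->stats.rx_queued);

		/* Do not hog the CPU if rx buffers are consumed faster than
		 * queued (unlikely).
		 */
		cond_resched();
	}

This avoids queueing an extra buffer when the queue is already at
rx_queue_sz, which the do {} while() form could not guarantee.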