Message-ID: <aEajxQxP_aWhqHHB@82bae11342dd>
Date: Mon, 9 Jun 2025 09:05:09 +0000
From: Subbaraya Sundeep <sbhatta@...vell.com>
To: Jun Miao <jun.miao@...el.com>
CC: <oneukum@...e.com>, <andrew+netdev@...n.ch>, <davem@...emloft.net>,
<edumazet@...gle.com>, <kuba@...nel.org>, <netdev@...r.kernel.org>,
<linux-usb@...r.kernel.org>, <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH] net: usb: Convert tasklet API to new bottom half
workqueue mechanism
Hi,
On 2025-06-09 at 07:26:10, Jun Miao (jun.miao@...el.com) wrote:
> Migrate the usbnet driver from the tasklet API to the new bottom half
> workqueue mechanism, replacing every tasklet use with the corresponding
> BH workqueue API. This keeps the driver in line with the current
> deferred-work design and can improve performance.
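
(As an aside for anyone skimming the conversion: the pattern is roughly the
sketch below, using a hypothetical foo_dev rather than the usbnet code itself;
the hunks further down are the authoritative change.)

#include <linux/workqueue.h>

struct foo_dev {
	struct work_struct bh_work;	/* was: struct tasklet_struct bh */
};

/* Replaces the tasklet callback. */
static void foo_bh_work(struct work_struct *work)
{
	struct foo_dev *foo = from_work(foo, work, bh_work);

	/* ... the deferred work the tasklet used to do ... */
}

static void foo_init(struct foo_dev *foo)
{
	INIT_WORK(&foo->bh_work, foo_bh_work);		/* was: tasklet_setup() */
}

static void foo_kick(struct foo_dev *foo)
{
	queue_work(system_bh_wq, &foo->bh_work);	/* was: tasklet_schedule() */
}

static void foo_stop(struct foo_dev *foo)
{
	disable_work_sync(&foo->bh_work);		/* was: tasklet_kill() */
}
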
>
> Signed-off-by: Jun Miao <jun.miao@...el.com>
> ---
> drivers/net/usb/usbnet.c | 36 ++++++++++++++++++------------------
> include/linux/usb/usbnet.h | 2 +-
> 2 files changed, 19 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
> index c04e715a4c2a..566127b4e0ba 100644
> --- a/drivers/net/usb/usbnet.c
> +++ b/drivers/net/usb/usbnet.c
> @@ -461,7 +461,7 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
>
> __skb_queue_tail(&dev->done, skb);
> if (dev->done.qlen == 1)
> - tasklet_schedule(&dev->bh);
> + queue_work(system_bh_wq, &dev->bh_work);
> spin_unlock(&dev->done.lock);
> spin_unlock_irqrestore(&list->lock, flags);
> return old_state;
> @@ -549,7 +549,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
> default:
> netif_dbg(dev, rx_err, dev->net,
> "rx submit, %d\n", retval);
> - tasklet_schedule (&dev->bh);
> + queue_work(system_bh_wq, &dev->bh_work);
> break;
> case 0:
> __usbnet_queue_skb(&dev->rxq, skb, rx_start);
> @@ -709,7 +709,7 @@ void usbnet_resume_rx(struct usbnet *dev)
> num++;
> }
>
> - tasklet_schedule(&dev->bh);
> + queue_work(system_bh_wq, &dev->bh_work);
>
> netif_dbg(dev, rx_status, dev->net,
> "paused rx queue disabled, %d skbs requeued\n", num);
> @@ -778,7 +778,7 @@ void usbnet_unlink_rx_urbs(struct usbnet *dev)
> {
> if (netif_running(dev->net)) {
> (void) unlink_urbs (dev, &dev->rxq);
> - tasklet_schedule(&dev->bh);
> + queue_work(system_bh_wq, &dev->bh_work);
> }
> }
> EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
> @@ -861,14 +861,14 @@ int usbnet_stop (struct net_device *net)
> /* deferred work (timer, softirq, task) must also stop */
> dev->flags = 0;
> timer_delete_sync(&dev->delay);
> - tasklet_kill(&dev->bh);
> + disable_work_sync(&dev->bh_work);
> cancel_work_sync(&dev->kevent);
>
> /* We have cyclic dependencies. Those calls are needed
> * to break a cycle. We cannot fall into the gaps because
> * we have a flag
> */
> - tasklet_kill(&dev->bh);
> + disable_work_sync(&dev->bh_work);
> timer_delete_sync(&dev->delay);
> cancel_work_sync(&dev->kevent);
>
> @@ -955,7 +955,7 @@ int usbnet_open (struct net_device *net)
> clear_bit(EVENT_RX_KILL, &dev->flags);
>
> // delay posting reads until we're fully open
> - tasklet_schedule (&dev->bh);
> + queue_work(system_bh_wq, &dev->bh_work);
> if (info->manage_power) {
> retval = info->manage_power(dev, 1);
> if (retval < 0) {
> @@ -1123,7 +1123,7 @@ static void __handle_link_change(struct usbnet *dev)
> */
> } else {
> /* submitting URBs for reading packets */
> - tasklet_schedule(&dev->bh);
> + queue_work(system_bh_wq, &dev->bh_work);
> }
>
> /* hard_mtu or rx_urb_size may change during link change */
> @@ -1198,11 +1198,11 @@ usbnet_deferred_kevent (struct work_struct *work)
> } else {
> clear_bit (EVENT_RX_HALT, &dev->flags);
> if (!usbnet_going_away(dev))
> - tasklet_schedule(&dev->bh);
> + queue_work(system_bh_wq, &dev->bh_work);
> }
> }
>
> - /* tasklet could resubmit itself forever if memory is tight */
> + /* workqueue could resubmit itself forever if memory is tight */
> if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
> struct urb *urb = NULL;
> int resched = 1;
> @@ -1224,7 +1224,7 @@ usbnet_deferred_kevent (struct work_struct *work)
> fail_lowmem:
> if (resched)
> if (!usbnet_going_away(dev))
> - tasklet_schedule(&dev->bh);
> + queue_work(system_bh_wq, &dev->bh_work);
> }
> }
>
> @@ -1325,7 +1325,7 @@ void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
> struct usbnet *dev = netdev_priv(net);
>
> unlink_urbs (dev, &dev->txq);
> - tasklet_schedule (&dev->bh);
> + queue_work(system_bh_wq, &dev->bh_work);
> /* this needs to be handled individually because the generic layer
> * doesn't know what is sufficient and could not restore private
> * information if a remedy of an unconditional reset were used.
> @@ -1547,7 +1547,7 @@ static inline void usb_free_skb(struct sk_buff *skb)
>
> /*-------------------------------------------------------------------------*/
>
> -// tasklet (work deferred from completions, in_irq) or timer
> +// workqueue (work deferred from completions, in_irq) or timer
>
> static void usbnet_bh (struct timer_list *t)
> {
> @@ -1601,16 +1601,16 @@ static void usbnet_bh (struct timer_list *t)
> "rxqlen %d --> %d\n",
> temp, dev->rxq.qlen);
> if (dev->rxq.qlen < RX_QLEN(dev))
> - tasklet_schedule (&dev->bh);
> + queue_work(system_bh_wq, &dev->bh_work);
Correct me if I am wrong.
Just above this code there is:
			if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK)
If this no longer runs in atomic context, you could change GFP_ATOMIC to
GFP_KERNEL here as well.
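i.e. something like the below (untested sketch of the suggestion only; worth
double checking whether the BH workqueue callback may actually sleep at this
point, and note usbnet_bh() is still also used as the timer callback):

-			if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK)
+			if (rx_alloc_submit(dev, GFP_KERNEL) == -ENOLINK)
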
Thanks,
Sundeep
> }
> if (dev->txq.qlen < TX_QLEN (dev))
> netif_wake_queue (dev->net);
> }
> }
>
> -static void usbnet_bh_tasklet(struct tasklet_struct *t)
> +static void usbnet_bh_workqueue(struct work_struct *work)
> {
> - struct usbnet *dev = from_tasklet(dev, t, bh);
> + struct usbnet *dev = from_work(dev, work, bh_work);
>
> usbnet_bh(&dev->delay);
> }
> @@ -1742,7 +1742,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
> skb_queue_head_init (&dev->txq);
> skb_queue_head_init (&dev->done);
> skb_queue_head_init(&dev->rxq_pause);
> - tasklet_setup(&dev->bh, usbnet_bh_tasklet);
> + INIT_WORK (&dev->bh_work, usbnet_bh_workqueue);
> INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
> init_usb_anchor(&dev->deferred);
> timer_setup(&dev->delay, usbnet_bh, 0);
> @@ -1971,7 +1971,7 @@ int usbnet_resume (struct usb_interface *intf)
>
> if (!(dev->txq.qlen >= TX_QLEN(dev)))
> netif_tx_wake_all_queues(dev->net);
> - tasklet_schedule (&dev->bh);
> + queue_work(system_bh_wq, &dev->bh_work);
> }
> }
>
> diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
> index 0b9f1e598e3a..208682f77179 100644
> --- a/include/linux/usb/usbnet.h
> +++ b/include/linux/usb/usbnet.h
> @@ -58,7 +58,7 @@ struct usbnet {
> unsigned interrupt_count;
> struct mutex interrupt_mutex;
> struct usb_anchor deferred;
> - struct tasklet_struct bh;
> + struct work_struct bh_work;
>
> struct work_struct kevent;
> unsigned long flags;
> --
> 2.43.0
>