Message-ID: <willemdebruijn.kernel.243baccfedc16@gmail.com>
Date: Tue, 02 Sep 2025 17:20:58 -0400
From: Willem de Bruijn <willemdebruijn.kernel@...il.com>
To: Simon Schippers <simon.schippers@...dortmund.de>,
willemdebruijn.kernel@...il.com,
jasowang@...hat.com,
mst@...hat.com,
eperezma@...hat.com,
stephen@...workplumber.org,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org,
virtualization@...ts.linux.dev,
kvm@...r.kernel.org
Cc: Simon Schippers <simon.schippers@...dortmund.de>,
Tim Gebauer <tim.gebauer@...dortmund.de>
Subject: Re: [PATCH 2/4] netdev queue flow control for TUN

Simon Schippers wrote:
> The netdev queue is stopped in tun_net_xmit after inserting an SKB into
> the ptr_ring if that insertion filled the ring. If the insertion into
> the ptr_ring fails, the netdev queue is also stopped and the SKB is
> dropped. However, this never happened in my testing.
Indeed: the last successful insertion will always have stopped the
queue before this can happen. Since this path cannot be reached, there
is no need to add the code defensively. If in doubt, maybe add a
DEBUG_NET_WARN_ON_ONCE.
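
Something along these lines, perhaps (sketch only, not tested;
DEBUG_NET_WARN_ON_ONCE compiles away unless CONFIG_DEBUG_NET is set):

	if (unlikely(ptr_ring_produce(&tfile->tx_ring, skb))) {
		/* Should be unreachable: the queue is stopped as soon
		 * as the ring becomes full, so the producer never sees
		 * a full ring here. Flag it in debug builds instead of
		 * adding another stop + barrier on this path.
		 */
		DEBUG_NET_WARN_ON_ONCE(1);
		drop_reason = SKB_DROP_REASON_FULL_RING;
		goto drop;
	}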
> To ensure
> that the ptr_ring change is visible to the consumer before the netdev
> queue is stopped, an smp_wmb() is used.
>
> Then in tun_ring_recv, the new helper wake_netdev_queue is called in the
> blocking wait loop and after consuming an SKB from the ptr_ring. This
> helper first checks whether the netdev queue has been stopped. With the
> paired smp_rmb() it is then known that tun_net_xmit will not produce any
> more SKBs. With that knowledge, the helper can wake the netdev queue if
> there is at least one spare slot in the ptr_ring, by calling
> ptr_ring_spare with cnt=1.
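
For reference, the pairing this describes, as I read it (simplified;
ptr_ring_spare being the new helper from patch 1/4):

	/*
	 * producer (tun_net_xmit)      consumer (wake_netdev_queue)
	 *
	 * ptr_ring_produce(ring, skb);
	 * smp_wmb();                   if (netif_tx_queue_stopped(txq)) {
	 * netif_tx_stop_queue(queue);          smp_rmb();
	 *                                      if (ptr_ring_spare(ring, 1))
	 *                                              netif_tx_wake_queue(txq);
	 *                              }
	 */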
>
> Co-developed-by: Tim Gebauer <tim.gebauer@...dortmund.de>
> Signed-off-by: Tim Gebauer <tim.gebauer@...dortmund.de>
> Signed-off-by: Simon Schippers <simon.schippers@...dortmund.de>
> ---
> drivers/net/tun.c | 33 ++++++++++++++++++++++++++++++---
> 1 file changed, 30 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/tun.c b/drivers/net/tun.c
> index cc6c50180663..735498e221d8 100644
> --- a/drivers/net/tun.c
> +++ b/drivers/net/tun.c
> @@ -1060,13 +1060,21 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
>
> nf_reset_ct(skb);
>
> - if (ptr_ring_produce(&tfile->tx_ring, skb)) {
> + queue = netdev_get_tx_queue(dev, txq);
> + if (unlikely(ptr_ring_produce(&tfile->tx_ring, skb))) {
> + /* Paired with smp_rmb() in wake_netdev_queue. */
> + smp_wmb();
> + netif_tx_stop_queue(queue);
> drop_reason = SKB_DROP_REASON_FULL_RING;
> goto drop;
> }
> + if (ptr_ring_full(&tfile->tx_ring)) {
> + /* Paired with smp_rmb() in wake_netdev_queue. */
> + smp_wmb();
> + netif_tx_stop_queue(queue);
> + }
>
> /* dev->lltx requires to do our own update of trans_start */
> - queue = netdev_get_tx_queue(dev, txq);
> txq_trans_cond_update(queue);
>
> /* Notify and wake up reader process */
> @@ -2110,6 +2118,24 @@ static ssize_t tun_put_user(struct tun_struct *tun,
> return total;
> }
>
> +static inline void wake_netdev_queue(struct tun_file *tfile)
> +{
> + struct netdev_queue *txq;
> + struct net_device *dev;
> +
> + rcu_read_lock();
> + dev = rcu_dereference(tfile->tun)->dev;
> + txq = netdev_get_tx_queue(dev, tfile->queue_index);
> +
> + if (netif_tx_queue_stopped(txq)) {
> + /* Paired with smp_wmb() in tun_net_xmit. */
> + smp_rmb();
> + if (ptr_ring_spare(&tfile->tx_ring, 1))
> + netif_tx_wake_queue(txq);
> + }
> + rcu_read_unlock();
> +}
> +
> static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
> {
> DECLARE_WAITQUEUE(wait, current);
> @@ -2139,7 +2165,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
> error = -EFAULT;
> break;
> }
> -
> + wake_netdev_queue(tfile);
Why wake when no entry was consumed?
Also keep the empty line.
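
One option would be to wake only once an entry has actually been
consumed, i.e. drop this call and guard the one at the out label
(sketch only, not tested):

	out:
		if (ptr)
			wake_netdev_queue(tfile);
		*err = error;
		return ptr;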
> schedule();
> }
>
> @@ -2147,6 +2173,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
> remove_wait_queue(&tfile->socket.wq.wait, &wait);
>
> out:
> + wake_netdev_queue(tfile);
> *err = error;
> return ptr;
> }
> --
> 2.43.0
>