Message-ID: <CAJ8uoz3bMk_0bbtGdEAkbXNHu0c5Zr+-sAUyqk2M84VLE4FtpQ@mail.gmail.com>
Date: Thu, 9 Jan 2025 16:22:16 +0100
From: Magnus Karlsson <magnus.karlsson@...il.com>
To: Stanislav Fomichev <sdf@...ichev.me>
Cc: netdev@...r.kernel.org, davem@...emloft.net, edumazet@...gle.com, 
	kuba@...nel.org, pabeni@...hat.com, linux-kernel@...r.kernel.org, 
	bpf@...r.kernel.org, horms@...nel.org, ast@...nel.org, daniel@...earbox.net, 
	hawk@...nel.org, john.fastabend@...il.com, bjorn@...nel.org, 
	magnus.karlsson@...el.com, maciej.fijalkowski@...el.com, 
	jonathan.lemon@...il.com, jdamato@...tly.com, mkarsten@...terloo.ca
Subject: Re: [PATCH net] xsk: Bring back busy polling support

On Thu, 9 Jan 2025 at 01:35, Stanislav Fomichev <sdf@...ichev.me> wrote:
>
> Commit 86e25f40aa1e ("net: napi: Add napi_config") moved napi->napi_id
> assignment to a later point in time (napi_hash_add_with_id). This breaks
> __xdp_rxq_info_reg, which copies napi_id at an earlier time and now
> stores a napi_id of 0. It also makes sk_mark_napi_id_once_xdp and
> __sk_mark_napi_id_once useless because they now operate on a napi_id of 0.
> Since sk_busy_loop requires a valid napi_id to busy-poll on, there is no way
> to busy-poll AF_XDP sockets anymore.
>
> Bring back the ability to busy-poll on XSK by resolving the socket's napi_id
> at bind time. This relies on the relatively recent netif_queue_set_napi,
> but we assume that, at this point, most popular drivers have been converted.
> This also removes the cycles spent per tx/rx checking and/or setting
> the napi_id value.
>
> Confirmed by running a busy-polling AF_XDP socket
> (github.com/fomichev/xskrtt) on mlx5 and looking at BusyPollRxPackets
> from /proc/net/netstat.
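
For reference, a busy-polling AF_XDP socket is set up with the standard
busy-poll socket options; a minimal sketch (helper name and values are
illustrative, not taken from xskrtt):

#include <sys/socket.h>

#ifndef SO_PREFER_BUSY_POLL
#define SO_PREFER_BUSY_POLL	69
#endif
#ifndef SO_BUSY_POLL_BUDGET
#define SO_BUSY_POLL_BUDGET	70
#endif

/* Enable busy polling on an already-created AF_XDP socket fd. */
static int xsk_enable_busy_poll(int xsk_fd)
{
	int opt;

	opt = 1;	/* prefer busy polling over interrupt-driven processing */
	if (setsockopt(xsk_fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &opt, sizeof(opt)))
		return -1;

	opt = 20;	/* busy-poll timeout, in microseconds */
	if (setsockopt(xsk_fd, SOL_SOCKET, SO_BUSY_POLL, &opt, sizeof(opt)))
		return -1;

	opt = 64;	/* packet budget per busy-poll iteration */
	return setsockopt(xsk_fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET, &opt, sizeof(opt));
}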

Thanks Stanislav for finding and fixing this. As a bonus, the
resulting code is much nicer too.

I just took a look at the Intel drivers and some of them have not been
converted to use netif_queue_set_napi() yet; only ice, e1000, and e1000e
use it so far. But that is on us to fix.
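
For reference, converting a driver essentially means associating each queue
with its NAPI instance when queues are set up or torn down. A minimal sketch
(hypothetical foo_ring structure, not taken from any in-tree driver):

#include <linux/netdevice.h>

/* Hypothetical per-queue ring; only the fields used here are shown. */
struct foo_ring {
	struct net_device *netdev;
	struct napi_struct napi;
	unsigned int queue_index;
};

/*
 * Called with rtnl held during queue setup (enable = true) and teardown
 * (enable = false). Linking the queue to its NAPI lets the core resolve
 * the napi_id later, e.g. at XSK bind time as in this patch.
 */
static void foo_queue_set_napi(struct foo_ring *ring, bool enable)
{
	struct napi_struct *napi = enable ? &ring->napi : NULL;

	netif_queue_set_napi(ring->netdev, ring->queue_index,
			     NETDEV_QUEUE_TYPE_RX, napi);
	netif_queue_set_napi(ring->netdev, ring->queue_index,
			     NETDEV_QUEUE_TYPE_TX, napi);
}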

From the xsk point of view:
Acked-by: Magnus Karlsson <magnus.karlsson@...el.com>

> Fixes: 86e25f40aa1e ("net: napi: Add napi_config")
> Signed-off-by: Stanislav Fomichev <sdf@...ichev.me>
> ---
>  include/net/busy_poll.h    |  8 --------
>  include/net/xdp.h          |  1 -
>  include/net/xdp_sock_drv.h | 14 --------------
>  net/core/xdp.c             |  1 -
>  net/xdp/xsk.c              | 14 +++++++++-----
>  5 files changed, 9 insertions(+), 29 deletions(-)
>
> diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
> index c858270141bc..c39a426ebf52 100644
> --- a/include/net/busy_poll.h
> +++ b/include/net/busy_poll.h
> @@ -174,12 +174,4 @@ static inline void sk_mark_napi_id_once(struct sock *sk,
>  #endif
>  }
>
> -static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
> -                                           const struct xdp_buff *xdp)
> -{
> -#ifdef CONFIG_NET_RX_BUSY_POLL
> -       __sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
> -#endif
> -}
> -
>  #endif /* _LINUX_NET_BUSY_POLL_H */
> diff --git a/include/net/xdp.h b/include/net/xdp.h
> index e6770dd40c91..b5b10f2b88e5 100644
> --- a/include/net/xdp.h
> +++ b/include/net/xdp.h
> @@ -62,7 +62,6 @@ struct xdp_rxq_info {
>         u32 queue_index;
>         u32 reg_state;
>         struct xdp_mem_info mem;
> -       unsigned int napi_id;
>         u32 frag_size;
>  } ____cacheline_aligned; /* perf critical, avoid false-sharing */
>
> diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
> index 40085afd9160..7a7316d9c0da 100644
> --- a/include/net/xdp_sock_drv.h
> +++ b/include/net/xdp_sock_drv.h
> @@ -59,15 +59,6 @@ static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
>         xp_fill_cb(pool, desc);
>  }
>
> -static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
> -{
> -#ifdef CONFIG_NET_RX_BUSY_POLL
> -       return pool->heads[0].xdp.rxq->napi_id;
> -#else
> -       return 0;
> -#endif
> -}
> -
>  static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
>                                       unsigned long attrs)
>  {
> @@ -306,11 +297,6 @@ static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
>  {
>  }
>
> -static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
> -{
> -       return 0;
> -}
> -
>  static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
>                                       unsigned long attrs)
>  {
> diff --git a/net/core/xdp.c b/net/core/xdp.c
> index bcc5551c6424..2315feed94ef 100644
> --- a/net/core/xdp.c
> +++ b/net/core/xdp.c
> @@ -186,7 +186,6 @@ int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
>         xdp_rxq_info_init(xdp_rxq);
>         xdp_rxq->dev = dev;
>         xdp_rxq->queue_index = queue_index;
> -       xdp_rxq->napi_id = napi_id;
>         xdp_rxq->frag_size = frag_size;
>
>         xdp_rxq->reg_state = REG_STATE_REGISTERED;
> diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
> index 3fa70286c846..89d2bef96469 100644
> --- a/net/xdp/xsk.c
> +++ b/net/xdp/xsk.c
> @@ -322,7 +322,6 @@ static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
>                 return -ENOSPC;
>         }
>
> -       sk_mark_napi_id_once_xdp(&xs->sk, xdp);
>         return 0;
>  }
>
> @@ -908,11 +907,8 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len
>         if (unlikely(!xs->tx))
>                 return -ENOBUFS;
>
> -       if (sk_can_busy_loop(sk)) {
> -               if (xs->zc)
> -                       __sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
> +       if (sk_can_busy_loop(sk))
>                 sk_busy_loop(sk, 1); /* only support non-blocking sockets */
> -       }
>
>         if (xs->zc && xsk_no_wakeup(sk))
>                 return 0;
> @@ -1298,6 +1294,14 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
>         xs->queue_id = qid;
>         xp_add_xsk(xs->pool, xs);
>
> +       if (xs->zc && qid < dev->real_num_rx_queues) {
> +               struct netdev_rx_queue *rxq;
> +
> +               rxq = __netif_get_rx_queue(dev, qid);
> +               if (rxq->napi)
> +                       __sk_mark_napi_id_once(sk, rxq->napi->napi_id);
> +       }
> +
>  out_unlock:
>         if (err) {
>                 dev_put(dev);
> --
> 2.47.1
>
>
