Message-ID: <ZxELWQeV7uBVN6YP@boxer>
Date: Thu, 17 Oct 2024 15:04:25 +0200
From: Maciej Fijalkowski <maciej.fijalkowski@...el.com>
To: Alexander Lobakin <aleksander.lobakin@...el.com>
CC: "David S. Miller" <davem@...emloft.net>, Eric Dumazet
<edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni
<pabeni@...hat.com>, Toke Høiland-Jørgensen
<toke@...hat.com>, Alexei Starovoitov <ast@...nel.org>, Daniel Borkmann
<daniel@...earbox.net>, John Fastabend <john.fastabend@...il.com>, "Andrii
Nakryiko" <andrii@...nel.org>, Stanislav Fomichev <sdf@...ichev.me>, "Magnus
Karlsson" <magnus.karlsson@...el.com>,
<nex.sw.ncis.osdt.itp.upstreaming@...el.com>, <bpf@...r.kernel.org>,
<netdev@...r.kernel.org>, <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH net-next v2 14/18] xsk: make xsk_buff_add_frag really add
a frag via __xdp_buff_add_frag()
On Tue, Oct 15, 2024 at 04:53:46PM +0200, Alexander Lobakin wrote:
> Currently, xsk_buff_add_frag() only adds a frag to the pool linked list,
> without doing anything with the &xdp_buff. The drivers do that manually,
> and the logic is the same everywhere.
> Make it really add an skb frag, just like xdp_buff_add_frag() does,
> freeing the frags on error if needed. This allows removing the repeated
> code from i40e and ice and avoids adding the same code again and again.
>
> Signed-off-by: Alexander Lobakin <aleksander.lobakin@...el.com>
Acked-by: Maciej Fijalkowski <maciej.fijalkowski@...el.com>
I like it.
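
For readers following the conversion: the per-driver multi-buffer Rx
logic collapses to roughly the pattern below (a sketch only, distilled
from the i40e/ice hunks further down; rx_collect_frame() is a made-up
name and all ring/descriptor bookkeeping is elided):

	/* Build a multi-buffer frame one xdp_buff at a time.  The first
	 * buffer becomes the head; later ones are attached as frags.
	 * Returns the head on success, NULL when out of frag slots.
	 */
	static struct xdp_buff *rx_collect_frame(struct xdp_buff *first,
						 struct xdp_buff *xdp)
	{
		if (!first)
			return xdp;		/* head of the frame */

		if (!xsk_buff_add_frag(first, xdp)) {
			/* The helper no longer frees the head on
			 * failure, so the caller has to.
			 */
			xsk_buff_free(first);
			return NULL;
		}

		return first;
	}
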
> ---
> include/net/xdp_sock_drv.h | 18 ++++++++++--
> drivers/net/ethernet/intel/i40e/i40e_xsk.c | 30 ++------------------
> drivers/net/ethernet/intel/ice/ice_xsk.c | 32 ++--------------------
> 3 files changed, 20 insertions(+), 60 deletions(-)
>
> diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
> index f3175a5d28f7..6aae95b83645 100644
> --- a/include/net/xdp_sock_drv.h
> +++ b/include/net/xdp_sock_drv.h
> @@ -136,11 +136,21 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
> xp_free(xskb);
> }
>
> -static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
> +static inline bool xsk_buff_add_frag(struct xdp_buff *head,
> + struct xdp_buff *xdp)
> {
> - struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);
> + const void *data = xdp->data;
> + struct xdp_buff_xsk *frag;
> +
> + if (!__xdp_buff_add_frag(head, virt_to_page(data),
> + offset_in_page(data), xdp->data_end - data,
> + xdp->frame_sz, false))
> + return false;
>
> + frag = container_of(xdp, struct xdp_buff_xsk, xdp);
> list_add_tail(&frag->list_node, &frag->pool->xskb_list);
> +
> + return true;
> }
>
> static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
> @@ -357,8 +367,10 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
> {
> }
>
> -static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
> +static inline bool xsk_buff_add_frag(struct xdp_buff *head,
> + struct xdp_buff *xdp)
> {
> + return false;
> }
>
> static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
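
To spell out what the delegation buys us: __xdp_buff_add_frag() comes
from earlier in this series, and assuming it keeps the logic the
drivers used to open-code (see the helpers removed below), the attach
step amounts to roughly this (a sketch, not the exact implementation):

	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(head);

	if (!xdp_buff_has_frags(head)) {
		/* First frag: initialize the shared info area */
		sinfo->nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		xdp_buff_set_frags_flag(head);
	}

	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS))
		return false;	/* no frag slot left, caller cleans up */

	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, page,
				   offset, size);
	sinfo->xdp_frags_size += size;

	return true;

where page/offset/size are what xsk_buff_add_frag() computes from
xdp->data above.
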
> diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
> index 4e885df789ef..e28f1905a4a0 100644
> --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
> +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
> @@ -395,32 +395,6 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
> WARN_ON_ONCE(1);
> }
>
> -static int
> -i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
> - struct xdp_buff *xdp, const unsigned int size)
> -{
> - struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
> -
> - if (!xdp_buff_has_frags(first)) {
> - sinfo->nr_frags = 0;
> - sinfo->xdp_frags_size = 0;
> - xdp_buff_set_frags_flag(first);
> - }
> -
> - if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
> - xsk_buff_free(first);
> - return -ENOMEM;
> - }
> -
> - __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
> - virt_to_page(xdp->data_hard_start),
> - XDP_PACKET_HEADROOM, size);
> - sinfo->xdp_frags_size += size;
> - xsk_buff_add_frag(xdp);
> -
> - return 0;
> -}
> -
> /**
> * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
> * @rx_ring: Rx ring
> @@ -486,8 +460,10 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
>
> if (!first)
> first = bi;
> - else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
> + else if (!xsk_buff_add_frag(first, bi)) {
> + xsk_buff_free(first);
> break;
> + }
>
> if (++next_to_process == count)
> next_to_process = 0;
> diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
> index 334ae945d640..8975d2971bc3 100644
> --- a/drivers/net/ethernet/intel/ice/ice_xsk.c
> +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
> @@ -801,35 +801,6 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
> return result;
> }
>
> -static int
> -ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
> - struct xdp_buff *xdp, const unsigned int size)
> -{
> - struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
> -
> - if (!size)
> - return 0;
> -
> - if (!xdp_buff_has_frags(first)) {
> - sinfo->nr_frags = 0;
> - sinfo->xdp_frags_size = 0;
> - xdp_buff_set_frags_flag(first);
> - }
> -
> - if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
> - xsk_buff_free(first);
> - return -ENOMEM;
> - }
> -
> - __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
> - virt_to_page(xdp->data_hard_start),
> - XDP_PACKET_HEADROOM, size);
> - sinfo->xdp_frags_size += size;
> - xsk_buff_add_frag(xdp);
> -
> - return 0;
> -}
> -
> /**
> * ice_clean_rx_irq_zc - consumes packets from the hardware ring
> * @rx_ring: AF_XDP Rx ring
> @@ -895,7 +866,8 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
>
> if (!first) {
> first = xdp;
> - } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
> + } else if (likely(size) && !xsk_buff_add_frag(first, xdp)) {
> + xsk_buff_free(first);
> break;
> }
>
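
One detail worth calling out when diffing the two drivers: ice's old
helper bailed out early on size == 0, and that check survives as the
likely(size) guard in the caller. Thanks to the short-circuit, a
zero-length descriptor never reaches the frag machinery:

	} else if (likely(size) && !xsk_buff_add_frag(first, xdp)) {
		/* size == 0: the && short-circuits, no frag is added,
		 * and the loop just moves on.  i40e never had this
		 * check, so its call site stays simpler.
		 */
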
> --
> 2.46.2
>