Message-ID: <ZxJZEn43W4y8EwsD@boxer>
Date: Fri, 18 Oct 2024 14:48:18 +0200
From: Maciej Fijalkowski <maciej.fijalkowski@...el.com>
To: Alexander Lobakin <aleksander.lobakin@...el.com>
CC: "David S. Miller" <davem@...emloft.net>, Eric Dumazet
<edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni
<pabeni@...hat.com>, Toke Høiland-Jørgensen
<toke@...hat.com>, Alexei Starovoitov <ast@...nel.org>, Daniel Borkmann
<daniel@...earbox.net>, John Fastabend <john.fastabend@...il.com>, "Andrii
Nakryiko" <andrii@...nel.org>, Stanislav Fomichev <sdf@...ichev.me>, "Magnus
Karlsson" <magnus.karlsson@...el.com>,
<nex.sw.ncis.osdt.itp.upstreaming@...el.com>, <bpf@...r.kernel.org>,
<netdev@...r.kernel.org>, <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH net-next v2 15/18] xsk: add generic XSk &xdp_buff -> skb
conversion

On Tue, Oct 15, 2024 at 04:53:47PM +0200, Alexander Lobakin wrote:
> Same as with converting &xdp_buff to skb on Rx, the code which allocates
> a new skb and copies the XSk frame there is identical across the
> drivers, so make it generic. This includes copying all the frags if they
> are present in the original buff.
> 
> System percpu Page Pools help here a lot: when available, allocate pages
> from there instead of the MM layer. This greatly improves XDP_PASS
> performance on XSk: instead of page_alloc() + page_free(), the net core
> recycles the same pages, so the only overhead left is memcpy()s.
> 
> Note that the passed buff gets freed if the conversion is done w/o any
> error, assuming you don't need this buffer after you convert it to an
> skb.

AFAICT looks good.

Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@...el.com>

I have to switch contexts now, so I'll finish reviewing the remainder of
this set on Monday.
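
BTW, for readers who haven't followed the driver side: the duplicated
pattern this patch lifts into the core looks roughly like the sketch
below in the Intel drivers (condensed from memory, not a verbatim copy;
the function name is made up):

	/* Hypothetical, condensed per-driver ZC xdp_buff -> skb copy
	 * of the kind that xdp_build_skb_from_zc() below replaces.
	 */
	static struct sk_buff *
	dummy_construct_skb_zc(struct napi_struct *napi, struct xdp_buff *xdp)
	{
		u32 totalsize = xdp->data_end - xdp->data_meta;
		u32 metasize = xdp->data - xdp->data_meta;
		struct sk_buff *skb;

		skb = napi_alloc_skb(napi, totalsize);
		if (unlikely(!skb))
			return NULL;

		/* copy data + metadata in one go, then expose the
		 * metadata via skb_metadata_set()
		 */
		memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
		if (metasize) {
			skb_metadata_set(skb, metasize);
			__skb_pull(skb, metasize);
		}

		/* the XSk buff goes back to the pool right away */
		xsk_buff_free(xdp);

		return skb;
	}

Having one copy of this in net/core, with Page Pool backing on top, is
clearly nicer than several slightly diverging ones.
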
> Signed-off-by: Alexander Lobakin <aleksander.lobakin@...el.com>
> ---
>  include/net/xdp.h |   1 +
>  net/core/xdp.c    | 138 ++++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 139 insertions(+)
>
> diff --git a/include/net/xdp.h b/include/net/xdp.h
> index 83e3f4648caa..69728b2d75d5 100644
> --- a/include/net/xdp.h
> +++ b/include/net/xdp.h
> @@ -331,6 +331,7 @@ void xdp_warn(const char *msg, const char *func, const int line);
>  #define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)
>  
>  struct sk_buff *xdp_build_skb_from_buff(const struct xdp_buff *xdp);
> +struct sk_buff *xdp_build_skb_from_zc(struct xdp_buff *xdp);
>  struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
>  struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
>  					   struct sk_buff *skb,
> diff --git a/net/core/xdp.c b/net/core/xdp.c
> index 371c26c203b2..116153b88d26 100644
> --- a/net/core/xdp.c
> +++ b/net/core/xdp.c
> @@ -22,6 +22,8 @@
>  #include <trace/events/xdp.h>
>  #include <net/xdp_sock_drv.h>
>  
> +#include "dev.h"
> +
>  #define REG_STATE_NEW		0x0
>  #define REG_STATE_REGISTERED	0x1
>  #define REG_STATE_UNREGISTERED	0x2
> @@ -682,6 +684,142 @@ struct sk_buff *xdp_build_skb_from_buff(const struct xdp_buff *xdp)
>  }
>  EXPORT_SYMBOL_GPL(xdp_build_skb_from_buff);
>
> +/**
> + * xdp_copy_frags_from_zc - copy the frags from an XSk buff to an skb
> + * @skb: skb to copy frags to
> + * @xdp: XSk &xdp_buff from which the frags will be copied
> + * @pp: &page_pool backing page allocation, if available
> + *
> + * Copy all frags from an XSk &xdp_buff to an skb to pass it up the stack.
> + * Allocate a new page / page frag for each frag, copy it and attach to
> + * the skb.
> + *
> + * Return: true on success, false on page allocation fail.
> + */
> +static noinline bool xdp_copy_frags_from_zc(struct sk_buff *skb,
> +					    const struct xdp_buff *xdp,
> +					    struct page_pool *pp)
> +{
> +	const struct skb_shared_info *xinfo;
> +	struct skb_shared_info *sinfo;
> +	u32 nr_frags, ts;
> +
> +	xinfo = xdp_get_shared_info_from_buff(xdp);
> +	nr_frags = xinfo->nr_frags;
> +	sinfo = skb_shinfo(skb);
> +
> +#if IS_ENABLED(CONFIG_PAGE_POOL)
> +	ts = 0;
> +#else
> +	ts = xinfo->xdp_frags_truesize ? : nr_frags * xdp->frame_sz;
> +#endif
> +
> +	for (u32 i = 0; i < nr_frags; i++) {
> +		u32 len = skb_frag_size(&xinfo->frags[i]);
> +		void *data;
> +#if IS_ENABLED(CONFIG_PAGE_POOL)
> +		u32 truesize = len;
> +
> +		data = page_pool_dev_alloc_va(pp, &truesize);
> +		ts += truesize;
> +#else
> +		data = napi_alloc_frag(len);
> +#endif
> +		if (unlikely(!data))
> +			return false;
> +
> +		memcpy(data, skb_frag_address(&xinfo->frags[i]),
> +		       LARGEST_ALIGN(len));
> +		__skb_fill_page_desc(skb, sinfo->nr_frags++,
> +				     virt_to_page(data),
> +				     offset_in_page(data), len);
> +	}
> +
> +	xdp_update_skb_shared_info(skb, nr_frags, xinfo->xdp_frags_size,
> +				   ts, false);
> +
> +	return true;
> +}
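
Maybe worth a comment that page_pool_dev_alloc_va() takes the size as
in/out: it may bump the request and reports back the actual truesize of
the chunk it hands out, which is what lets the loop above accumulate an
exact @ts instead of the nr_frags * frame_sz estimate used on
!CONFIG_PAGE_POOL builds. A condensed sketch of that contract (wrapper
name made up, assuming <net/page_pool/helpers.h>):

	static void *dummy_pp_frag_alloc(struct page_pool *pp, u32 len,
					 u32 *ts)
	{
		u32 truesize = len;	/* in: bytes we need */
		void *va;

		va = page_pool_dev_alloc_va(pp, &truesize);
		if (va)
			*ts += truesize;	/* out: real truesize >= len */

		return va;
	}

Not a blocker, just an idea for a follow-up.
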
> +
> +/**
> + * xdp_build_skb_from_zc - create an skb from an XSk &xdp_buff
> + * @xdp: source XSk buff
> + *
> + * Similar to xdp_build_skb_from_buff(), but for XSk frames. Allocate an skb
> + * head, new page for the head, copy the data and initialize the skb fields.
> + * If there are frags, allocate new pages for them and copy.
> + * If Page Pool is available, the function allocates memory from the system
> + * percpu pools to try recycling the pages, otherwise it uses the NAPI page
> + * frag caches.
> + * If new skb was built successfully, @xdp is returned to XSk pool's freelist.
> + * On error, it remains untouched and the caller must take care of this.
> + *
> + * Return: new &sk_buff on success, %NULL on error.
> + */
> +struct sk_buff *xdp_build_skb_from_zc(struct xdp_buff *xdp)
> +{
> +	const struct xdp_rxq_info *rxq = xdp->rxq;
> +	u32 len = xdp->data_end - xdp->data_meta;
> +	struct page_pool *pp;
> +	struct sk_buff *skb;
> +	int metalen;
> +#if IS_ENABLED(CONFIG_PAGE_POOL)
> +	u32 truesize;
> +	void *data;
> +
> +	pp = this_cpu_read(system_page_pool);
> +	truesize = xdp->frame_sz;
> +
> +	data = page_pool_dev_alloc_va(pp, &truesize);
> +	if (unlikely(!data))
> +		return NULL;
> +
> +	skb = napi_build_skb(data, truesize);
> +	if (unlikely(!skb)) {
> +		page_pool_free_va(pp, data, true);
> +		return NULL;
> +	}
> +
> +	skb_mark_for_recycle(skb);
> +	skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
> +#else /* !CONFIG_PAGE_POOL */
> +	struct napi_struct *napi;
> +
> +	pp = NULL;
> +	napi = napi_by_id(rxq->napi_id);
> +	if (likely(napi))
> +		skb = napi_alloc_skb(napi, len);
> +	else
> +		skb = __netdev_alloc_skb_ip_align(rxq->dev, len,
> +						  GFP_ATOMIC | __GFP_NOWARN);
> +	if (unlikely(!skb))
> +		return NULL;
> +#endif /* !CONFIG_PAGE_POOL */
> +
> +	memcpy(__skb_put(skb, len), xdp->data_meta, LARGEST_ALIGN(len));
> +
> +	metalen = xdp->data - xdp->data_meta;
> +	if (metalen > 0) {
> +		skb_metadata_set(skb, metalen);
> +		__skb_pull(skb, metalen);
> +	}
> +
> +	skb_record_rx_queue(skb, rxq->queue_index);
> +
> +	if (unlikely(xdp_buff_has_frags(xdp)) &&
> +	    unlikely(!xdp_copy_frags_from_zc(skb, xdp, pp))) {
> +		napi_consume_skb(skb, true);
> +		return NULL;
> +	}
> +
> +	xsk_buff_free(xdp);
> +
> +	skb->protocol = eth_type_trans(skb, rxq->dev);
> +
> +	return skb;
> +}
> +EXPORT_SYMBOL_GPL(xdp_build_skb_from_zc);
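
The ownership rule from the kerneldoc is worth stressing since it
differs between success and failure. A minimal sketch of how I'd expect
a driver's XDP_PASS branch to call this (hypothetical names, untested):

	static void dummy_xsk_xdp_pass(struct napi_struct *napi,
				       struct xdp_buff *xdp)
	{
		struct sk_buff *skb;

		skb = xdp_build_skb_from_zc(xdp);
		if (unlikely(!skb)) {
			/* on error @xdp is left untouched, the caller
			 * still owns it and must recycle it itself
			 */
			xsk_buff_free(xdp);
			return;
		}

		/* on success @xdp has already been returned to the XSk
		 * pool and eth_type_trans() has been called
		 */
		napi_gro_receive(napi, skb);
	}
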
> +
>  struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
>  					   struct sk_buff *skb,
>  					   struct net_device *dev)
> --
> 2.46.2
>