Message-ID: <20210408183038.yacxn575nl7omcol@skbuf>
Date: Thu, 8 Apr 2021 21:30:38 +0300
From: Vladimir Oltean <olteanv@...il.com>
To: Lorenzo Bianconi <lorenzo@...nel.org>
Cc: bpf@...r.kernel.org, netdev@...r.kernel.org,
lorenzo.bianconi@...hat.com, davem@...emloft.net, kuba@...nel.org,
ast@...nel.org, daniel@...earbox.net, shayagr@...zon.com,
sameehj@...zon.com, john.fastabend@...il.com, dsahern@...nel.org,
brouer@...hat.com, echaudro@...hat.com, jasowang@...hat.com,
alexander.duyck@...il.com, saeed@...nel.org,
maciej.fijalkowski@...el.com
Subject: Re: [PATCH v8 bpf-next 04/14] xdp: add multi-buff support to
xdp_return_{buff/frame}
On Thu, Apr 08, 2021 at 02:50:56PM +0200, Lorenzo Bianconi wrote:
> Take into account whether the received xdp_buff/xdp_frame is non-linear
> when recycling/returning the frame memory to the allocator or into an
> xdp_frame_bulk.
> Introduce xdp_return_num_frags_from_buff to return a given number of
> fragments from an xdp multi-buff, starting from the tail.
>
> Signed-off-by: Lorenzo Bianconi <lorenzo@...nel.org>
> ---
>  include/net/xdp.h | 19 ++++++++++--
>  net/core/xdp.c    | 76 ++++++++++++++++++++++++++++++++++++++++++++++-
>  2 files changed, 92 insertions(+), 3 deletions(-)
>
> diff --git a/include/net/xdp.h b/include/net/xdp.h
> index 02aea7696d15..c8eb7cf4ebed 100644
> --- a/include/net/xdp.h
> +++ b/include/net/xdp.h
> @@ -289,6 +289,7 @@ void xdp_return_buff(struct xdp_buff *xdp);
>  void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
>  void xdp_return_frame_bulk(struct xdp_frame *xdpf,
> 			   struct xdp_frame_bulk *bq);
> +void xdp_return_num_frags_from_buff(struct xdp_buff *xdp, u16 num_frags);
>
>  /* When sending xdp_frame into the network stack, then there is no
>   * return point callback, which is needed to release e.g. DMA-mapping
> @@ -299,10 +300,24 @@ void __xdp_release_frame(void *data, struct xdp_mem_info *mem);
>  static inline void xdp_release_frame(struct xdp_frame *xdpf)
>  {
>  	struct xdp_mem_info *mem = &xdpf->mem;
> +	struct xdp_shared_info *xdp_sinfo;
> +	int i;
>
>  	/* Curr only page_pool needs this */
> -	if (mem->type == MEM_TYPE_PAGE_POOL)
> -		__xdp_release_frame(xdpf->data, mem);
> +	if (mem->type != MEM_TYPE_PAGE_POOL)
> +		return;
> +
> +	if (likely(!xdpf->mb))
> +		goto out;
> +
> +	xdp_sinfo = xdp_get_shared_info_from_frame(xdpf);
> +	for (i = 0; i < xdp_sinfo->nr_frags; i++) {
> +		struct page *page = xdp_get_frag_page(&xdp_sinfo->frags[i]);
> +
> +		__xdp_release_frame(page_address(page), mem);
> +	}
> +out:
> +	__xdp_release_frame(xdpf->data, mem);
>  }
>
>  int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
> diff --git a/net/core/xdp.c b/net/core/xdp.c
> index 05354976c1fc..430f516259d9 100644
> --- a/net/core/xdp.c
> +++ b/net/core/xdp.c
> @@ -374,12 +374,38 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
>
>  void xdp_return_frame(struct xdp_frame *xdpf)
>  {
> +	struct xdp_shared_info *xdp_sinfo;
> +	int i;
> +
> +	if (likely(!xdpf->mb))
> +		goto out;
> +
> +	xdp_sinfo = xdp_get_shared_info_from_frame(xdpf);
> +	for (i = 0; i < xdp_sinfo->nr_frags; i++) {
> +		struct page *page = xdp_get_frag_page(&xdp_sinfo->frags[i]);
> +
> +		__xdp_return(page_address(page), &xdpf->mem, false, NULL);
> +	}
> +out:
>  	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
>  }
>  EXPORT_SYMBOL_GPL(xdp_return_frame);
>
>  void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
>  {
> +	struct xdp_shared_info *xdp_sinfo;
> +	int i;
> +
> +	if (likely(!xdpf->mb))
> +		goto out;
> +
> +	xdp_sinfo = xdp_get_shared_info_from_frame(xdpf);
> +	for (i = 0; i < xdp_sinfo->nr_frags; i++) {
> +		struct page *page = xdp_get_frag_page(&xdp_sinfo->frags[i]);
> +
> +		__xdp_return(page_address(page), &xdpf->mem, true, NULL);
> +	}
> +out:
>  	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
>  }
>  EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
> @@ -415,7 +441,7 @@ void xdp_return_frame_bulk(struct xdp_frame *xdpf,
>  	struct xdp_mem_allocator *xa;
>
>  	if (mem->type != MEM_TYPE_PAGE_POOL) {
> -		__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
> +		xdp_return_frame(xdpf);
>  		return;
>  	}
>
> @@ -434,15 +460,63 @@ void xdp_return_frame_bulk(struct xdp_frame *xdpf,
>  		bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
>  	}
>
> +	if (unlikely(xdpf->mb)) {
> +		struct xdp_shared_info *xdp_sinfo;
> +		int i;
> +
> +		xdp_sinfo = xdp_get_shared_info_from_frame(xdpf);
> +		for (i = 0; i < xdp_sinfo->nr_frags; i++) {
> +			skb_frag_t *frag = &xdp_sinfo->frags[i];
> +
> +			bq->q[bq->count++] = xdp_get_frag_address(frag);
> +			if (bq->count == XDP_BULK_QUEUE_SIZE)
> +				xdp_flush_frame_bulk(bq);
> +		}
> +	}
>  	bq->q[bq->count++] = xdpf->data;
>  }
>  EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
>
>  void xdp_return_buff(struct xdp_buff *xdp)
>  {
> +	struct xdp_shared_info *xdp_sinfo;
> +	int i;
> +
> +	if (likely(!xdp->mb))
> +		goto out;
> +
> +	xdp_sinfo = xdp_get_shared_info_from_buff(xdp);
> +	for (i = 0; i < xdp_sinfo->nr_frags; i++) {
> +		struct page *page = xdp_get_frag_page(&xdp_sinfo->frags[i]);
> +
> +		__xdp_return(page_address(page), &xdp->rxq->mem, true, xdp);
> +	}
> +out:
>  	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
>  }
>
> +void xdp_return_num_frags_from_buff(struct xdp_buff *xdp, u16 num_frags)
> +{
> +	struct xdp_shared_info *xdp_sinfo;
> +	int i;
> +
> +	if (unlikely(!xdp->mb))
> +		return;
> +
> +	xdp_sinfo = xdp_get_shared_info_from_buff(xdp);
> +	num_frags = min_t(u16, num_frags, xdp_sinfo->nr_frags);
> +	for (i = 1; i <= num_frags; i++) {
> +		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags - i];
> +		struct page *page = xdp_get_frag_page(frag);
> +
> +		xdp_sinfo->data_length -= xdp_get_frag_size(frag);
> +		__xdp_return(page_address(page), &xdp->rxq->mem, false, NULL);
> +	}
> +	xdp_sinfo->nr_frags -= num_frags;
> +	xdp->mb = !!xdp_sinfo->nr_frags;
> +}
> +EXPORT_SYMBOL_GPL(xdp_return_num_frags_from_buff);
> +
>  /* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
>  void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
>  {
None of this really benefits in any way from having the extra "mb" bit,
does it? I get the impression it would work just the same way without it.
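
For example, here is a rough sketch of what I mean for xdp_return_frame
(untested, and assuming the shared info's nr_frags can be trusted to be
zero for linear frames - if initializing nr_frags is exactly the cost
the "mb" bit is meant to avoid, please just say so):

void xdp_return_frame(struct xdp_frame *xdpf)
{
	struct xdp_shared_info *xdp_sinfo = xdp_get_shared_info_from_frame(xdpf);
	int i;

	/* Assumption: nr_frags is zero for linear frames, so the loop
	 * is simply a no-op for them and no "mb" check is needed.
	 */
	for (i = 0; i < xdp_sinfo->nr_frags; i++) {
		struct page *page = xdp_get_frag_page(&xdp_sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, false, NULL);
	}

	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}

The other return paths could presumably be collapsed in the same way.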