[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20210408181935.hrouvsh6hroof4jl@skbuf>
Date: Thu, 8 Apr 2021 21:19:35 +0300
From: Vladimir Oltean <olteanv@...il.com>
To: Lorenzo Bianconi <lorenzo@...nel.org>
Cc: bpf@...r.kernel.org, netdev@...r.kernel.org,
lorenzo.bianconi@...hat.com, davem@...emloft.net, kuba@...nel.org,
ast@...nel.org, daniel@...earbox.net, shayagr@...zon.com,
sameehj@...zon.com, john.fastabend@...il.com, dsahern@...nel.org,
brouer@...hat.com, echaudro@...hat.com, jasowang@...hat.com,
alexander.duyck@...il.com, saeed@...nel.org,
maciej.fijalkowski@...el.com
Subject: Re: [PATCH v8 bpf-next 03/14] net: mvneta: update mb bit before
passing the xdp buffer to eBPF layer
On Thu, Apr 08, 2021 at 02:50:55PM +0200, Lorenzo Bianconi wrote:
> Update the multi-buffer bit (mb) in xdp_buff to notify the XDP/eBPF layer
> and XDP remote drivers whether this is a "non-linear" XDP buffer. Access
> xdp_shared_info only if the xdp_buff mb bit is set.
>
> Signed-off-by: Lorenzo Bianconi <lorenzo@...nel.org>
> ---
> drivers/net/ethernet/marvell/mvneta.c | 26 ++++++++++++++++++++------
> 1 file changed, 20 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
> index a52e132fd2cf..94e29cce693a 100644
> --- a/drivers/net/ethernet/marvell/mvneta.c
> +++ b/drivers/net/ethernet/marvell/mvneta.c
> @@ -2041,12 +2041,16 @@ mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
> {
> int i;
>
> + if (likely(!xdp->mb))
> + goto out;
> +
Is there any particular reason for this extra check?
> for (i = 0; i < xdp_sinfo->nr_frags; i++) {
> skb_frag_t *frag = &xdp_sinfo->frags[i];
>
> page_pool_put_full_page(rxq->page_pool,
> xdp_get_frag_page(frag), true);
> }
> +out:
> page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
> sync_len, true);
> }
> @@ -2246,7 +2250,6 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
> {
> unsigned char *data = page_address(page);
> int data_len = -MVNETA_MH_SIZE, len;
> - struct xdp_shared_info *xdp_sinfo;
> struct net_device *dev = pp->dev;
> enum dma_data_direction dma_dir;
>
> @@ -2270,9 +2273,6 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
> prefetch(data);
> xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
> data_len, false);
> -
> - xdp_sinfo = xdp_get_shared_info_from_buff(xdp);
> - xdp_sinfo->nr_frags = 0;
> }
>
> static void
> @@ -2307,12 +2307,18 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
> xdp_set_frag_size(frag, data_len);
> xdp_set_frag_page(frag, page);
>
> + if (!xdp->mb) {
> + xdp_sinfo->data_length = *size;
> + xdp->mb = 1;
> + }
> /* last fragment */
> if (len == *size) {
> struct xdp_shared_info *sinfo;
>
> sinfo = xdp_get_shared_info_from_buff(xdp);
> sinfo->nr_frags = xdp_sinfo->nr_frags;
> + sinfo->data_length = xdp_sinfo->data_length;
> +
> memcpy(sinfo->frags, xdp_sinfo->frags,
> sinfo->nr_frags * sizeof(skb_frag_t));
> }
> @@ -2327,11 +2333,15 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
> struct xdp_buff *xdp, u32 desc_status)
> {
> struct xdp_shared_info *xdp_sinfo = xdp_get_shared_info_from_buff(xdp);
> - int i, num_frags = xdp_sinfo->nr_frags;
> skb_frag_t frag_list[MAX_SKB_FRAGS];
> + int i, num_frags = 0;
> struct sk_buff *skb;
>
> - memcpy(frag_list, xdp_sinfo->frags, sizeof(skb_frag_t) * num_frags);
> + if (unlikely(xdp->mb)) {
> + num_frags = xdp_sinfo->nr_frags;
> + memcpy(frag_list, xdp_sinfo->frags,
> + sizeof(skb_frag_t) * num_frags);
> + }
>
> skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
> if (!skb)
> @@ -2343,6 +2353,9 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
> skb_put(skb, xdp->data_end - xdp->data);
> mvneta_rx_csum(pp, desc_status, skb);
>
> + if (likely(!xdp->mb))
> + return skb;
> +
> for (i = 0; i < num_frags; i++) {
> struct page *page = xdp_get_frag_page(&frag_list[i]);
>
> @@ -2404,6 +2417,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
> frame_sz = size - ETH_FCS_LEN;
> desc_status = rx_status;
>
> + xdp_buf.mb = 0;
> mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
> &size, page);
> } else {
> --
> 2.30.2
>
Powered by blists - more mailing lists