Message-Id: <20220405070407.445948031@linuxfoundation.org>
Date: Tue, 5 Apr 2022 09:22:36 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org,
Maciej Fijalkowski <maciej.fijalkowski@...el.com>,
Alexander Lobakin <alexandr.lobakin@...el.com>,
Sandeep Penigalapati <sandeep.penigalapati@...el.com>,
Tony Nguyen <anthony.l.nguyen@...el.com>,
Sasha Levin <sashal@...nel.org>
Subject: [PATCH 5.16 0443/1017] ixgbe: pass bi->xdp to ixgbe_construct_skb_zc() directly
From: Alexander Lobakin <alexandr.lobakin@...el.com>
[ Upstream commit 1fbdaa13386804a31eefd3db3c5fe00e80ce9bc3 ]
To avoid dereferencing bi->xdp on every access in
ixgbe_construct_skb_zc(), pass bi->xdp as the argument instead of bi.
We can also call xsk_buff_free() and assign NULL to bi->xdp in the
caller, so that the function does only what its name says: construct
the skb.
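
For context, the pattern here is a common one: have the helper take the
innermost object it actually needs (the xdp_buff) and leave ownership,
i.e. freeing the buffer and clearing the ring slot's pointer, with the
caller. A minimal stand-alone sketch of that split, using hypothetical
names (struct frame / struct rx_slot rather than the real ixgbe types),
might look like this:

#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins, not the real driver structs. */
struct frame   { unsigned char *data; size_t len; }; /* plays the role of xdp_buff        */
struct rx_slot { struct frame *frame; };             /* plays the role of ixgbe_rx_buffer */

/* The helper only constructs a copy of the frame; it neither frees
 * the frame nor touches the slot. */
static unsigned char *construct_copy(const struct frame *frame)
{
        unsigned char *copy = malloc(frame->len);

        if (!copy)
                return NULL;
        memcpy(copy, frame->data, frame->len);
        return copy;
}

/* The caller owns cleanup: free the frame and clear the slot's pointer
 * only after construction has succeeded. */
static unsigned char *rx_one(struct rx_slot *slot)
{
        unsigned char *copy = construct_copy(slot->frame);

        if (!copy)
                return NULL; /* slot->frame stays valid for a retry */

        free(slot->frame->data);
        free(slot->frame);
        slot->frame = NULL;
        return copy;
}

On the failure path the slot still owns the buffer, which mirrors how
this patch places xsk_buff_free()/bi->xdp = NULL after the !skb check
in ixgbe_clean_rx_irq_zc().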
Suggested-by: Maciej Fijalkowski <maciej.fijalkowski@...el.com>
Signed-off-by: Alexander Lobakin <alexandr.lobakin@...el.com>
Tested-by: Sandeep Penigalapati <sandeep.penigalapati@...el.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@...el.com>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 666ff2c07ab9..ab96d7ce1aa0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -207,26 +207,24 @@ bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
}
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
- struct ixgbe_rx_buffer *bi)
+ const struct xdp_buff *xdp)
{
- unsigned int metasize = bi->xdp->data - bi->xdp->data_meta;
- unsigned int datasize = bi->xdp->data_end - bi->xdp->data;
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
struct sk_buff *skb;
/* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- bi->xdp->data_end - bi->xdp->data_hard_start,
+ xdp->data_end - xdp->data_hard_start,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, bi->xdp->data - bi->xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), bi->xdp->data, datasize);
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), xdp->data, datasize);
if (metasize)
skb_metadata_set(skb, metasize);
- xsk_buff_free(bi->xdp);
- bi->xdp = NULL;
return skb;
}
@@ -317,12 +315,15 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
}
/* XDP_PASS path */
- skb = ixgbe_construct_skb_zc(rx_ring, bi);
+ skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp);
if (!skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
break;
}
+ xsk_buff_free(bi->xdp);
+ bi->xdp = NULL;
+
cleaned_count++;
ixgbe_inc_ntc(rx_ring);
--
2.34.1