Message-Id: <20241011-b4-igb_zero_copy-v8-4-83862f726a9e@linutronix.de>
Date: Fri, 11 Oct 2024 11:01:02 +0200
From: Kurt Kanzenbach <kurt@...utronix.de>
To: Tony Nguyen <anthony.l.nguyen@...el.com>,
Przemek Kitszel <przemyslaw.kitszel@...el.com>
Cc: "David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>, Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Richard Cochran <richardcochran@...il.com>,
Sriram Yagnaraman <sriram.yagnaraman@...csson.com>,
Benjamin Steinke <benjamin.steinke@...s-audio.com>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Maciej Fijalkowski <maciej.fijalkowski@...el.com>,
intel-wired-lan@...ts.osuosl.org, netdev@...r.kernel.org,
bpf@...r.kernel.org, Kurt Kanzenbach <kurt@...utronix.de>
Subject: [PATCH iwl-next v8 4/6] igb: Add XDP finalize and stats update functions
Move the XDP finalize and Rx statistics update code into separate functions,
so that they can later be reused by both the XDP and the XDP/ZC (zero-copy)
receive paths.

Signed-off-by: Kurt Kanzenbach <kurt@...utronix.de>
---
drivers/net/ethernet/intel/igb/igb.h | 3 ++
drivers/net/ethernet/intel/igb/igb_main.c | 54 ++++++++++++++++++++-----------
2 files changed, 38 insertions(+), 19 deletions(-)
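
For context, the intended reuse looks roughly like the sketch below: a later
zero-copy receive routine would end the same way igb_clean_rx_irq() now does,
by calling the two new helpers. This is an illustration only; the function
name igb_clean_rx_irq_zc() and its locals are assumptions, not part of this
patch:

static int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector, const int budget)
{
	struct igb_adapter *adapter = q_vector->adapter;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int xdp_xmit = 0;

	/* ... zero-copy receive loop: count packets/bytes and OR the
	 * IGB_XDP_TX / IGB_XDP_REDIR verdicts into xdp_xmit ...
	 */

	if (xdp_xmit)
		igb_finalize_xdp(adapter, xdp_xmit);

	igb_update_rx_stats(q_vector, total_packets, total_bytes);

	return total_packets;
}
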
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index c30d6f9708f8..1e65b41a48d8 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -740,6 +740,9 @@ void igb_clean_tx_ring(struct igb_ring *tx_ring);
void igb_clean_rx_ring(struct igb_ring *rx_ring);
void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status);
+void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets,
+ unsigned int bytes);
void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 341b83e39019..4d3aed6cd848 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -8852,6 +8852,38 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
rx_buffer->page = NULL;
}
+void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status)
+{
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+
+ if (status & IGB_XDP_REDIR)
+ xdp_do_flush();
+
+ if (status & IGB_XDP_TX) {
+ struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+ igb_xdp_ring_update_tail(tx_ring);
+ __netif_tx_unlock(nq);
+ }
+}
+
+void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets,
+ unsigned int bytes)
+{
+ struct igb_ring *ring = q_vector->rx.ring;
+
+ u64_stats_update_begin(&ring->rx_syncp);
+ ring->rx_stats.packets += packets;
+ ring->rx_stats.bytes += bytes;
+ u64_stats_update_end(&ring->rx_syncp);
+
+ q_vector->rx.total_packets += packets;
+ q_vector->rx.total_bytes += bytes;
+}
+
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
unsigned int total_bytes = 0, total_packets = 0;
@@ -8859,9 +8891,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
struct igb_ring *rx_ring = q_vector->rx.ring;
u16 cleaned_count = igb_desc_unused(rx_ring);
struct sk_buff *skb = rx_ring->skb;
- int cpu = smp_processor_id();
unsigned int xdp_xmit = 0;
- struct netdev_queue *nq;
struct xdp_buff xdp;
u32 frame_sz = 0;
int rx_buf_pgcnt;
@@ -8983,24 +9013,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
- if (xdp_xmit & IGB_XDP_REDIR)
- xdp_do_flush();
-
- if (xdp_xmit & IGB_XDP_TX) {
- struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
-
- nq = txring_txq(tx_ring);
- __netif_tx_lock(nq, cpu);
- igb_xdp_ring_update_tail(tx_ring);
- __netif_tx_unlock(nq);
- }
+ if (xdp_xmit)
+ igb_finalize_xdp(adapter, xdp_xmit);
- u64_stats_update_begin(&rx_ring->rx_syncp);
- rx_ring->rx_stats.packets += total_packets;
- rx_ring->rx_stats.bytes += total_bytes;
- u64_stats_update_end(&rx_ring->rx_syncp);
- q_vector->rx.total_packets += total_packets;
- q_vector->rx.total_bytes += total_bytes;
+ igb_update_rx_stats(q_vector, total_packets, total_bytes);
if (cleaned_count)
igb_alloc_rx_buffers(rx_ring, cleaned_count);
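
A note on the stats helper: igb_update_rx_stats() keeps the existing
u64_stats_sync discipline on the writer side, so readers are expected to use
the usual fetch/retry loop. A minimal sketch of that reader pattern (generic
kernel idiom, not code taken from this patch; "ring" is a struct igb_ring *):

	unsigned int start;
	u64 packets, bytes;

	do {
		start = u64_stats_fetch_begin(&ring->rx_syncp);
		packets = ring->rx_stats.packets;
		bytes = ring->rx_stats.bytes;
	} while (u64_stats_fetch_retry(&ring->rx_syncp, start));
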
--
2.39.5