Message-Id: <20250210-igb_irq-v1-3-bde078cdb9df@linutronix.de>
Date: Mon, 10 Feb 2025 10:19:37 +0100
From: Kurt Kanzenbach <kurt@...utronix.de>
To: Tony Nguyen <anthony.l.nguyen@...el.com>,
Przemek Kitszel <przemyslaw.kitszel@...el.com>
Cc: Andrew Lunn <andrew+netdev@...n.ch>,
"David S. Miller" <davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Joe Damato <jdamato@...tly.com>, intel-wired-lan@...ts.osuosl.org,
netdev@...r.kernel.org, Kurt Kanzenbach <kurt@...utronix.de>
Subject: [PATCH 3/3] igb: Get rid of spurious interrupts
When running igb with XDP/ZC in busy polling mode with deferral of hard
interrupts, interrupts still happen from time to time. That is caused by
the igb task watchdog, which triggers Rx interrupts periodically.
That mechanism was introduced to overcome skb/memory allocation
failures [1]: on such a failure, the Rx clean functions stop processing
the Rx ring, and the task watchdog periodically triggers Rx interrupts
in the hope that memory has become available in the meantime.
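
Condensed from the first hunk below, the pre-patch watchdog fires the
Rx interrupts of all queues unconditionally (simplified sketch, not
verbatim driver code):

	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 eics = 0;

		/* OR together every queue vector's cause bits and
		 * fire them all, whether or not an allocation failed.
		 */
		for (i = 0; i < adapter->num_q_vectors; i++)
			eics |= adapter->q_vector[i]->eims_value;
		wr32(E1000_EICS, eics);
	} else {
		/* Legacy/MSI: force the Rx descriptor minimum
		 * threshold interrupt cause.
		 */
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}
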
The current behavior is undesirable for real-time applications, because
the driver-induced Rx interrupts also trigger softirq processing.
However, all real-time packets should be processed by the application
itself via busy polling.
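
For context, one way to run in that mode combines deferred hard
interrupts with an AF_XDP socket that opts into preferred busy polling.
A minimal sketch; xsk_fd, the interface name and the values are
illustrative, not part of this patch:

	/* Defer hard interrupts via sysfs:
	 *   echo 2      > /sys/class/net/eth0/napi_defer_hard_irqs
	 *   echo 200000 > /sys/class/net/eth0/gro_flush_timeout
	 */
	int one = 1, usecs = 200, budget = 64;

	setsockopt(xsk_fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
		   &one, sizeof(one));
	setsockopt(xsk_fd, SOL_SOCKET, SO_BUSY_POLL,
		   &usecs, sizeof(usecs));
	setsockopt(xsk_fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET,
		   &budget, sizeof(budget));
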
Therefore, only trigger Rx interrupts when an allocation actually
failed. Introduce a new ring flag to signal that condition.
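
Condensed from the hunks below, the resulting pattern is: the Rx clean
paths mark the ring when an allocation fails, and the watchdog raises
an interrupt only for marked rings, clearing the mark again:

	/* Rx clean path, on allocation failure: */
	rx_ring->rx_stats.alloc_failed++;
	set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);

	/* Watchdog task, per Rx ring: */
	if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
		eics |= q_vector->eims_value;
		clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
	}
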
Follow the same logic as in commit 8dcf2c212078 ("igc: Get rid of spurious
interrupts").
[1] - https://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git/commit/?id=3be507547e6177e5c808544bd6a2efa2c7f1d436
Signed-off-by: Kurt Kanzenbach <kurt@...utronix.de>
---
drivers/net/ethernet/intel/igb/igb.h | 3 ++-
drivers/net/ethernet/intel/igb/igb_main.c | 29 +++++++++++++++++++++++++----
drivers/net/ethernet/intel/igb/igb_xsk.c | 1 +
3 files changed, 28 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 79eca385a751bfdafdf384928b6cc1b350b22560..f34ead8243e9f0176a068299138c5c16f7faab2e 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -391,7 +391,8 @@ enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG,
- IGB_RING_FLAG_TX_DISABLED
+ IGB_RING_FLAG_TX_DISABLED,
+ IGB_RING_FLAG_RX_ALLOC_FAILED,
};
#define ring_uses_large_buffer(ring) \
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8e964484f4c9854e4e3e0b4f3e8785fe93bd1207..96da3e2ddc9a67f799ee55d9621d98f80a6b449c 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5754,11 +5754,29 @@ static void igb_watchdog_task(struct work_struct *work)
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 eics = 0;
- for (i = 0; i < adapter->num_q_vectors; i++)
- eics |= adapter->q_vector[i]->eims_value;
- wr32(E1000_EICS, eics);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+ struct igb_ring *rx_ring;
+
+ if (!q_vector->rx.ring)
+ continue;
+
+ rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index];
+
+ if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
+ eics |= q_vector->eims_value;
+ clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ }
+ }
+ if (eics)
+ wr32(E1000_EICS, eics);
} else {
- wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ struct igb_ring *rx_ring = adapter->rx_ring[0];
+
+ if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
+ clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ }
}
igb_spoof_check(adapter);
@@ -9089,6 +9107,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
break;
}
@@ -9148,6 +9167,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -9164,6 +9184,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
__free_pages(page, igb_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_xsk.c b/drivers/net/ethernet/intel/igb/igb_xsk.c
index a5ad090dfe94b6afc8194fe39d28cdd51c7067b0..47344ee1ed7f29bd68055485702a87df3b8922e8 100644
--- a/drivers/net/ethernet/intel/igb/igb_xsk.c
+++ b/drivers/net/ethernet/intel/igb/igb_xsk.c
@@ -417,6 +417,7 @@ int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
break;
}
--
2.39.5