Message-ID: <1edd44e5-a73a-149f-fe0c-96969627d211@solarflare.com>
Date: Wed, 2 Sep 2020 15:35:53 +0100
From: Edward Cree <ecree@...arflare.com>
To: <linux-net-drivers@...arflare.com>, <davem@...emloft.net>
CC: <netdev@...r.kernel.org>
Subject: [PATCH net-next 1/5] sfc: add and use efx_tx_send_pending in tx.c

Instead of using efx_tx_queue_partner(), which relies on the assumption
that tx_queues_per_channel is 2, efx_tx_send_pending() iterates over
txqs with efx_for_each_channel_tx_queue().

Signed-off-by: Edward Cree <ecree@...arflare.com>
---
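For reviewers unfamiliar with the helper being retired: efx_tx_queue_partner()
in net_driver.h is approximately the sketch below (paraphrased for context,
not part of this patch). It can only name a queue's checksum-offload twin,
which is where the assumption of exactly two TX queues per channel is baked
in:

	static inline struct efx_tx_queue *
	efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
	{
		/* Queues come in offload/no-offload pairs; flip the
		 * EFX_TXQ_TYPE_OFFLOAD bit to reach the other half.
		 */
		if (tx_queue->label & EFX_TXQ_TYPE_OFFLOAD)
			return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
		else
			return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
	}

efx_for_each_channel_tx_queue(), by contrast, visits every TX queue the
channel owns, so efx_tx_send_pending() stays correct however many queues a
channel is given.
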
drivers/net/ethernet/sfc/tx.c | 59 ++++++++++++++++++-----------------
1 file changed, 31 insertions(+), 28 deletions(-)

diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 727201d5eb24..71eb99db5439 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -268,6 +268,19 @@ static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
}
#endif /* EFX_USE_PIO */

+/* Send any pending traffic for a channel. xmit_more is shared across all
+ * queues for a channel, so we must check all of them.
+ */
+static void efx_tx_send_pending(struct efx_channel *channel)
+{
+ struct efx_tx_queue *q;
+
+ efx_for_each_channel_tx_queue(q, channel) {
+ if (q->xmit_more_available)
+ efx_nic_push_buffers(q);
+ }
+}
+
/*
* Add a socket buffer to a TX queue
*
@@ -336,21 +349,11 @@ netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb
efx_tx_maybe_stop_queue(tx_queue);

- /* Pass off to hardware */
- if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
- struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
-
- /* There could be packets left on the partner queue if
- * xmit_more was set. If we do not push those they
- * could be left for a long time and cause a netdev watchdog.
- */
- if (txq2->xmit_more_available)
- efx_nic_push_buffers(txq2);
+ tx_queue->xmit_more_available = true;

- efx_nic_push_buffers(tx_queue);
- } else {
- tx_queue->xmit_more_available = xmit_more;
- }
+ /* Pass off to hardware */
+ if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
+ efx_tx_send_pending(tx_queue->channel);

if (segments) {
tx_queue->tso_bursts++;
@@ -371,14 +374,8 @@ netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb
* on this queue or a partner queue then we need to push here to get the
* previous packets out.
*/
- if (!xmit_more) {
- struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
-
- if (txq2->xmit_more_available)
- efx_nic_push_buffers(txq2);
-
- efx_nic_push_buffers(tx_queue);
- }
+ if (!xmit_more)
+ efx_tx_send_pending(tx_queue->channel);

return NETDEV_TX_OK;
}
@@ -489,18 +486,24 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

- /* PTP "event" packet */
- if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
- unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
- return efx_ptp_tx(efx, skb);
- }
-
index = skb_get_queue_mapping(skb);
type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
if (index >= efx->n_tx_channels) {
index -= efx->n_tx_channels;
type |= EFX_TXQ_TYPE_HIGHPRI;
}
+
+ /* PTP "event" packet */
+ if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
+ unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
+ /* There may be existing transmits on the channel that are
+ * waiting for this packet to trigger the doorbell write.
+ * We need to send the packets at this point.
+ */
+ efx_tx_send_pending(efx_get_tx_channel(efx, index));
+ return efx_ptp_tx(efx, skb);
+ }
+
tx_queue = efx_get_tx_queue(efx, index, type);
return __efx_enqueue_skb(tx_queue, skb);
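
For completeness, the xmit_more contract all of this leans on, reduced to a
generic pseudo-driver (illustrative only; the toy_* helpers are made up,
though netdev_xmit_more() is the real stack API):

	/* The stack may mark every skb of a burst except the last with
	 * xmit_more, letting the driver batch doorbell writes.  Anything
	 * deferred this way must be flushed once xmit_more is clear, and
	 * on any path that diverts a packet (such as the PTP case above),
	 * or descriptors can sit unpushed until the netdev watchdog fires.
	 */
	static netdev_tx_t toy_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
	{
		bool more = netdev_xmit_more();

		toy_enqueue(dev, skb);			/* made-up helper */
		if (!more)
			toy_ring_doorbell(dev);		/* made-up helper */
		return NETDEV_TX_OK;
	}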