Message-Id: <20230704095915.9750-5-sriram.yagnaraman@est.tech>
Date: Tue, 4 Jul 2023 11:59:15 +0200
From: Sriram Yagnaraman <sriram.yagnaraman@....tech>
To:
Cc: intel-wired-lan@...ts.osuosl.org, bpf@...r.kernel.org,
netdev@...r.kernel.org, Jesse Brandeburg <jesse.brandeburg@...el.com>,
Tony Nguyen <anthony.l.nguyen@...el.com>,
"David S . Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>, Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Sriram Yagnaraman <sriram.yagnaraman@....tech>
Subject: [PATCH 4/4] igb: add AF_XDP zero-copy Tx support

Add support for AF_XDP zero-copy transmit path.

A new TX buffer type IGB_TYPE_XSK is introduced to indicate that the Tx
frame was allocated from the xsk buff pool, so igb_clean_tx_ring and
igb_clean_tx_irq can clean the buffers correctly based on type.
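
For reference, the type-based cleanup dispatch looks roughly like the
sketch below (condensed from the igb_clean_tx_ring hunk further down;
an XSK buffer has no skb to free, no frame to return and no DMA mapping
of its own to unmap, so it is only counted and later completed back to
the pool with xsk_tx_completed()):

	if (tx_buffer->type == IGB_TYPE_SKB) {
		dev_kfree_skb_any(tx_buffer->skb);
	} else if (tx_buffer->type == IGB_TYPE_XDP) {
		xdp_return_frame(tx_buffer->xdpf);
	} else if (tx_buffer->type == IGB_TYPE_XSK) {
		/* buffer is owned by the xsk buff pool: count it and
		 * skip the unmap, the pool is notified after the loop
		 */
		xsk_frames++;
		goto skip_for_xsk;
	}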

igb_xmit_zc performs the actual packet transmit when AF_XDP zero-copy is
enabled. We share the TX ring between the slow path, XDP and AF_XDP
zero-copy, so we use the netdev queue lock to ensure mutual exclusion.
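
In igb_clean_tx_irq that mutual exclusion boils down to the pattern
below (taken from the igb_clean_tx_irq hunk in this patch): take the
netdev Tx queue lock, refresh the queue's trans_start so the shared
queue does not trigger a spurious transmit timeout, and only then drive
the zero-copy transmit:

	nq = txring_txq(tx_ring);
	__netif_tx_lock(nq, cpu);
	/* Avoid transmit queue timeout since we share it with the slow path */
	txq_trans_cond_update(nq);
	xsk_xmit_done = igb_xmit_zc(tx_ring, q_vector->tx.work_limit);
	__netif_tx_unlock(nq);
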
Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@....tech>
---
drivers/net/ethernet/intel/igb/igb.h | 2 +
drivers/net/ethernet/intel/igb/igb_main.c | 44 +++++++++++++--
drivers/net/ethernet/intel/igb/igb_xsk.c | 67 ++++++++++++++++++++++-
3 files changed, 105 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 4f474d7338b5..564706ab0646 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -257,6 +257,7 @@ enum igb_tx_flags {
enum igb_tx_buf_type {
IGB_TYPE_SKB = 0,
IGB_TYPE_XDP,
+ IGB_TYPE_XSK
};
/* wrapper around a pointer to a socket buffer,
@@ -843,6 +844,7 @@ int igb_xsk_pool_setup(struct igb_adapter *adapter,
u16 qid);
bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring, u16 count);
int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector, const int budget);
+bool igb_xmit_zc(struct igb_ring *tx_ring, unsigned int budget);
int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
#endif /* _IGB_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f4dbb75d6eac..c6bb5b1944c8 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -4390,6 +4390,8 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
u64 tdba = ring->dma;
int reg_idx = ring->reg_idx;
+ ring->xsk_pool = igb_xsk_pool(adapter, ring);
+
wr32(E1000_TDLEN(reg_idx),
ring->count * sizeof(union e1000_adv_tx_desc));
wr32(E1000_TDBAL(reg_idx),
@@ -4970,15 +4972,20 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
u16 i = tx_ring->next_to_clean;
struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+ u32 xsk_frames = 0;
while (i != tx_ring->next_to_use) {
union e1000_adv_tx_desc *eop_desc, *tx_desc;
/* Free all the Tx ring sk_buffs or xdp frames */
- if (tx_buffer->type == IGB_TYPE_SKB)
+ if (tx_buffer->type == IGB_TYPE_SKB) {
dev_kfree_skb_any(tx_buffer->skb);
- else
+ } else if (tx_buffer->type == IGB_TYPE_XDP) {
xdp_return_frame(tx_buffer->xdpf);
+ } else if (tx_buffer->type == IGB_TYPE_XSK) {
+ xsk_frames++;
+ goto skip_for_xsk;
+ }
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -5009,6 +5016,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
DMA_TO_DEVICE);
}
+skip_for_xsk:
tx_buffer->next_to_watch = NULL;
/* move us one more past the eop_desc for start of next pkt */
@@ -5023,6 +5031,9 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
/* reset BQL for queue */
netdev_tx_reset_queue(txring_txq(tx_ring));
+ if (tx_ring->xsk_pool && xsk_frames)
+ xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
+
/* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -8330,12 +8341,16 @@ static int igb_poll(struct napi_struct *napi, int budget)
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
{
struct igb_adapter *adapter = q_vector->adapter;
+ int cpu = smp_processor_id();
struct igb_ring *tx_ring = q_vector->tx.ring;
struct igb_tx_buffer *tx_buffer;
union e1000_adv_tx_desc *tx_desc;
+ struct netdev_queue *nq;
unsigned int total_bytes = 0, total_packets = 0;
unsigned int budget = q_vector->tx.work_limit;
unsigned int i = tx_ring->next_to_clean;
+ u32 xsk_frames = 0;
+ bool xsk_xmit_done = true;
if (test_bit(__IGB_DOWN, &adapter->state))
return true;
@@ -8366,10 +8381,14 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
total_packets += tx_buffer->gso_segs;
/* free the skb */
- if (tx_buffer->type == IGB_TYPE_SKB)
+ if (tx_buffer->type == IGB_TYPE_SKB) {
napi_consume_skb(tx_buffer->skb, napi_budget);
- else
+ } else if (tx_buffer->type == IGB_TYPE_XDP) {
xdp_return_frame(tx_buffer->xdpf);
+ } else if (tx_buffer->type == IGB_TYPE_XSK) {
+ xsk_frames++;
+ goto skip_for_xsk;
+ }
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -8401,6 +8420,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
}
}
+skip_for_xsk:
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
tx_desc++;
@@ -8429,6 +8449,20 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets;
+ if (tx_ring->xsk_pool) {
+ if (xsk_frames)
+ xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
+ if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ txq_trans_cond_update(nq);
+ xsk_xmit_done = igb_xmit_zc(tx_ring, q_vector->tx.work_limit);
+ __netif_tx_unlock(nq);
+ }
+
if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
struct e1000_hw *hw = &adapter->hw;
@@ -8491,7 +8525,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
}
}
- return !!budget;
+ return !!budget && xsk_xmit_done;
}
/**
diff --git a/drivers/net/ethernet/intel/igb/igb_xsk.c b/drivers/net/ethernet/intel/igb/igb_xsk.c
index eae616e7608c..8b60285ec242 100644
--- a/drivers/net/ethernet/intel/igb/igb_xsk.c
+++ b/drivers/net/ethernet/intel/igb/igb_xsk.c
@@ -25,25 +25,28 @@ static int igb_xsk_pool_enable(struct igb_adapter *adapter,
u16 qid)
{
struct net_device *netdev = adapter->netdev;
- struct igb_ring *rx_ring;
+ struct igb_ring *tx_ring, *rx_ring;
bool if_running;
int err;
if (qid >= adapter->num_rx_queues)
return -EINVAL;
- if (qid >= netdev->real_num_rx_queues)
+ if (qid >= netdev->real_num_rx_queues ||
+ qid >= netdev->real_num_tx_queues)
return -EINVAL;
err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IGB_RX_DMA_ATTR);
if (err)
return err;
+ tx_ring = adapter->tx_ring[qid];
rx_ring = adapter->rx_ring[qid];
if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
if (if_running)
igb_txrx_ring_disable(adapter, qid);
+ set_bit(IGB_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
set_bit(IGB_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
if (if_running) {
@@ -52,6 +55,7 @@ static int igb_xsk_pool_enable(struct igb_adapter *adapter,
/* Kick start the NAPI context so that receiving will start */
err = igb_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
if (err) {
+ clear_bit(IGB_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
clear_bit(IGB_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);
return err;
@@ -63,7 +67,7 @@ static int igb_xsk_pool_enable(struct igb_adapter *adapter,
static int igb_xsk_pool_disable(struct igb_adapter *adapter, u16 qid)
{
- struct igb_ring *rx_ring;
+ struct igb_ring *tx_ring, *rx_ring;
struct xsk_buff_pool *pool;
bool if_running;
@@ -71,12 +75,14 @@ static int igb_xsk_pool_disable(struct igb_adapter *adapter, u16 qid)
if (!pool)
return -EINVAL;
+ tx_ring = adapter->tx_ring[qid];
rx_ring = adapter->rx_ring[qid];
if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
if (if_running)
igb_txrx_ring_disable(adapter, qid);
xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);
+ clear_bit(IGB_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
clear_bit(IGB_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
if (if_running)
@@ -335,6 +341,61 @@ int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector, const int budget)
return failure ? budget : (int)total_packets;
}
+bool igb_xmit_zc(struct igb_ring *tx_ring, unsigned int budget)
+{
+ struct xsk_buff_pool *pool = tx_ring->xsk_pool;
+ union e1000_adv_tx_desc *tx_desc = NULL;
+ struct igb_tx_buffer *tx_bi;
+ bool work_done = true;
+ struct xdp_desc desc;
+ dma_addr_t dma;
+ u32 cmd_type;
+
+ while (budget-- > 0) {
+ if (unlikely(!igb_desc_unused(tx_ring))) {
+ work_done = false;
+ break;
+ }
+
+ if (!netif_carrier_ok(tx_ring->netdev))
+ break;
+
+ if (!xsk_tx_peek_desc(pool, &desc))
+ break;
+
+ dma = xsk_buff_raw_get_dma(pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
+
+ tx_bi = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ tx_bi->bytecount = desc.len;
+ tx_bi->type = IGB_TYPE_XSK;
+ tx_bi->xdpf = NULL;
+ tx_bi->gso_segs = 1;
+
+ tx_desc = IGB_TX_DESC(tx_ring, tx_ring->next_to_use);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ /* put descriptor type bits */
+ cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS;
+
+ cmd_type |= desc.len | IGB_TXD_DCMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.olinfo_status = 0;
+
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+ }
+
+ if (tx_desc) {
+ igb_xdp_ring_update_tail(tx_ring);
+ xsk_tx_release(pool);
+ }
+
+ return !!budget && work_done;
+}
+
int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
struct igb_adapter *adapter = netdev_priv(dev);
--
2.34.1