Message-Id: <20260208084613.2658-7-xuanzhuo@linux.alibaba.com>
Date: Sun, 8 Feb 2026 16:46:11 +0800
From: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
To: netdev@...r.kernel.org
Cc: Andrew Lunn <andrew+netdev@...n.ch>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Xuan Zhuo <xuanzhuo@...ux.alibaba.com>,
Wen Gu <guwen@...ux.alibaba.com>,
Philo Lu <lulie@...ux.alibaba.com>,
Lorenzo Bianconi <lorenzo@...nel.org>,
Vadim Fedorenko <vadim.fedorenko@...ux.dev>,
Dong Yibo <dong100@...se.com>,
Heiner Kallweit <hkallweit1@...il.com>,
Lukas Bulwahn <lukas.bulwahn@...hat.com>,
Dust Li <dust.li@...ux.alibaba.com>
Subject: [PATCH net-next v26 6/8] eea: implement packet transmit logic
Implement the core logic for transmitting packets in the EEA TX path:
.ndo_start_xmit() maps the skb linear data and fragments, fills one TX
descriptor per buffer (including GSO and checksum-offload metadata),
commits the descriptors to the send ring and batches doorbell kicks via
xmit_more. The NAPI TX poll handler reclaims completed descriptors,
unmaps the buffers, reports hardware TX timestamps when requested, and
wakes the queue once descriptors become available again.
Reviewed-by: Dust Li <dust.li@...ux.alibaba.com>
Reviewed-by: Philo Lu <lulie@...ux.alibaba.com>
Signed-off-by: Wen Gu <guwen@...ux.alibaba.com>
Signed-off-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
---
drivers/net/ethernet/alibaba/eea/eea_tx.c | 270 +++++++++++++++++++++-
1 file changed, 268 insertions(+), 2 deletions(-)
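For reviewers, a minimal sketch of the .ndo_start_xmit() pattern the patch
follows (illustrative only and not part of the patch; struct my_priv and the
my_*() helpers are placeholders standing in for eea_tx_post_skb() and the
ering_* API used in the diff below):

/* Illustrative sketch only -- placeholder names, not part of this patch. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/netdev_queues.h>

struct my_priv;                         /* hypothetical driver state */
int my_post_skb(struct my_priv *priv, int qnum, struct sk_buff *skb);
u32 my_ring_free_descs(struct my_priv *priv, int qnum);
void my_kick(struct my_priv *priv, int qnum);

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct my_priv *priv = netdev_priv(dev);
        int qnum = skb_get_queue_mapping(skb);
        struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
        int n = MAX_SKB_FRAGS + 1;      /* worst-case descriptors per skb */

        skb_tx_timestamp(skb);

        /* Map the skb and post one descriptor per buffer (placeholder). */
        if (unlikely(my_post_skb(priv, qnum, skb)))
                dev_kfree_skb_any(skb);

        /* Stop the queue early rather than returning NETDEV_TX_BUSY later. */
        netif_txq_maybe_stop(txq, my_ring_free_descs(priv, qnum), n, n);

        /* Batch doorbells: only kick once the stack stops feeding skbs. */
        if (!netdev_xmit_more() || netif_xmit_stopped(txq))
                my_kick(priv, qnum);

        return NETDEV_TX_OK;
}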
diff --git a/drivers/net/ethernet/alibaba/eea/eea_tx.c b/drivers/net/ethernet/alibaba/eea/eea_tx.c
index 1475fca44b6e..2cdaff9645f8 100644
--- a/drivers/net/ethernet/alibaba/eea/eea_tx.c
+++ b/drivers/net/ethernet/alibaba/eea/eea_tx.c
@@ -11,6 +11,11 @@
#include "eea_pci.h"
#include "eea_ring.h"
+struct eea_sq_free_stats {
+ u64 packets;
+ u64 bytes;
+};
+
struct eea_tx_meta {
struct eea_tx_meta *next;
@@ -28,20 +33,281 @@ struct eea_tx_meta {
u16 dma_len;
};
+static struct eea_tx_meta *eea_tx_meta_get(struct eea_net_tx *tx)
+{
+ struct eea_tx_meta *meta;
+
+ if (!tx->free)
+ return NULL;
+
+ meta = tx->free;
+ tx->free = meta->next;
+
+ return meta;
+}
+
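+/* Unmap every buffer in the meta chain and return the whole chain to the free list. */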
+static void eea_tx_meta_put_and_unmap(struct eea_net_tx *tx,
+ struct eea_tx_meta *meta)
+{
+ struct eea_tx_meta *head;
+
+ head = meta;
+
+ while (true) {
+ dma_unmap_single(tx->dma_dev, meta->dma_addr,
+ meta->dma_len, DMA_TO_DEVICE);
+
+ if (meta->next) {
+ meta = meta->next;
+ continue;
+ }
+
+ break;
+ }
+
+ meta->next = tx->free;
+ tx->free = head;
+}
+
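+/* Release a transmitted skb: report the HW TX timestamp when requested and account packets/bytes for the caller. */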
+static void eea_meta_free_xmit(struct eea_net_tx *tx,
+ struct eea_tx_meta *meta,
+ int budget,
+ struct eea_tx_cdesc *desc,
+ struct eea_sq_free_stats *stats)
+{
+ struct sk_buff *skb = meta->skb;
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && desc)) {
+ struct skb_shared_hwtstamps ts = {};
+
+ ts.hwtstamp = EEA_DESC_TS(desc) + tx->enet->hw_ts_offset;
+ skb_tstamp_tx(skb, &ts);
+ }
+
+ ++stats->packets;
+ stats->bytes += meta->skb->len;
+ napi_consume_skb(meta->skb, budget);
+
+ meta->data = NULL;
+}
+
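+/* Reclaim completed TX descriptors from the completion ring; returns the number of packets freed. */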
+static u32 eea_clean_tx(struct eea_net_tx *tx, int budget)
+{
+ struct eea_sq_free_stats stats = {0};
+ struct eea_tx_cdesc *desc;
+ struct eea_tx_meta *meta;
+ int desc_n;
+
+ while ((desc = ering_cq_get_desc(tx->ering))) {
+ meta = &tx->meta[le16_to_cpu(desc->id)];
+
+ if (meta->data) {
+ eea_tx_meta_put_and_unmap(tx, meta);
+ eea_meta_free_xmit(tx, meta, budget, desc, &stats);
+ desc_n = meta->num;
+ } else {
+ netdev_err(tx->enet->netdev,
+ "tx meta->data is null. id %d num: %d\n",
+ meta->id, meta->num);
+ desc_n = 1;
+ }
+
+ ering_cq_ack_desc(tx->ering, desc_n);
+ }
+
+ return stats.packets;
+}
+
int eea_poll_tx(struct eea_net_tx *tx, int budget)
{
- /* Empty function; will be implemented in a subsequent commit. */
+ struct eea_net *enet = tx->enet;
+ u32 index = tx - enet->tx;
+ struct netdev_queue *txq;
+ u32 cleaned;
+
+ txq = netdev_get_tx_queue(enet->netdev, index);
+
+ __netif_tx_lock(txq, smp_processor_id());
+
+ cleaned = eea_clean_tx(tx, budget);
+
+ if (netif_tx_queue_stopped(txq) && cleaned > 0)
+ netif_tx_wake_queue(txq);
+
+ __netif_tx_unlock(txq);
+
+ return 0;
+}
+
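+/* Translate the skb's GSO and checksum-offload metadata into the TX descriptor. */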
+static int eea_fill_desc_from_skb(const struct sk_buff *skb,
+ struct eea_ring *ering,
+ struct eea_tx_desc *desc)
+{
+ if (skb_is_gso(skb)) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+ desc->gso_size = cpu_to_le16(sinfo->gso_size);
+ if (sinfo->gso_type & SKB_GSO_TCPV4)
+ desc->gso_type = EEA_TX_GSO_TCPV4;
+
+ else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ desc->gso_type = EEA_TX_GSO_TCPV6;
+
+ else if (sinfo->gso_type & SKB_GSO_UDP_L4)
+ desc->gso_type = EEA_TX_GSO_UDP_L4;
+
+ else
+ return -EINVAL;
+
+ if (sinfo->gso_type & SKB_GSO_TCP_ECN)
+ desc->gso_type |= EEA_TX_GSO_ECN;
+ } else {
+ desc->gso_type = EEA_TX_GSO_NONE;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
+ desc->csum_offset = cpu_to_le16(skb->csum_offset);
+ }
+
+ return 0;
+}
+
+static struct eea_tx_meta *eea_tx_desc_fill(struct eea_net_tx *tx,
+ dma_addr_t addr, u32 len,
+ bool is_last, void *data, u16 flags)
+{
+ struct eea_tx_meta *meta;
+ struct eea_tx_desc *desc;
+
+ meta = eea_tx_meta_get(tx);
+
+ desc = ering_sq_alloc_desc(tx->ering, meta->id, is_last, flags);
+ desc->addr = cpu_to_le64(addr);
+ desc->len = cpu_to_le16(len);
+
+ meta->next = NULL;
+ meta->dma_len = len;
+ meta->dma_addr = addr;
+ meta->data = data;
+ meta->num = 1;
+ meta->desc = desc;
+
+ return meta;
+}
+
+static int eea_tx_add_skb_frag(struct eea_net_tx *tx,
+ struct eea_tx_meta *head_meta,
+ const skb_frag_t *frag, bool is_last)
+{
+ u32 len = skb_frag_size(frag);
+ struct eea_tx_meta *meta;
+ dma_addr_t addr;
+
+ addr = skb_frag_dma_map(tx->dma_dev, frag, 0, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx->dma_dev, addr)))
+ return -ENOMEM;
+
+ meta = eea_tx_desc_fill(tx, addr, len, is_last, NULL, 0);
+
+ meta->next = head_meta->next;
+ head_meta->next = meta;
+
return 0;
}
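+/* Map the linear data and all fragments, fill one descriptor per buffer and commit them to the send ring. */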
+static int eea_tx_post_skb(struct eea_net_tx *tx, struct sk_buff *skb)
+{
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ u32 hlen = skb_headlen(skb);
+ struct eea_tx_meta *meta;
+ dma_addr_t addr;
+ int i, err;
+ u16 flags;
+
+ addr = dma_map_single(tx->dma_dev, skb->data, hlen, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx->dma_dev, addr)))
+ return -ENOMEM;
+
+ flags = skb->ip_summed == CHECKSUM_PARTIAL ? EEA_DESC_F_DO_CSUM : 0;
+
+ meta = eea_tx_desc_fill(tx, addr, hlen, !shinfo->nr_frags, skb, flags);
+
+ if (eea_fill_desc_from_skb(skb, tx->ering, meta->desc))
+ goto err_cancel;
+
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ const skb_frag_t *frag = &shinfo->frags[i];
+ bool is_last = i == (shinfo->nr_frags - 1);
+
+ err = eea_tx_add_skb_frag(tx, meta, frag, is_last);
+ if (err)
+ goto err_cancel;
+ }
+
+ meta->num = shinfo->nr_frags + 1;
+ ering_sq_commit_desc(tx->ering);
+
+ return 0;
+
+err_cancel:
+ ering_sq_cancel(tx->ering);
+ eea_tx_meta_put_and_unmap(tx, meta);
+ meta->data = NULL;
+ return -ENOMEM;
+}
+
+static void eea_tx_kick(struct eea_net_tx *tx)
+{
+ ering_kick(tx->ering);
+}
+
netdev_tx_t eea_tx_xmit(struct sk_buff *skb, struct net_device *netdev)
{
- /* Empty function; will be implemented in a subsequent commit. */
+ struct eea_net *enet = netdev_priv(netdev);
+ int qnum = skb_get_queue_mapping(skb);
+ struct eea_net_tx *tx = &enet->tx[qnum];
+ struct netdev_queue *txq;
+ int err, n;
+
+ txq = netdev_get_tx_queue(netdev, qnum);
+
+ skb_tx_timestamp(skb);
+
+ err = eea_tx_post_skb(tx, skb);
+ if (unlikely(err))
+ dev_kfree_skb_any(skb);
+
+ /* Returning NETDEV_TX_BUSY is expensive, so stop the queue early once
+ * a worst-case skb (MAX_SKB_FRAGS + 1 descriptors) might not fit.
+ */
+ n = MAX_SKB_FRAGS + 1;
+ netif_txq_maybe_stop(txq, tx->ering->num_free, n, n);
+
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq))
+ eea_tx_kick(tx);
+
return NETDEV_TX_OK;
}
static void eea_free_meta(struct eea_net_tx *tx, struct eea_net_cfg *cfg)
{
+ struct eea_sq_free_stats stats = {0};
+ struct eea_tx_meta *meta;
+ int i;
+
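+ /* Drain the free list so the loop below only frees in-flight buffers. */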
+ while ((meta = eea_tx_meta_get(tx)))
+ meta->skb = NULL;
+
+ for (i = 0; i < cfg->tx_ring_depth; i++) {
+ meta = &tx->meta[i];
+
+ if (!meta->skb)
+ continue;
+
+ eea_tx_meta_put_and_unmap(tx, meta);
+
+ eea_meta_free_xmit(tx, meta, 0, NULL, &stats);
+ }
+
kvfree(tx->meta);
tx->meta = NULL;
}
--
2.32.0.3.g01195cf9f