Message-ID: <20251220235135.1078587-7-pvalerio@redhat.com>
Date: Sun, 21 Dec 2025 00:51:33 +0100
From: Paolo Valerio <pvalerio@...hat.com>
To: netdev@...r.kernel.org
Cc: Nicolas Ferre <nicolas.ferre@...rochip.com>,
Claudiu Beznea <claudiu.beznea@...on.dev>,
Andrew Lunn <andrew+netdev@...n.ch>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Lorenzo Bianconi <lorenzo@...nel.org>,
Théo Lebrun <theo.lebrun@...tlin.com>
Subject: [PATCH RFC net-next v2 6/8] cadence: macb: make macb_tx_skb generic

The macb_tx_skb structure is renamed to macb_tx_buff and its skb
member becomes a void pointer, so that it can later carry buffer
types other than sk_buffs. There are no functional changes.

This is a preparatory step for adding XDP xmit support.

Signed-off-by: Paolo Valerio <pvalerio@...hat.com>
---
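A note on the direction (not part of this patch): with skb widened to
a void pointer, a later completion path can carry either an sk_buff or
an xdp_frame in the same ring slot. A minimal sketch of what that
could look like, assuming a hypothetical type discriminator is added
to struct macb_tx_buff; the 'type' member and the MACB_TX_TYPE_* names
are illustrative only, not taken from this series:

enum macb_tx_buff_type {
	MACB_TX_TYPE_SKB,	/* skb points to a struct sk_buff */
	MACB_TX_TYPE_XDP,	/* skb points to a struct xdp_frame */
};

/* Hypothetical release helper: frees whichever buffer type the
 * ring slot currently holds.
 */
static void macb_tx_buff_free(struct macb_tx_buff *tx_buff, int budget)
{
	if (!tx_buff->skb)
		return;

	if (tx_buff->type == MACB_TX_TYPE_XDP)
		xdp_return_frame(tx_buff->skb);
	else
		napi_consume_skb(tx_buff->skb, budget);

	tx_buff->skb = NULL;
}
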
drivers/net/ethernet/cadence/macb.h | 10 +--
drivers/net/ethernet/cadence/macb_main.c | 96 ++++++++++++------------
2 files changed, 53 insertions(+), 53 deletions(-)

diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 815d50574267..47c25993ad40 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -960,7 +960,7 @@ struct macb_dma_desc_ptp {
/* Scaled PPM fraction */
#define PPM_FRACTION 16
-/* struct macb_tx_skb - data about an skb which is being transmitted
+/* struct macb_tx_buff - data about an skb which is being transmitted
* @skb: skb currently being transmitted, only set for the last buffer
* of the frame
* @mapping: DMA address of the skb's fragment buffer
@@ -968,8 +968,8 @@ struct macb_dma_desc_ptp {
* @mapped_as_page: true when buffer was mapped with skb_frag_dma_map(),
* false when buffer was mapped with dma_map_single()
*/
-struct macb_tx_skb {
- struct sk_buff *skb;
+struct macb_tx_buff {
+ void *skb;
dma_addr_t mapping;
size_t size;
bool mapped_as_page;
@@ -1254,7 +1254,7 @@ struct macb_queue {
spinlock_t tx_ptr_lock;
unsigned int tx_head, tx_tail;
struct macb_dma_desc *tx_ring;
- struct macb_tx_skb *tx_skb;
+ struct macb_tx_buff *tx_buff;
dma_addr_t tx_ring_dma;
struct work_struct tx_error_task;
bool txubr_pending;
@@ -1332,7 +1332,7 @@ struct macb {
phy_interface_t phy_interface;
/* AT91RM9200 transmit queue (1 on wire + 1 queued) */
- struct macb_tx_skb rm9200_txq[2];
+ struct macb_tx_buff rm9200_txq[2];
unsigned int max_tx_length;
u64 ethtool_stats[GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES];
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index f767eb2e272e..3ffad2ddc349 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -157,10 +157,10 @@ static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
return &queue->tx_ring[index];
}
-static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
- unsigned int index)
+static struct macb_tx_buff *macb_tx_buff(struct macb_queue *queue,
+ unsigned int index)
{
- return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
+ return &queue->tx_buff[macb_tx_ring_wrap(queue->bp, index)];
}
static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
@@ -969,21 +969,21 @@ static int macb_halt_tx(struct macb *bp)
bp, TSR);
}
-static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
+static void macb_tx_unmap(struct macb *bp, struct macb_tx_buff *tx_buff, int budget)
{
- if (tx_skb->mapping) {
- if (tx_skb->mapped_as_page)
- dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
- tx_skb->size, DMA_TO_DEVICE);
+ if (tx_buff->mapping) {
+ if (tx_buff->mapped_as_page)
+ dma_unmap_page(&bp->pdev->dev, tx_buff->mapping,
+ tx_buff->size, DMA_TO_DEVICE);
else
- dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
- tx_skb->size, DMA_TO_DEVICE);
- tx_skb->mapping = 0;
+ dma_unmap_single(&bp->pdev->dev, tx_buff->mapping,
+ tx_buff->size, DMA_TO_DEVICE);
+ tx_buff->mapping = 0;
}
- if (tx_skb->skb) {
- napi_consume_skb(tx_skb->skb, budget);
- tx_skb->skb = NULL;
+ if (tx_buff->skb) {
+ napi_consume_skb(tx_buff->skb, budget);
+ tx_buff->skb = NULL;
}
}
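
For reference, core kernel behaviour not introduced by this patch: the
budget argument that macb_tx_unmap() forwards to napi_consume_skb()
controls how the skb is freed, and a budget of zero means the caller
is not running in NAPI context:

	macb_tx_unmap(bp, tx_buff, 0);      /* error/teardown paths: budget 0 makes
					     * napi_consume_skb() fall back to
					     * dev_consume_skb_any() */
	macb_tx_unmap(bp, tx_buff, budget); /* macb_tx_complete(): the NAPI poll
					     * budget enables batched freeing via
					     * the per-CPU NAPI skb cache */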
@@ -1029,7 +1029,7 @@ static void macb_tx_error_task(struct work_struct *work)
u32 queue_index;
u32 packets = 0;
u32 bytes = 0;
- struct macb_tx_skb *tx_skb;
+ struct macb_tx_buff *tx_buff;
struct macb_dma_desc *desc;
struct sk_buff *skb;
unsigned int tail;
@@ -1069,16 +1069,16 @@ static void macb_tx_error_task(struct work_struct *work)
desc = macb_tx_desc(queue, tail);
ctrl = desc->ctrl;
- tx_skb = macb_tx_skb(queue, tail);
- skb = tx_skb->skb;
+ tx_buff = macb_tx_buff(queue, tail);
+ skb = tx_buff->skb;
if (ctrl & MACB_BIT(TX_USED)) {
/* skb is set for the last buffer of the frame */
while (!skb) {
- macb_tx_unmap(bp, tx_skb, 0);
+ macb_tx_unmap(bp, tx_buff, 0);
tail++;
- tx_skb = macb_tx_skb(queue, tail);
- skb = tx_skb->skb;
+ tx_buff = macb_tx_buff(queue, tail);
+ skb = tx_buff->skb;
}
/* ctrl still refers to the first buffer descriptor
@@ -1107,7 +1107,7 @@ static void macb_tx_error_task(struct work_struct *work)
desc->ctrl = ctrl | MACB_BIT(TX_USED);
}
- macb_tx_unmap(bp, tx_skb, 0);
+ macb_tx_unmap(bp, tx_buff, 0);
}
netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
@@ -1185,7 +1185,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
spin_lock_irqsave(&queue->tx_ptr_lock, flags);
head = queue->tx_head;
for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
- struct macb_tx_skb *tx_skb;
+ struct macb_tx_buff *tx_buff;
struct sk_buff *skb;
struct macb_dma_desc *desc;
u32 ctrl;
@@ -1205,8 +1205,8 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
/* Process all buffers of the current transmitted frame */
for (;; tail++) {
- tx_skb = macb_tx_skb(queue, tail);
- skb = tx_skb->skb;
+ tx_buff = macb_tx_buff(queue, tail);
+ skb = tx_buff->skb;
/* First, update TX stats if needed */
if (skb) {
@@ -1226,7 +1226,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
}
/* Now we can safely release resources */
- macb_tx_unmap(bp, tx_skb, budget);
+ macb_tx_unmap(bp, tx_buff, budget);
/* skb is set only for the last buffer of the frame.
* WARNING: at this point skb has been freed by
@@ -2133,8 +2133,8 @@ static unsigned int macb_tx_map(struct macb *bp,
unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int len, i, tx_head = queue->tx_head;
u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
+ struct macb_tx_buff *tx_buff = NULL;
unsigned int eof = 1, mss_mfs = 0;
- struct macb_tx_skb *tx_skb = NULL;
struct macb_dma_desc *desc;
unsigned int offset, size;
dma_addr_t mapping;
@@ -2157,7 +2157,7 @@ static unsigned int macb_tx_map(struct macb *bp,
offset = 0;
while (len) {
- tx_skb = macb_tx_skb(queue, tx_head);
+ tx_buff = macb_tx_buff(queue, tx_head);
mapping = dma_map_single(&bp->pdev->dev,
skb->data + offset,
@@ -2166,10 +2166,10 @@ static unsigned int macb_tx_map(struct macb *bp,
goto dma_error;
/* Save info to properly release resources */
- tx_skb->skb = NULL;
- tx_skb->mapping = mapping;
- tx_skb->size = size;
- tx_skb->mapped_as_page = false;
+ tx_buff->skb = NULL;
+ tx_buff->mapping = mapping;
+ tx_buff->size = size;
+ tx_buff->mapped_as_page = false;
len -= size;
offset += size;
@@ -2186,7 +2186,7 @@ static unsigned int macb_tx_map(struct macb *bp,
offset = 0;
while (len) {
size = umin(len, bp->max_tx_length);
- tx_skb = macb_tx_skb(queue, tx_head);
+ tx_buff = macb_tx_buff(queue, tx_head);
mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
offset, size, DMA_TO_DEVICE);
@@ -2194,10 +2194,10 @@ static unsigned int macb_tx_map(struct macb *bp,
goto dma_error;
/* Save info to properly release resources */
- tx_skb->skb = NULL;
- tx_skb->mapping = mapping;
- tx_skb->size = size;
- tx_skb->mapped_as_page = true;
+ tx_buff->skb = NULL;
+ tx_buff->mapping = mapping;
+ tx_buff->size = size;
+ tx_buff->mapped_as_page = true;
len -= size;
offset += size;
@@ -2206,13 +2206,13 @@ static unsigned int macb_tx_map(struct macb *bp,
}
/* Should never happen */
- if (unlikely(!tx_skb)) {
+ if (unlikely(!tx_buff)) {
netdev_err(bp->dev, "BUG! empty skb!\n");
return 0;
}
/* This is the last buffer of the frame: save socket buffer */
- tx_skb->skb = skb;
+ tx_buff->skb = skb;
/* Update TX ring: update buffer descriptors in reverse order
* to avoid race condition
@@ -2243,10 +2243,10 @@ static unsigned int macb_tx_map(struct macb *bp,
do {
i--;
- tx_skb = macb_tx_skb(queue, i);
+ tx_buff = macb_tx_buff(queue, i);
desc = macb_tx_desc(queue, i);
- ctrl = (u32)tx_skb->size;
+ ctrl = (u32)tx_buff->size;
if (eof) {
ctrl |= MACB_BIT(TX_LAST);
eof = 0;
@@ -2269,7 +2269,7 @@ static unsigned int macb_tx_map(struct macb *bp,
ctrl |= MACB_BF(MSS_MFS, mss_mfs);
/* Set TX buffer descriptor */
- macb_set_addr(bp, desc, tx_skb->mapping);
+ macb_set_addr(bp, desc, tx_buff->mapping);
/* desc->addr must be visible to hardware before clearing
* 'TX_USED' bit in desc->ctrl.
*/
@@ -2285,9 +2285,9 @@ static unsigned int macb_tx_map(struct macb *bp,
netdev_err(bp->dev, "TX DMA map failed\n");
for (i = queue->tx_head; i != tx_head; i++) {
- tx_skb = macb_tx_skb(queue, i);
+ tx_buff = macb_tx_buff(queue, i);
- macb_tx_unmap(bp, tx_skb, 0);
+ macb_tx_unmap(bp, tx_buff, 0);
}
return -ENOMEM;
@@ -2603,8 +2603,8 @@ static void macb_free_consistent(struct macb *bp)
dma_free_coherent(dev, size, bp->queues[0].rx_ring, bp->queues[0].rx_ring_dma);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- kfree(queue->tx_skb);
- queue->tx_skb = NULL;
+ kfree(queue->tx_buff);
+ queue->tx_buff = NULL;
queue->tx_ring = NULL;
queue->rx_ring = NULL;
}
@@ -2682,9 +2682,9 @@ static int macb_alloc_consistent(struct macb *bp)
queue->rx_ring = rx + macb_rx_ring_size_per_queue(bp) * q;
queue->rx_ring_dma = rx_dma + macb_rx_ring_size_per_queue(bp) * q;
- size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
- queue->tx_skb = kmalloc(size, GFP_KERNEL);
- if (!queue->tx_skb)
+ size = bp->tx_ring_size * sizeof(struct macb_tx_buff);
+ queue->tx_buff = kmalloc(size, GFP_KERNEL);
+ if (!queue->tx_buff)
goto out_err;
}
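
As an aside, not part of this rename: the renamed allocation could
equivalently be spelled with kmalloc_array(), which adds
multiplication-overflow checking; converting it would be a separate
cleanup:

	queue->tx_buff = kmalloc_array(bp->tx_ring_size,
				       sizeof(struct macb_tx_buff),
				       GFP_KERNEL);
	if (!queue->tx_buff)
		goto out_err;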
--
2.52.0