Message-Id: <20231201054655.3731772-9-yoshihiro.shimoda.uh@renesas.com>
Date: Fri, 1 Dec 2023 14:46:54 +0900
From: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
To: s.shtylyov@omp.ru,
	davem@davemloft.net,
	edumazet@google.com,
	kuba@kernel.org,
	pabeni@redhat.com
Cc: netdev@vger.kernel.org,
	linux-renesas-soc@vger.kernel.org,
	Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Subject: [PATCH net-next v2 8/9] net: rswitch: Add jumbo frames handling for TX

If the driver needs to transmit a jumbo frame of 2 KiB or more, the
frame has to be split across multiple descriptors. To prepare for this,
add handling for the frame-data descriptor types F{START,MID,END} in
the TX path. Such multi-descriptor frames cannot occur yet, though,
because the maximum MTU is still left at its default for now.

Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@...esas.com>
---
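
For reviewers, here is a small userspace sketch of the splitting math
that the two new helpers implement; it is an illustration, not driver
code. The DT_*/DIE/DT_MASK values and RSWITCH_DESC_BUF_SIZE below are
hand-copied assumptions (double-check them against rswitch.h), and
get_die_dt()/get_len() are local stand-ins for the new helpers. It
builds with gcc, since it reuses the GNU "?:" extension the driver
also uses:

/* Userspace model of the TX splitting; constants are assumptions. */
#include <stdio.h>

#define RSWITCH_DESC_BUF_SIZE	2048	/* assumed per-descriptor buffer size */
#define DT_MASK		0xf0
#define DT_FSINGLE	0x80	/* whole frame fits in one descriptor */
#define DT_FSTART	0x90	/* first chunk of a multi-descriptor frame */
#define DT_FMID		0xa0	/* middle chunk */
#define DT_FEND		0xb0	/* last chunk */
#define DIE		0x08	/* descriptor interrupt enable */

static unsigned char get_die_dt(unsigned int nr_desc, unsigned int index)
{
	if (nr_desc == 1)
		return DT_FSINGLE | DIE;
	if (index == 0)
		return DT_FSTART;
	if (nr_desc - 1 == index)
		return DT_FEND | DIE;
	return DT_FMID;
}

static unsigned short get_len(unsigned char die_dt, unsigned int orig_len)
{
	switch (die_dt & DT_MASK) {
	case DT_FSINGLE:
	case DT_FEND:
		/* tail: the remainder, or a full buffer when it divides evenly */
		return (orig_len % RSWITCH_DESC_BUF_SIZE) ?: RSWITCH_DESC_BUF_SIZE;
	case DT_FSTART:
	case DT_FMID:
		return RSWITCH_DESC_BUF_SIZE;
	default:
		return 0;
	}
}

int main(void)
{
	unsigned int skb_len = 5000;	/* hypothetical jumbo frame */
	unsigned int nr_desc = (skb_len - 1) / RSWITCH_DESC_BUF_SIZE + 1;
	unsigned int i;

	for (i = 0; i < nr_desc; i++) {
		unsigned char die_dt = get_die_dt(nr_desc, i);

		printf("desc %u: die_dt=0x%02x len=%u offset=%u\n", i, die_dt,
		       get_len(die_dt, skb_len), i * RSWITCH_DESC_BUF_SIZE);
	}
	return 0;
}

For skb_len = 5000 it prints three chunks: DT_FSTART (2048), DT_FMID
(2048) and DT_FEND | DIE (904). DIE rides only on the single or final
descriptor, so one completion interrupt covers the whole frame.
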
drivers/net/ethernet/renesas/rswitch.c | 50 ++++++++++++++++++++++----
1 file changed, 43 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 009e6bfdad27..c5e3ee8f82bc 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -1631,15 +1631,44 @@ static bool rswitch_ext_desc_set(struct rswitch_device *rdev,
 	return true;
 }
 
+static u8 rswitch_ext_desc_get_die_dt(unsigned int nr_desc, unsigned int index)
+{
+	if (nr_desc == 1)
+		return DT_FSINGLE | DIE;
+	if (index == 0)
+		return DT_FSTART;
+	if (nr_desc - 1 == index)
+		return DT_FEND | DIE;
+	return DT_FMID;
+}
+
+static u16 rswitch_ext_desc_get_len(u8 die_dt, unsigned int orig_len)
+{
+	switch (die_dt & DT_MASK) {
+	case DT_FSINGLE:
+	case DT_FEND:
+		return (orig_len % RSWITCH_DESC_BUF_SIZE) ?: RSWITCH_DESC_BUF_SIZE;
+	case DT_FSTART:
+	case DT_FMID:
+		return RSWITCH_DESC_BUF_SIZE;
+	default:
+		return 0;
+	}
+}
+
 static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct rswitch_device *rdev = netdev_priv(ndev);
 	struct rswitch_gwca_queue *gq = rdev->tx_queue;
+	dma_addr_t dma_addr, dma_addr_orig;
 	netdev_tx_t ret = NETDEV_TX_OK;
 	struct rswitch_ext_desc *desc;
-	dma_addr_t dma_addr;
+	unsigned int i, nr_desc;
+	u8 die_dt;
+	u16 len;
 
-	if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
+	nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1;
+	if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) {
 		netif_stop_subqueue(ndev, 0);
 		return NETDEV_TX_BUSY;
 	}
@@ -1647,19 +1676,26 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
 	if (skb_put_padto(skb, ETH_ZLEN))
 		return ret;
 
-	dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
-	if (dma_mapping_error(ndev->dev.parent, dma_addr))
+	dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
+	if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
 		goto err_kfree;
 
 	gq->skbs[gq->cur] = skb;
-	gq->unmap_addrs[gq->cur] = dma_addr;
-	desc = &gq->tx_ring[gq->cur];
-	if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, skb->len, DT_FSINGLE | DIE))
-		goto err_unmap;
+	gq->unmap_addrs[gq->cur] = dma_addr_orig;
+
+	/* DT_FSTART must be set last, so fill the descriptors in reverse order. */
+	for (i = nr_desc; i-- > 0; ) {
+		desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)];
+		die_dt = rswitch_ext_desc_get_die_dt(nr_desc, i);
+		dma_addr = dma_addr_orig + i * RSWITCH_DESC_BUF_SIZE;
+		len = rswitch_ext_desc_get_len(die_dt, skb->len);
+		if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, len, die_dt))
+			goto err_unmap;
+	}
 
 	wmb();	/* gq->cur must be incremented after die_dt was set */
 
-	gq->cur = rswitch_next_queue_index(gq, true, 1);
+	gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
 	rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
 
 	return ret;
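
One more reviewer note on the loop above: the hardware may start
fetching as soon as it observes a valid DT_FSTART, so the start
descriptor has to become visible only after the FMID/FEND descriptors
are filled in. The mock-ring sketch below only demonstrates that write
ordering; struct mock_desc and the 0x90/0xa0/0xb8 encodings are the
same hypothetical stand-ins as in the first sketch, and the memory
barriers the driver uses to order the die_dt writes are something a
userspace model cannot show:

/* Mock ring: fill slots cur+nr_desc-1 .. cur, so DT_FSTART lands last. */
#include <stdio.h>

struct mock_desc {			/* hypothetical stand-in */
	unsigned char die_dt;
};

int main(void)
{
	struct mock_desc ring[8] = { { 0 } };
	unsigned int cur = 5, nr_desc = 3, i;	/* 3-chunk frame at slot 5 */

	for (i = nr_desc; i-- > 0; ) {
		unsigned int slot = (cur + i) % 8;	/* like rswitch_next_queue_index() */
		unsigned char die_dt;

		if (i == 0)
			die_dt = 0x90;		/* DT_FSTART, written last */
		else if (i == nr_desc - 1)
			die_dt = 0xb0 | 0x08;	/* DT_FEND | DIE */
		else
			die_dt = 0xa0;		/* DT_FMID */
		ring[slot].die_dt = die_dt;
		printf("write #%u -> slot %u (die_dt=0x%02x)\n",
		       nr_desc - i, slot, die_dt);
	}
	return 0;
}

Output: slot 7 (FEND|DIE) is written first, then slot 6 (FMID), then
slot 5 (FSTART). The single-descriptor DT_FSINGLE case is omitted here
for brevity.
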
--
2.34.1