Message-ID: <20240902-fdma-sparx5-v1-11-1e7d5e5a9f34@microchip.com>
Date: Mon, 2 Sep 2024 16:54:16 +0200
From: Daniel Machon <daniel.machon@...rochip.com>
To: "David S. Miller" <davem@...emloft.net>, Eric Dumazet
<edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni
<pabeni@...hat.com>, Lars Povlsen <lars.povlsen@...rochip.com>, "Steen
Hegelund" <Steen.Hegelund@...rochip.com>, Horatiu Vultur
<horatiu.vultur@...rochip.com>, <UNGLinuxDriver@...rochip.com>,
<rdunlap@...radead.org>, <horms@...nel.org>
CC: <linux-kernel@...r.kernel.org>, <netdev@...r.kernel.org>,
<linux-arm-kernel@...ts.infradead.org>,
Jens Emil Schulz Østergaard
<jensemil.schulzostergaard@...rochip.com>
Subject: [PATCH net-next 11/12] net: sparx5: use contiguous memory for tx
buffers

Currently, the driver uses a linked list to store the tx buffer
addresses, which requires a good amount of extra bookkeeping code.
Ditch the linked list in favor of placing the tx buffers in the same
contiguous memory space as the DCBs and the DBs. The FDMA library has
a helper for this - so use that.

The tx buffer addresses are now retrieved as an offset into the FDMA
memory space.
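
To illustrate the new scheme (a sketch only; the helper names below are
made up for this description, while the struct fdma fields are the ones
used by the patch), the DB buffers simply follow the DCB array in the
single contiguous allocation, so both the DMA address and the CPU
address of a buffer can be derived from the same offset:

/* Contiguous FDMA memory layout (illustrative):
 *
 *   [ DCB 0 .. DCB n_dcbs-1 | buf(0,0) buf(0,1) .. buf(n_dcbs-1,n_dbs-1) ]
 */
static size_t fdma_db_offset(const struct fdma *fdma, int dcb, int db)
{
	return sizeof(struct fdma_dcb) * fdma->n_dcbs +
	       (dcb * fdma->n_dbs + db) * fdma->db_size;
}

/* DMA-side address, as computed in sparx5_fdma_tx_dataptr_cb() */
static dma_addr_t fdma_db_dma_addr(const struct fdma *fdma, int dcb, int db)
{
	return fdma->dma + fdma_db_offset(fdma, dcb, db);
}

/* CPU-side address, as computed in sparx5_fdma_xmit() */
static void *fdma_db_virt_addr(const struct fdma *fdma, int dcb, int db)
{
	return (u8 *)fdma->dcbs + fdma_db_offset(fdma, dcb, db);
}

Since the DMA and CPU views share one offset calculation, no per-buffer
bookkeeping structure is needed anymore.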
Signed-off-by: Daniel Machon <daniel.machon@...rochip.com>
Reviewed-by: Steen Hegelund <Steen.Hegelund@...rochip.com>
Reviewed-by: Jens Emil Schulz Østergaard <jensemil.schulzostergaard@...rochip.com>
Reviewed-by: Horatiu Vultur <horatiu.vultur@...rochip.com>
---
.../net/ethernet/microchip/sparx5/sparx5_fdma.c | 57 +++++-----------------
.../net/ethernet/microchip/sparx5/sparx5_main.h | 1 -
2 files changed, 13 insertions(+), 45 deletions(-)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
index 38735bac6482..7e1bdd0344d0 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
@@ -24,25 +24,11 @@
#define FDMA_XTR_BUFFER_SIZE 2048
#define FDMA_WEIGHT 4
-/* For each hardware DB there is an entry in this list and when the HW DB
- * entry is used, this SW DB entry is moved to the back of the list
- */
-struct sparx5_db {
- struct list_head list;
- void *cpu_addr;
-};
-
static int sparx5_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
u64 *dataptr)
{
- struct sparx5 *sparx5 = fdma->priv;
- struct sparx5_tx *tx = &sparx5->tx;
- struct sparx5_db *db_buf;
-
- db_buf = list_first_entry(&tx->db_list, struct sparx5_db, list);
- list_move_tail(&db_buf->list, &tx->db_list);
-
- *dataptr = virt_to_phys(db_buf->cpu_addr);
+ *dataptr = fdma->dma + (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
+ ((dcb * fdma->n_dbs + db) * fdma->db_size);
return 0;
}
@@ -236,15 +222,19 @@ int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
struct sparx5_tx *tx = &sparx5->tx;
struct fdma *fdma = &tx->fdma;
static bool first_time = true;
- struct sparx5_db *db;
+ void *virt_addr;
fdma_dcb_advance(fdma);
if (!fdma_db_is_done(fdma_db_get(fdma, fdma->dcb_index, 0)))
return -EINVAL;
- db = list_first_entry(&tx->db_list, struct sparx5_db, list);
- memset(db->cpu_addr, 0, FDMA_XTR_BUFFER_SIZE);
- memcpy(db->cpu_addr, ifh, IFH_LEN * 4);
- memcpy(db->cpu_addr + IFH_LEN * 4, skb->data, skb->len);
+
+ /* Get the virtual address of the dataptr for the next DB */
+ virt_addr = ((u8 *)fdma->dcbs +
+ (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
+ ((fdma->dcb_index * fdma->n_dbs) * fdma->db_size));
+
+ memcpy(virt_addr, ifh, IFH_LEN * 4);
+ memcpy(virt_addr + IFH_LEN * 4, skb->data, skb->len);
fdma_dcb_add(fdma, fdma->dcb_index, 0,
FDMA_DCB_STATUS_SOF |
@@ -285,28 +275,7 @@ static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5)
{
struct sparx5_tx *tx = &sparx5->tx;
struct fdma *fdma = &tx->fdma;
- int idx, jdx, err;
-
- INIT_LIST_HEAD(&tx->db_list);
- /* Now for each dcb allocate the db */
- for (idx = 0; idx < fdma->n_dcbs; ++idx) {
- /* TX databuffers must be 16byte aligned */
- for (jdx = 0; jdx < fdma->n_dbs; ++jdx) {
- struct sparx5_db *db;
- void *cpu_addr;
-
- cpu_addr = devm_kzalloc(sparx5->dev,
- FDMA_XTR_BUFFER_SIZE,
- GFP_KERNEL);
- if (!cpu_addr)
- return -ENOMEM;
- db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL);
- if (!db)
- return -ENOMEM;
- db->cpu_addr = cpu_addr;
- list_add_tail(&db->list, &tx->db_list);
- }
- }
+ int err;
err = fdma_alloc_phys(fdma);
if (err)
@@ -353,7 +322,7 @@ static void sparx5_fdma_tx_init(struct sparx5 *sparx5,
fdma->n_dbs = FDMA_TX_DCB_MAX_DBS;
fdma->priv = sparx5;
fdma->db_size = ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE);
- fdma->size = fdma_get_size(&sparx5->tx.fdma);
+ fdma->size = fdma_get_size_contiguous(&sparx5->tx.fdma);
fdma->ops.dataptr_cb = &sparx5_fdma_tx_dataptr_cb;
fdma->ops.nextptr_cb = &fdma_nextptr_cb;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 81c3f8f2f474..3309060b1e4c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -122,7 +122,6 @@ struct sparx5_rx {
*/
struct sparx5_tx {
struct fdma fdma;
- struct list_head db_list;
u64 packets;
u64 dropped;
};
--
2.34.1