Message-ID: <23a531008936dc9a1a298643fb1e4f9a7b8e6eb3.1718709196.git.petrm@nvidia.com>
Date: Tue, 18 Jun 2024 13:34:45 +0200
From: Petr Machata <petrm@...dia.com>
To: "David S. Miller" <davem@...emloft.net>, Eric Dumazet
<edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni
<pabeni@...hat.com>, <netdev@...r.kernel.org>
CC: Ido Schimmel <idosch@...dia.com>, Petr Machata <petrm@...dia.com>, "Amit
Cohen" <amcohen@...dia.com>, <mlxsw@...dia.com>
Subject: [PATCH net-next 6/7] mlxsw: pci: Do not store SKB for RDQ elements
From: Amit Cohen <amcohen@...dia.com>
The previous patch used a page pool to allocate buffers for the RDQ. With
this change, 'elem_info->u.rdq.skb' is no longer used: we do not allocate
an SKB before the packet arrives; instead, we hold a page pointer and build
the SKB around it once the packet is received.
Remove the union and store the SKB pointer for the SDQ only.
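For context (not part of the patch), a minimal sketch of how an RDQ
completion can build the SKB around a page-pool page, as described above.
Only build_skb(), page_address(), skb_mark_for_recycle(), skb_reserve() and
skb_put() are real kernel helpers; the function and parameter names
(rdq_build_skb, rdq_page, headroom, pkt_len) are illustrative and not taken
from the driver:

	/* Sketch only: build the SKB around the already-DMA-mapped
	 * page-pool page instead of allocating an SKB when the RDQ
	 * descriptor is posted.
	 */
	static struct sk_buff *rdq_build_skb(struct page *rdq_page,
					     u16 headroom, u16 pkt_len)
	{
		void *data = page_address(rdq_page);
		struct sk_buff *skb;

		skb = build_skb(data, PAGE_SIZE);
		if (!skb)
			return NULL;

		/* Let the page be recycled back into the page pool when
		 * the SKB is freed.
		 */
		skb_mark_for_recycle(skb);
		skb_reserve(skb, headroom);
		skb_put(skb, pkt_len);
		return skb;
	}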
Signed-off-by: Amit Cohen <amcohen@...dia.com>
Reviewed-by: Petr Machata <petrm@...dia.com>
Reviewed-by: Ido Schimmel <idosch@...dia.com>
Signed-off-by: Petr Machata <petrm@...dia.com>
---
drivers/net/ethernet/mellanox/mlxsw/pci.c | 19 +++++++------------
1 file changed, 7 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index c380b355b249..498b0867f9aa 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -64,14 +64,9 @@ struct mlxsw_pci_mem_item {
struct mlxsw_pci_queue_elem_info {
struct page *page;
char *elem; /* pointer to actual dma mapped element mem chunk */
- union {
- struct {
- struct sk_buff *skb;
- } sdq;
- struct {
- struct sk_buff *skb;
- } rdq;
- } u;
+ struct {
+ struct sk_buff *skb;
+ } sdq;
};
struct mlxsw_pci_queue {
@@ -557,8 +552,8 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
spin_lock(&q->lock);
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
- tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
- skb = elem_info->u.sdq.skb;
+ tx_info = mlxsw_skb_cb(elem_info->sdq.skb)->tx_info;
+ skb = elem_info->sdq.skb;
wqe = elem_info->elem;
for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
@@ -573,7 +568,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
if (skb)
dev_kfree_skb_any(skb);
- elem_info->u.sdq.skb = NULL;
+ elem_info->sdq.skb = NULL;
if (q->consumer_counter++ != consumer_counter_limit)
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
@@ -2019,7 +2014,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
goto unlock;
}
mlxsw_skb_cb(skb)->tx_info = *tx_info;
- elem_info->u.sdq.skb = skb;
+ elem_info->sdq.skb = skb;
wqe = elem_info->elem;
mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
--
2.45.0