Message-ID: <d51ed1f65b666236e0caa197fd8669d88beba7cb.1738665783.git.petrm@nvidia.com>
Date: Tue, 4 Feb 2025 12:04:59 +0100
From: Petr Machata <petrm@...dia.com>
To: "David S. Miller" <davem@...emloft.net>, Eric Dumazet
<edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni
<pabeni@...hat.com>, Andrew Lunn <andrew+netdev@...n.ch>,
<netdev@...r.kernel.org>
CC: Amit Cohen <amcohen@...dia.com>, Ido Schimmel <idosch@...dia.com>, "Petr
Machata" <petrm@...dia.com>, Alexei Starovoitov <ast@...nel.org>, "Daniel
Borkmann" <daniel@...earbox.net>, Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>, <bpf@...r.kernel.org>,
<mlxsw@...dia.com>
Subject: [PATCH net-next 04/12] mlxsw: pci: Use mlxsw_pci_rx_pkt_info
From: Amit Cohen <amcohen@...dia.com>
Pass the newly added structure as an argument to mlxsw_pci_rdq_build_skb()
and use it.

Remove mlxsw_pci_elem_info_pages_ref_store(), as mlxsw_pci_rx_pkt_info
already stores the pointers to the pages.

Pass to mlxsw_pci_rdq_pages_alloc() the number of scatter/gather entries,
which is stored in mlxsw_pci_rx_pkt_info.
Signed-off-by: Amit Cohen <amcohen@...dia.com>
Reviewed-by: Ido Schimmel <idosch@...dia.com>
Signed-off-by: Petr Machata <petrm@...dia.com>
---
drivers/net/ethernet/mellanox/mlxsw/pci.c | 65 ++++++-----------------
1 file changed, 16 insertions(+), 49 deletions(-)
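
For context, a rough sketch of how struct mlxsw_pci_rx_pkt_info is assumed
to be laid out, based only on how the diff below accesses it (pages[],
sg_entries_size[] and num_sg_entries); the actual definition added earlier
in this series may differ in details such as the array bound:

	/* Sketch only: per-packet receive descriptor info. */
	struct mlxsw_pci_rx_pkt_info {
		/* Page backing each scatter/gather entry. */
		struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES];
		/* Bytes of packet data in each page; entry 0 becomes the
		 * linear part of the skb.
		 */
		u16 sg_entries_size[MLXSW_PCI_WQE_SG_ENTRIES];
		/* Number of valid entries in the arrays above. */
		u8 num_sg_entries;
	};

With this in place, mlxsw_pci_rdq_build_skb() no longer has to track
byte_count and page_index itself: entry 0 supplies the linear data and
entries 1..num_sg_entries-1 are added as page frags.
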
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index aca1857a4e70..374b3f2f117d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -433,28 +433,23 @@ mlxsw_pci_rx_pkt_info_init(const struct mlxsw_pci *pci,
return 0;
}
-static struct sk_buff *mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
- struct page *pages[],
- u16 byte_count)
+static struct sk_buff *
+mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
+ const struct mlxsw_pci_rx_pkt_info *rx_pkt_info)
{
struct mlxsw_pci_queue *cq = q->u.rdq.cq;
unsigned int linear_data_size;
struct page_pool *page_pool;
struct sk_buff *skb;
- int page_index = 0;
- bool linear_only;
void *data;
+ int i;
- linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
- linear_data_size = linear_only ? byte_count :
- PAGE_SIZE -
- MLXSW_PCI_RX_BUF_SW_OVERHEAD;
-
+ linear_data_size = rx_pkt_info->sg_entries_size[0];
page_pool = cq->u.cq.page_pool;
- page_pool_dma_sync_for_cpu(page_pool, pages[page_index],
+ page_pool_dma_sync_for_cpu(page_pool, rx_pkt_info->pages[0],
MLXSW_PCI_SKB_HEADROOM, linear_data_size);
- data = page_address(pages[page_index]);
+ data = page_address(rx_pkt_info->pages[0]);
net_prefetch(data);
skb = napi_build_skb(data, PAGE_SIZE);
@@ -464,23 +459,18 @@ static struct sk_buff *mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM);
skb_put(skb, linear_data_size);
- if (linear_only)
+ if (rx_pkt_info->num_sg_entries == 1)
return skb;
- byte_count -= linear_data_size;
- page_index++;
-
- while (byte_count > 0) {
+ for (i = 1; i < rx_pkt_info->num_sg_entries; i++) {
unsigned int frag_size;
struct page *page;
- page = pages[page_index];
- frag_size = min(byte_count, PAGE_SIZE);
+ page = rx_pkt_info->pages[i];
+ frag_size = rx_pkt_info->sg_entries_size[i];
page_pool_dma_sync_for_cpu(page_pool, page, 0, frag_size);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
page, 0, frag_size, PAGE_SIZE);
- byte_count -= frag_size;
- page_index++;
}
return skb;
@@ -513,24 +503,6 @@ static void mlxsw_pci_rdq_page_free(struct mlxsw_pci_queue *q,
false);
}
-static int
-mlxsw_pci_elem_info_pages_ref_store(const struct mlxsw_pci_queue *q,
- const struct mlxsw_pci_queue_elem_info *el,
- u16 byte_count, struct page *pages[],
- u8 *p_num_sg_entries)
-{
- u8 num_sg_entries;
- int i;
-
- num_sg_entries = mlxsw_pci_num_sg_entries_get(byte_count);
-
- for (i = 0; i < num_sg_entries; i++)
- pages[i] = el->pages[i];
-
- *p_num_sg_entries = num_sg_entries;
- return 0;
-}
-
static int
mlxsw_pci_rdq_pages_alloc(struct mlxsw_pci_queue *q,
struct mlxsw_pci_queue_elem_info *elem_info,
@@ -780,11 +752,9 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
{
struct mlxsw_pci_rx_pkt_info rx_pkt_info = {};
struct pci_dev *pdev = mlxsw_pci->pdev;
- struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES];
struct mlxsw_pci_queue_elem_info *elem_info;
struct mlxsw_rx_info rx_info = {};
struct sk_buff *skb;
- u8 num_sg_entries;
u16 byte_count;
int err;
@@ -814,19 +784,16 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
if (err)
goto out;
- err = mlxsw_pci_elem_info_pages_ref_store(q, elem_info, byte_count,
- pages, &num_sg_entries);
+ err = mlxsw_pci_rdq_pages_alloc(q, elem_info,
+ rx_pkt_info.num_sg_entries);
if (err)
goto out;
- err = mlxsw_pci_rdq_pages_alloc(q, elem_info, num_sg_entries);
- if (err)
- goto out;
-
- skb = mlxsw_pci_rdq_build_skb(q, pages, byte_count);
+ skb = mlxsw_pci_rdq_build_skb(q, &rx_pkt_info);
if (IS_ERR(skb)) {
dev_err_ratelimited(&pdev->dev, "Failed to build skb for RDQ\n");
- mlxsw_pci_rdq_pages_recycle(q, pages, num_sg_entries);
+ mlxsw_pci_rdq_pages_recycle(q, rx_pkt_info.pages,
+ rx_pkt_info.num_sg_entries);
goto out;
}
--
2.47.0