Message-ID: <7674318d47d36fb91a64351ca64a491ec61d5284.1738665783.git.petrm@nvidia.com>
Date: Tue, 4 Feb 2025 12:05:00 +0100
From: Petr Machata <petrm@...dia.com>
To: "David S. Miller" <davem@...emloft.net>, Eric Dumazet
<edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni
<pabeni@...hat.com>, Andrew Lunn <andrew+netdev@...n.ch>,
<netdev@...r.kernel.org>
CC: Amit Cohen <amcohen@...dia.com>, Ido Schimmel <idosch@...dia.com>, "Petr
Machata" <petrm@...dia.com>, Alexei Starovoitov <ast@...nel.org>, "Daniel
Borkmann" <daniel@...earbox.net>, Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>, <bpf@...r.kernel.org>,
<mlxsw@...dia.com>
Subject: [PATCH net-next 05/12] mlxsw: pci: Add a separate function for syncing buffers for CPU
From: Amit Cohen <amcohen@...dia.com>
Currently, sync for CPU is done as part of building the SKB. Once XDP
is supported, this sync will have to happen earlier, before the XDP
buffer is created. Add a function for syncing buffers for CPU and call
it early in mlxsw_pci_cqe_rdq_handle(), as a future patch will handle
XDP there.
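For orientation, the resulting RX flow is roughly the following. This is
a simplified sketch, not the exact driver code: error handling is
trimmed, the outer function name here is made up for illustration, and
only mlxsw_pci_sync_for_cpu() and mlxsw_pci_rdq_build_skb() come from
the patch below.

/* Simplified sketch of the RX completion flow after this change. Only
 * mlxsw_pci_sync_for_cpu() and mlxsw_pci_rdq_build_skb() are taken
 * from the patch below; the surrounding structure is paraphrased and
 * error handling is trimmed.
 */
static void mlxsw_pci_cqe_rdq_handle_sketch(struct mlxsw_pci_queue *q,
					    struct mlxsw_pci_rx_pkt_info *rx_pkt_info)
{
	struct sk_buff *skb;

	/* Make the DMA-written pages visible to the CPU for every
	 * scatter/gather entry before anything touches packet data.
	 */
	mlxsw_pci_sync_for_cpu(q, rx_pkt_info);

	/* A follow-up patch will build the XDP buffer and run the XDP
	 * program at this point, which is why the sync can no longer
	 * live inside mlxsw_pci_rdq_build_skb().
	 */

	/* Only then is the SKB built from the already-synced pages. */
	skb = mlxsw_pci_rdq_build_skb(q, rx_pkt_info);
	if (IS_ERR(skb))
		return;

	/* ... deliver skb to the stack ... */
}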
Signed-off-by: Amit Cohen <amcohen@...dia.com>
Reviewed-by: Ido Schimmel <idosch@...dia.com>
Signed-off-by: Petr Machata <petrm@...dia.com>
---
drivers/net/ethernet/mellanox/mlxsw/pci.c | 30 +++++++++++++++++------
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 374b3f2f117d..5796d836a7ee 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -433,22 +433,34 @@ mlxsw_pci_rx_pkt_info_init(const struct mlxsw_pci *pci,
return 0;
}
+static void
+mlxsw_pci_sync_for_cpu(const struct mlxsw_pci_queue *q,
+ const struct mlxsw_pci_rx_pkt_info *rx_pkt_info)
+{
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+ struct page_pool *page_pool;
+ int i;
+
+ page_pool = cq->u.cq.page_pool;
+
+ for (i = 0; i < rx_pkt_info->num_sg_entries; i++) {
+ u32 offset = i ? 0 : MLXSW_PCI_SKB_HEADROOM;
+
+ page_pool_dma_sync_for_cpu(page_pool, rx_pkt_info->pages[i],
+ offset,
+ rx_pkt_info->sg_entries_size[i]);
+ }
+}
+
static struct sk_buff *
mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
const struct mlxsw_pci_rx_pkt_info *rx_pkt_info)
{
- struct mlxsw_pci_queue *cq = q->u.rdq.cq;
unsigned int linear_data_size;
- struct page_pool *page_pool;
struct sk_buff *skb;
void *data;
int i;
- linear_data_size = rx_pkt_info->sg_entries_size[0];
- page_pool = cq->u.cq.page_pool;
- page_pool_dma_sync_for_cpu(page_pool, rx_pkt_info->pages[0],
- MLXSW_PCI_SKB_HEADROOM, linear_data_size);
-
data = page_address(rx_pkt_info->pages[0]);
net_prefetch(data);
@@ -457,6 +469,7 @@ mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
return ERR_PTR(-ENOMEM);
skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM);
+ linear_data_size = rx_pkt_info->sg_entries_size[0];
skb_put(skb, linear_data_size);
if (rx_pkt_info->num_sg_entries == 1)
@@ -468,7 +481,6 @@ mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
page = rx_pkt_info->pages[i];
frag_size = rx_pkt_info->sg_entries_size[i];
- page_pool_dma_sync_for_cpu(page_pool, page, 0, frag_size);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
page, 0, frag_size, PAGE_SIZE);
}
@@ -784,6 +796,8 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
if (err)
goto out;
+ mlxsw_pci_sync_for_cpu(q, &rx_pkt_info);
+
err = mlxsw_pci_rdq_pages_alloc(q, elem_info,
rx_pkt_info.num_sg_entries);
if (err)
--
2.47.0