Message-ID: <38898d98-80c9-4bc9-8603-e968a7c495d0@gmail.com>
Date: Tue, 18 Feb 2025 14:10:27 +0200
From: Tariq Toukan <ttoukan.linux@...il.com>
To: Jakub Kicinski <kuba@...nel.org>, davem@...emloft.net
Cc: tariqt@...dia.com, idosch@...sch.org, hawk@...nel.org,
netdev@...r.kernel.org, edumazet@...gle.com, pabeni@...hat.com,
andrew+netdev@...n.ch, horms@...nel.org
Subject: Re: [PATCH net-next v3 1/4] eth: mlx4: create a page pool for Rx

On 13/02/2025 3:06, Jakub Kicinski wrote:
> Create a pool per rx queue. Subsequent patches will make use of it.
>
> Move fcs_del to a hole to make space for the pointer.
>
> Per common "wisdom" base the page pool size on the ring size.
> Note that the page pool cache size is in full pages, so just
> round up the effective buffer size to pages.
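
A quick worked example of that sizing, using illustrative numbers only
(1500-byte MTU so rx_skb_size is roughly 1536, 4K pages, 1024-entry
ring; none of these values come from the patch itself):

	unsigned int pages_per_buf = DIV_ROUND_UP(1536, PAGE_SIZE);	/* -> 1 */

	pp.pool_size = 1024 * pages_per_buf;	/* -> 1024 pages */

so a standard MTU ends up with one page tracked per descriptor, while a
9000-byte MTU would round up to 3 pages per descriptor.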
>
> Signed-off-by: Jakub Kicinski <kuba@...nel.org>
> ---
> v3:
> - use priv->rx_skb_size for effective buffer size
> - use priv->dma_dir for DMA mapping direction, instead of always BIDIR
> v2: https://lore.kernel.org/20250211192141.619024-2-kuba@kernel.org
> - update pp.pool_size
> v1: https://lore.kernel.org/20250205031213.358973-2-kuba@kernel.org
> ---
> drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3 ++-
> drivers/net/ethernet/mellanox/mlx4/en_rx.c | 24 +++++++++++++++++++-
> 2 files changed, 25 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
> index 28b70dcc652e..29f48e63081b 100644
> --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
> +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
> @@ -335,10 +335,11 @@ struct mlx4_en_rx_ring {
> u16 stride;
> u16 log_stride;
> u16 cqn; /* index of port CQ associated with this ring */
> + u8 fcs_del;
> u32 prod;
> u32 cons;
> u32 buf_size;
> - u8 fcs_del;
> + struct page_pool *pp;
> void *buf;
> void *rx_info;
> struct bpf_prog __rcu *xdp_prog;
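
On the fcs_del move: a u8 placed right after the three u16 members fits
into padding that the following u32 would otherwise force, which is the
"hole" the commit message refers to, and it frees up the spot below
buf_size for the new pointer. A generic illustration of the principle
(this fragment is standalone, not pahole output for the real struct):

	u16 a;	/* offset 0 */
	u16 b;	/* offset 2 */
	u16 c;	/* offset 4 */
	u8  d;	/* offset 6: sits in what would otherwise be padding */
	u32 e;	/* offset 8 either way */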
> diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
> index 15c57e9517e9..a8c0cf5d0d08 100644
> --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
> +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
> @@ -48,6 +48,7 @@
> #if IS_ENABLED(CONFIG_IPV6)
> #include <net/ip6_checksum.h>
> #endif
> +#include <net/page_pool/helpers.h>
>
> #include "mlx4_en.h"
>
> @@ -268,6 +269,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
> u32 size, u16 stride, int node, int queue_index)
> {
> struct mlx4_en_dev *mdev = priv->mdev;
> + struct page_pool_params pp = {};
> struct mlx4_en_rx_ring *ring;
> int err = -ENOMEM;
> int tmp;
> @@ -286,9 +288,26 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
> ring->log_stride = ffs(ring->stride) - 1;
> ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
>
> - if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
> + pp.flags = PP_FLAG_DMA_MAP;
> + pp.pool_size = size * DIV_ROUND_UP(priv->rx_skb_size, PAGE_SIZE);
> + pp.nid = node;
> + pp.napi = &priv->rx_cq[queue_index]->napi;
> + pp.netdev = priv->dev;
> + pp.dev = &mdev->dev->persist->pdev->dev;
> + pp.dma_dir = priv->dma_dir;
> +
> + ring->pp = page_pool_create(&pp);
> +	if (IS_ERR(ring->pp))
> goto err_ring;
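
For readers less familiar with the page_pool API: page_pool_create()
reports failure with an ERR_PTR() (e.g. -EINVAL for bad parameters or
-ENOMEM), never NULL, hence the IS_ERR() check above. A minimal
caller-side sketch of the pattern (the err variable and err_out label
are hypothetical, not from this driver):

	struct page_pool *pool;

	pool = page_pool_create(&pp);
	if (IS_ERR(pool)) {
		err = PTR_ERR(pool);	/* keep the real reason */
		pool = NULL;
		goto err_out;
	}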
>
> + if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
> + goto err_pp;
> +
> + err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_POOL,
> + ring->pp);
> + if (err)
> + goto err_xdp_info;
> +
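
The ordering here looks right: register the rxq info first, attach the
pool as its memory model second, and unwind in reverse. Worth noting
that xdp_rxq_info_unreg() also drops the registered mem model, so a
condensed sketch of the pairing (labels hypothetical) is:

	/* setup */
	if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
		goto err_pool;
	if (xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_POOL,
				       ring->pp))
		goto err_rxq;

	/* teardown, reverse order */
	xdp_rxq_info_unreg(&ring->xdp_rxq);	/* unregs the mem model too */
	page_pool_destroy(ring->pp);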
> tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
> sizeof(struct mlx4_en_rx_alloc));
> ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node);
> @@ -319,6 +338,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
> ring->rx_info = NULL;
> err_xdp_info:
> xdp_rxq_info_unreg(&ring->xdp_rxq);
> +err_pp:
> + page_pool_destroy(ring->pp);
> err_ring:
> kfree(ring);
> *pring = NULL;
> @@ -445,6 +466,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
> xdp_rxq_info_unreg(&ring->xdp_rxq);
> mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
> kvfree(ring->rx_info);
> + page_pool_destroy(ring->pp);
> ring->rx_info = NULL;
> kfree(ring);
> *pring = NULL;
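
With the destroy path in place the pool's lifetime now brackets the
ring's, which is what the rest of the series needs once buffers are
actually allocated from it. As I understand the API, that data path
will look roughly like the sketch below (illustrative only, these calls
are not part of this patch), with page_pool_destroy() deferring the
final release until any in-flight pages have been returned:

	/* refill: take a page from the pool */
	page = page_pool_alloc_pages(ring->pp, GFP_ATOMIC | __GFP_NOWARN);

	/* release: recycle into the pool instead of put_page() */
	page_pool_put_full_page(ring->pp, page, false);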

Reviewed-by: Tariq Toukan <tariqt@...dia.com>

Thanks for your patches,
Tariq