Message-ID: <76129ce2-37a7-4e97-81f6-f73f72723a17@gmail.com>
Date: Thu, 6 Feb 2025 21:44:38 +0200
From: Tariq Toukan <ttoukan.linux@...il.com>
To: Jakub Kicinski <kuba@...nel.org>, davem@...emloft.net
Cc: netdev@...r.kernel.org, edumazet@...gle.com, pabeni@...hat.com,
 andrew+netdev@...n.ch, horms@...nel.org, tariqt@...dia.com, hawk@...nel.org
Subject: Re: [PATCH net-next 1/4] eth: mlx4: create a page pool for Rx



On 05/02/2025 5:12, Jakub Kicinski wrote:
> Create a pool per rx queue. Subsequent patches will make use of it.
> 
> Move fcs_del to a hole to make space for the pointer.
> 
> Signed-off-by: Jakub Kicinski <kuba@...nel.org>
> ---
>   drivers/net/ethernet/mellanox/mlx4/mlx4_en.h |  3 ++-
>   drivers/net/ethernet/mellanox/mlx4/en_rx.c   | 24 +++++++++++++++++++-
>   2 files changed, 25 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
> index 28b70dcc652e..29f48e63081b 100644
> --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
> +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
> @@ -335,10 +335,11 @@ struct mlx4_en_rx_ring {
>   	u16 stride;
>   	u16 log_stride;
>   	u16 cqn;	/* index of port CQ associated with this ring */
> +	u8  fcs_del;
>   	u32 prod;
>   	u32 cons;
>   	u32 buf_size;
> -	u8  fcs_del;
> +	struct page_pool *pp;
>   	void *buf;
>   	void *rx_info;
>   	struct bpf_prog __rcu *xdp_prog;
> diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
> index 15c57e9517e9..2c23d75baf14 100644
> --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
> +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
> @@ -48,6 +48,7 @@
>   #if IS_ENABLED(CONFIG_IPV6)
>   #include <net/ip6_checksum.h>
>   #endif
> +#include <net/page_pool/helpers.h>
>   
>   #include "mlx4_en.h"
>   
> @@ -268,6 +269,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
>   			   u32 size, u16 stride, int node, int queue_index)
>   {
>   	struct mlx4_en_dev *mdev = priv->mdev;
> +	struct page_pool_params pp = {};
>   	struct mlx4_en_rx_ring *ring;
>   	int err = -ENOMEM;
>   	int tmp;
> @@ -286,9 +288,26 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
>   	ring->log_stride = ffs(ring->stride) - 1;
>   	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
>   
> -	if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
> +	pp.flags = PP_FLAG_DMA_MAP;
> +	pp.pool_size = MLX4_EN_MAX_RX_SIZE;

Pool size is not accurate.

On one hand, MLX4_EN_MAX_RX_SIZE might be too big compared to the
actual ring size.

More importantly, though, it can be too small when working with a
large MTU (a large MTU is mutually exclusive with XDP in mlx4).

Rx ring entries consist of 'frags': each entry needs between 1 and 4
(MLX4_EN_MAX_RX_FRAGS) frags. With the default MTU, each page is
shared between two entries. See the sizing sketch below.
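
A minimal, untested sketch of the sizing I have in mind (it assumes
priv->num_frags already reflects the frag layout chosen for the
current MTU at ring-creation time, which would need verifying):

	/* Untested sketch: size the pool from the actual ring size and
	 * the worst case of one page per frag (up to
	 * MLX4_EN_MAX_RX_FRAGS), rather than the MLX4_EN_MAX_RX_SIZE
	 * cap. Assumption: priv->num_frags is already valid for the
	 * current MTU at this point.
	 */
	pp.pool_size = size * priv->num_frags;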

> +	pp.nid = node;
> +	pp.napi = &priv->rx_cq[queue_index]->napi;
> +	pp.netdev = priv->dev;
> +	pp.dev = &mdev->dev->persist->pdev->dev;
> +	pp.dma_dir = DMA_BIDIRECTIONAL;
> +
> +	ring->pp = page_pool_create(&pp);
> +	if (IS_ERR(ring->pp))
>   		goto err_ring;
>   
> +	if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
> +		goto err_pp;
> +
> +	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_POOL,
> +					 ring->pp);
> +	if (err)
> +		goto err_xdp_info;
> +
>   	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
>   					sizeof(struct mlx4_en_rx_alloc));
>   	ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node);
> @@ -319,6 +338,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
>   	ring->rx_info = NULL;
>   err_xdp_info:
>   	xdp_rxq_info_unreg(&ring->xdp_rxq);
> +err_pp:
> +	page_pool_destroy(ring->pp);
>   err_ring:
>   	kfree(ring);
>   	*pring = NULL;
> @@ -445,6 +466,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
>   	xdp_rxq_info_unreg(&ring->xdp_rxq);
>   	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
>   	kvfree(ring->rx_info);
> +	page_pool_destroy(ring->pp);
>   	ring->rx_info = NULL;
>   	kfree(ring);
>   	*pring = NULL;

