Message-ID: <8eab9a5a-ce82-4291-8952-5e5c4610e0b0@gmail.com>
Date: Tue, 11 Feb 2025 21:18:46 +0200
From: Tariq Toukan <ttoukan.linux@...il.com>
To: Jakub Kicinski <kuba@...nel.org>, davem@...emloft.net
Cc: netdev@...r.kernel.org, edumazet@...gle.com, pabeni@...hat.com,
 andrew+netdev@...n.ch, horms@...nel.org, tariqt@...dia.com, hawk@...nel.org
Subject: Re: [PATCH net-next 1/4] eth: mlx4: create a page pool for Rx



On 05/02/2025 5:12, Jakub Kicinski wrote:
> Create a pool per rx queue. Subsequent patches will make use of it.
> 
> Move fcs_del to a hole to make space for the pointer.
> 
> Signed-off-by: Jakub Kicinski <kuba@...nel.org>
> ---
>   drivers/net/ethernet/mellanox/mlx4/mlx4_en.h |  3 ++-
>   drivers/net/ethernet/mellanox/mlx4/en_rx.c   | 24 +++++++++++++++++++-
>   2 files changed, 25 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
> index 28b70dcc652e..29f48e63081b 100644
> --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
> +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h

..


> @@ -286,9 +288,26 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
>   	ring->log_stride = ffs(ring->stride) - 1;
>   	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
>   
> -	if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
> +	pp.flags = PP_FLAG_DMA_MAP;
> +	pp.pool_size = MLX4_EN_MAX_RX_SIZE;
> +	pp.nid = node;
> +	pp.napi = &priv->rx_cq[queue_index]->napi;
> +	pp.netdev = priv->dev;
> +	pp.dev = &mdev->dev->persist->pdev->dev;
> +	pp.dma_dir = DMA_BIDIRECTIONAL;

I just noticed one more thing: here we'd better take the value from 
priv->dma_dir, as it can be either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL 
depending on whether an XDP program is attached.
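
Something along these lines is what I have in mind (an untested sketch; 
it assumes priv->dma_dir is already set by the time the ring is created):

	pp.dma_dir = priv->dma_dir;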

> +
> +	ring->pp = page_pool_create(&pp);
> +	if (!ring->pp)
>   		goto err_ring;
>   
> +	if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
> +		goto err_pp;
> +
> +	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_POOL,
> +					 ring->pp);
> +	if (err)
> +		goto err_xdp_info;
> +
>   	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
>   					sizeof(struct mlx4_en_rx_alloc));
>   	ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node);
> @@ -319,6 +338,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
>   	ring->rx_info = NULL;
>   err_xdp_info:
>   	xdp_rxq_info_unreg(&ring->xdp_rxq);
> +err_pp:
> +	page_pool_destroy(ring->pp);
>   err_ring:
>   	kfree(ring);
>   	*pring = NULL;
> @@ -445,6 +466,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
>   	xdp_rxq_info_unreg(&ring->xdp_rxq);
>   	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
>   	kvfree(ring->rx_info);
> +	page_pool_destroy(ring->pp);
>   	ring->rx_info = NULL;
>   	kfree(ring);
>   	*pring = NULL;
