Message-Id: <20221024135734.69673-7-saeed@kernel.org>
Date: Mon, 24 Oct 2022 14:57:26 +0100
From: Saeed Mahameed <saeed@...nel.org>
To: "David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Eric Dumazet <edumazet@...gle.com>
Cc: Saeed Mahameed <saeedm@...dia.com>, netdev@...r.kernel.org,
Tariq Toukan <tariqt@...dia.com>,
Yevgeny Kliteynik <kliteyn@...dia.com>,
Alex Vesker <valex@...dia.com>
Subject: [net-next 06/14] net/mlx5: DR, Initialize chunk's ste_arrays at chunk creation
From: Yevgeny Kliteynik <kliteyn@...dia.com>
Rather than zeroing the chunk's section of the ste_arrays on chunk deletion, initialize these areas when the chunk is created.

Chunk destruction tends to come in large batches (during pool syncing). To reduce the "hiccup" in such cases, move the ste_arrays init from chunk destruction to chunk creation.
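
As a rough illustration of the idea only (plain C with hypothetical chunk/ste_area names, not the driver's actual mlx5dr_icm_chunk code), the sketch below moves the zeroing from the destroy path, which runs in large batches at sync time, to the create path:

#include <stdlib.h>
#include <string.h>

/* Hypothetical chunk bookkeeping, standing in for the driver's
 * mlx5dr_icm_chunk/buddy structures.
 */
struct chunk {
	unsigned char *ste_area;	/* chunk's slice of a shared ste array */
	size_t ste_area_len;
};

/* Old scheme: zero the slice when the chunk is destroyed.  Destroys
 * arrive in large batches during pool syncing, so the memsets pile up
 * there and cause the "hiccup".
 */
static void chunk_destroy_old(struct chunk *c)
{
	memset(c->ste_area, 0, c->ste_area_len);
	free(c);
}

/* New scheme: zero the slice when the chunk is created, spreading the
 * cost across allocations; destroy becomes just freeing the metadata.
 */
static struct chunk *chunk_create(unsigned char *base, size_t off, size_t len)
{
	struct chunk *c = malloc(sizeof(*c));

	if (!c)
		return NULL;
	c->ste_area = base + off;
	c->ste_area_len = len;
	memset(c->ste_area, 0, len);	/* init moved here */
	return c;
}

static void chunk_destroy_new(struct chunk *c)
{
	free(c);	/* no memset on the batched destroy path */
}

int main(void)
{
	static unsigned char ste_base[4096];
	struct chunk *a = chunk_create(ste_base, 0, 256);
	struct chunk *b = chunk_create(ste_base, 256, 256);

	if (a)
		chunk_destroy_old(a);	/* old behaviour, for comparison */
	if (b)
		chunk_destroy_new(b);
	return 0;
}

With the memset on the create side, the per-chunk cost is amortized over allocations instead of piling up when the pool is synced.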
Signed-off-by: Yevgeny Kliteynik <kliteyn@...dia.com>
Reviewed-by: Alex Vesker <valex@...dia.com>
Signed-off-by: Saeed Mahameed <saeedm@...dia.com>
---
.../mellanox/mlx5/core/steering/dr_icm_pool.c | 25 +++----------------
1 file changed, 4 insertions(+), 21 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index 4cdc9e9a54e1..7ca1ef073f55 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -177,42 +177,25 @@ static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy)
 
 static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
 {
+	int num_of_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
 	struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+	int ste_size = dr_icm_buddy_get_ste_size(buddy);
 	int index = offset / DR_STE_SIZE;
 
 	chunk->ste_arr = &buddy->ste_arr[index];
 	chunk->miss_list = &buddy->miss_list[index];
-	chunk->hw_ste_arr = buddy->hw_ste_arr +
-			    index * dr_icm_buddy_get_ste_size(buddy);
-}
-
-static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
-{
-	int num_of_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
-	struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+	chunk->hw_ste_arr = buddy->hw_ste_arr + index * ste_size;
 
-	memset(chunk->hw_ste_arr, 0,
-	       num_of_entries * dr_icm_buddy_get_ste_size(buddy));
+	memset(chunk->hw_ste_arr, 0, num_of_entries * ste_size);
 	memset(chunk->ste_arr, 0,
 	       num_of_entries * sizeof(chunk->ste_arr[0]));
 }
 
-static enum mlx5dr_icm_type
-get_chunk_icm_type(struct mlx5dr_icm_chunk *chunk)
-{
-	return chunk->buddy_mem->pool->icm_type;
-}
-
 static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk)
 {
-	enum mlx5dr_icm_type icm_type = get_chunk_icm_type(chunk);
-
 	chunk->buddy_mem->used_memory -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
 	list_del(&chunk->chunk_list);
 
-	if (icm_type == DR_ICM_TYPE_STE)
-		dr_icm_chunk_ste_cleanup(chunk);
-
 	kvfree(chunk);
 }
--
2.37.3