[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <adbf7344-2d1e-45ff-86da-e2a7299f8c13@nvidia.com>
Date: Wed, 8 Jan 2025 14:04:03 +0200
From: Moshe Shemesh <moshe@...dia.com>
To: Przemek Kitszel <przemyslaw.kitszel@...el.com>, Tariq Toukan
<tariqt@...dia.com>
CC: <netdev@...r.kernel.org>, Saeed Mahameed <saeedm@...dia.com>, Gal Pressman
<gal@...dia.com>, Leon Romanovsky <leonro@...dia.com>, Mark Bloch
<mbloch@...dia.com>, Yevgeny Kliteynik <kliteyn@...dia.com>, "David S.
Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni
<pabeni@...hat.com>, Eric Dumazet <edumazet@...gle.com>, Andrew Lunn
<andrew+netdev@...n.ch>
Subject: Re: [PATCH net-next 06/13] net/mlx5: fs, add HWS modify header API
function
On 1/7/2025 2:09 PM, Przemek Kitszel wrote:
>
> On 1/7/25 07:07, Tariq Toukan wrote:
>> From: Moshe Shemesh <moshe@...dia.com>
>>
>> Add modify header alloc and dealloc API functions to provide modify
>> header actions for steering rules. Use fs hws pools to get actions from
>> shared bulks of modify header actions.
>>
>> Signed-off-by: Moshe Shemesh <moshe@...dia.com>
>> Reviewed-by: Yevgeny Kliteynik <kliteyn@...dia.com>
>> Reviewed-by: Mark Bloch <mbloch@...dia.com>
>> Signed-off-by: Tariq Toukan <tariqt@...dia.com>
>> ---
>> .../net/ethernet/mellanox/mlx5/core/fs_core.h | 1 +
>> .../mellanox/mlx5/core/steering/hws/fs_hws.c | 117 +++++++++++++
>> .../mellanox/mlx5/core/steering/hws/fs_hws.h | 2 +
>> .../mlx5/core/steering/hws/fs_hws_pools.c | 164 ++++++++++++++++++
>> .../mlx5/core/steering/hws/fs_hws_pools.h | 22 +++
>> 5 files changed, 306 insertions(+)
>>
>> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
>> b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
>> index 9b0575a61362..06ec48f51b6d 100644
>> --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
>> +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
>> @@ -65,6 +65,7 @@ struct mlx5_modify_hdr {
>> enum mlx5_flow_resource_owner owner;
>> union {
>> struct mlx5_fs_dr_action fs_dr_action;
>> + struct mlx5_fs_hws_action fs_hws_action;
>> u32 id;
>> };
>> };
>> diff --git
>> a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
>> b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
>> index 723865140b2e..a75e5ce168c7 100644
>> --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
>> +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
>> @@ -14,6 +14,8 @@ static struct mlx5hws_action *
>> create_action_remove_header_vlan(struct mlx5hws_context *ctx);
>> static void destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray
>> *pr_pools,
>> unsigned long index);
>> +static void destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray
>> *mh_pools,
>> + unsigned long index);
>
> usual "please add your suffix" complaint
OK
>
> sorry for mostly nitpicks, I will take deeper look later
>
>>
>> static int init_hws_actions_pool(struct mlx5_core_dev *dev,
>> struct mlx5_fs_hws_context *fs_ctx)
>> @@ -56,6 +58,7 @@ static int init_hws_actions_pool(struct
>> mlx5_core_dev *dev,
>> goto cleanup_insert_hdr;
>> xa_init(&hws_pool->el2tol3tnl_pools);
>> xa_init(&hws_pool->el2tol2tnl_pools);
>> + xa_init(&hws_pool->mh_pools);
>> return 0;
>>
>> cleanup_insert_hdr:
>> @@ -81,6 +84,9 @@ static void cleanup_hws_actions_pool(struct
>> mlx5_fs_hws_context *fs_ctx)
>> struct mlx5_fs_pool *pool;
>> unsigned long i;
>>
>> + xa_for_each(&hws_pool->mh_pools, i, pool)
>> + destroy_mh_pool(pool, &hws_pool->mh_pools, i);
>> + xa_destroy(&hws_pool->mh_pools);
>> xa_for_each(&hws_pool->el2tol2tnl_pools, i, pool)
>> destroy_pr_pool(pool, &hws_pool->el2tol2tnl_pools, i);
>> xa_destroy(&hws_pool->el2tol2tnl_pools);
>> @@ -528,6 +534,115 @@ static void
>> mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace
>> pkt_reformat->fs_hws_action.pr_data = NULL;
>> }
>>
>> +static struct mlx5_fs_pool *
>> +create_mh_pool(struct mlx5_core_dev *dev,
>
> ditto prefix
OK
>
> [...]
>
>> +static int mlx5_cmd_hws_modify_header_alloc(struct
>> mlx5_flow_root_namespace *ns,
>> + u8 namespace, u8 num_actions,
>> + void *modify_actions,
>> + struct mlx5_modify_hdr
>> *modify_hdr)
>> +{
>> + struct mlx5_fs_hws_actions_pool *hws_pool =
>> &ns->fs_hws_context.hws_pool;
>> + struct mlx5hws_action_mh_pattern pattern = {};
>> + struct mlx5_fs_hws_mh *mh_data = NULL;
>> + struct mlx5hws_action *hws_action;
>> + struct mlx5_fs_pool *pool;
>> + unsigned long i, cnt = 0;
>> + bool known_pattern;
>> + int err;
>> +
>> + pattern.sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) *
>> num_actions;
>> + pattern.data = modify_actions;
>> +
>> + known_pattern = false;
>> + xa_for_each(&hws_pool->mh_pools, i, pool) {
>> + if (mlx5_fs_hws_mh_pool_match(pool, &pattern)) {
>> + known_pattern = true;
>> + break;
>> + }
>> + cnt++;
>> + }
>> +
>> + if (!known_pattern) {
>> + pool = create_mh_pool(ns->dev, &pattern,
>> &hws_pool->mh_pools, cnt);
>> + if (IS_ERR(pool))
>> + return PTR_ERR(pool);
>> + }
>
> if, by any chance, .mh_pools was empty, the next line would have @pool
> uninitialized
If .mh_pools was empty then known_pattern is false and create_mh_pool()
is called, which returns a valid pool or an error.
>
>> + mh_data = mlx5_fs_hws_mh_pool_acquire_mh(pool);
>> + if (IS_ERR(mh_data)) {
>> + err = PTR_ERR(mh_data);
>> + goto destroy_pool;
>> + }
>> + hws_action = mh_data->bulk->hws_action;
>> + mh_data->data = kmemdup(pattern.data, pattern.sz, GFP_KERNEL);
>> + if (!mh_data->data) {
>> + err = -ENOMEM;
>> + goto release_mh;
>> + }
>> + modify_hdr->fs_hws_action.mh_data = mh_data;
>> + modify_hdr->fs_hws_action.fs_pool = pool;
>> + modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
>> + modify_hdr->fs_hws_action.hws_action = hws_action;
>> +
>> + return 0;
>> +
>> +release_mh:
>> + mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
>> +destroy_pool:
>> + if (!known_pattern)
>> + destroy_mh_pool(pool, &hws_pool->mh_pools, cnt);
>> + return err;
>> +}
>
> [...]
>
>> +static struct mlx5_fs_bulk *
>> +mlx5_fs_hws_mh_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
>> +{
>> + struct mlx5hws_action_mh_pattern *pattern;
>> + struct mlx5_flow_root_namespace *root_ns;
>> + struct mlx5_fs_hws_mh_bulk *mh_bulk;
>> + struct mlx5hws_context *ctx;
>> + int bulk_len;
>> + int i;
>
> meld @i to prev line, or better declare within the for loop
OK
>
>> +
>> + root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
>> + if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
>> + return NULL;
>> +
>> + ctx = root_ns->fs_hws_context.hws_ctx;
>> + if (!ctx)
>> + return NULL;
>> +
>> + if (!pool_ctx)
>> + return NULL;
>
> you could combine the two checks above
>
> [...]
>
>> +bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
>> + struct mlx5hws_action_mh_pattern *pattern)
>> +{
>> + struct mlx5hws_action_mh_pattern *pool_pattern;
>> + int num_actions, i;
>> +
>> + pool_pattern = mh_pool->pool_ctx;
>> + if (WARN_ON_ONCE(!pool_pattern))
>> + return false;
>> +
>> + if (pattern->sz != pool_pattern->sz)
>> + return false;
>> + num_actions = pattern->sz /
>> MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
>> + for (i = 0; i < num_actions; i++)
>
> missing braces
Ack
>
>> + if ((__force __be32)pattern->data[i] !=
>> + (__force __be32)pool_pattern->data[i])
>> + return false;
>> + return true;
>> +}
>
Powered by blists - more mailing lists