Message-Id: <94a5d82c0c399747117d8a558f9beebfbcf26154.1680162300.git.leonro@nvidia.com>
Date: Thu, 30 Mar 2023 11:02:31 +0300
From: Leon Romanovsky <leon@...nel.org>
To: Paolo Abeni <pabeni@...hat.com>, Jakub Kicinski <kuba@...nel.org>,
"David S . Miller" <davem@...emloft.net>
Cc: Leon Romanovsky <leonro@...dia.com>,
Saeed Mahameed <saeedm@...dia.com>,
linux-netdev <netdev@...r.kernel.org>,
Raed Salem <raeds@...dia.com>,
Eric Dumazet <edumazet@...gle.com>,
Herbert Xu <herbert@...dor.apana.org.au>,
Steffen Klassert <steffen.klassert@...unet.com>
Subject: [PATCH net-next 10/10] net/mlx5e: Simulate missing IPsec TX limits hardware functionality
From: Leon Romanovsky <leonro@...dia.com>
ConnectX-7 devices don't have the ability to send TX hard/soft limit
events. As a possible workaround, let's rely on the existing
infrastructure and periodically check the cached flow counter. In these
periodic checks, we call xfrm_state_check_expire() to check and mark
the state accordingly. Once the state is marked as XFRM_STATE_EXPIRED,
the SA flow rule is changed to drop all traffic.
Signed-off-by: Leon Romanovsky <leonro@...dia.com>
---
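For context, the handler added in the first hunk below is an instance of the
kernel's usual self-rescheduling delayed-work pattern: the work checks a
condition, takes a one-shot action when the condition triggers, and otherwise
requeues itself. A minimal, generic sketch of that pattern follows; every name
in it (poll_ctx, poll_limit_work, poll_ctx_start/stop, POLL_PERIOD) is invented
for illustration and is not part of this patch or of the mlx5 driver.

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

#define POLL_PERIOD	msecs_to_jiffies(1000)

struct poll_ctx {
	struct delayed_work dwork;
	struct workqueue_struct *wq;
	/* e.g. compare a cached counter against a configured limit */
	bool (*limit_hit)(struct poll_ctx *ctx);
	/* e.g. rewrite the offload rule so that it drops traffic */
	void (*on_expire)(struct poll_ctx *ctx);
};

static void poll_limit_work(struct work_struct *work)
{
	struct poll_ctx *ctx = container_of(work, struct poll_ctx, dwork.work);

	if (ctx->limit_hit(ctx)) {
		/* Limit reached: act once and stop polling. */
		ctx->on_expire(ctx);
		return;
	}

	/* Limit not reached yet: check again after POLL_PERIOD. */
	queue_delayed_work(ctx->wq, &ctx->dwork, POLL_PERIOD);
}

static void poll_ctx_start(struct poll_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->dwork, poll_limit_work);
	queue_delayed_work(ctx->wq, &ctx->dwork, POLL_PERIOD);
}

static void poll_ctx_stop(struct poll_ctx *ctx)
{
	/* Waits for a running instance; safe even for self-requeueing work. */
	cancel_delayed_work_sync(&ctx->dwork);
}

In the patch itself the same shape appears as mlx5e_ipsec_handle_tx_limit():
the condition check is xfrm_state_check_expire() on the xfrm state, the expiry
action is switching the SA flow rule to drop via
mlx5e_accel_ipsec_fs_modify(), and the work is queued once the HW rule is
installed and cancelled in mlx5e_xfrm_free_state().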
.../mellanox/mlx5/core/en_accel/ipsec.c | 65 ++++++++++++++++++-
.../mellanox/mlx5/core/en_accel/ipsec.h | 8 +++
.../mellanox/mlx5/core/en_accel/ipsec_fs.c | 31 +++++++--
3 files changed, 99 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 89d5802888a9..bb5e9f5b904e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -40,6 +40,8 @@
#include "ipsec.h"
#include "ipsec_rxtx.h"
+#define MLX5_IPSEC_RESCHED msecs_to_jiffies(1000)
+
static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
@@ -50,6 +52,28 @@ static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
}
+static void mlx5e_ipsec_handle_tx_limit(struct work_struct *_work)
+{
+ struct mlx5e_ipsec_dwork *dwork =
+ container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
+ struct mlx5e_ipsec_sa_entry *sa_entry = dwork->sa_entry;
+ struct xfrm_state *x = sa_entry->x;
+
+ spin_lock(&x->lock);
+ xfrm_state_check_expire(x);
+ if (x->km.state == XFRM_STATE_EXPIRED) {
+ sa_entry->attrs.drop = true;
+ mlx5e_accel_ipsec_fs_modify(sa_entry);
+ }
+ spin_unlock(&x->lock);
+
+ if (sa_entry->attrs.drop)
+ return;
+
+ queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
+ MLX5_IPSEC_RESCHED);
+}
+
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct xfrm_state *x = sa_entry->x;
@@ -464,6 +488,31 @@ static int mlx5_ipsec_create_work(struct mlx5e_ipsec_sa_entry *sa_entry)
return 0;
}
+static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct xfrm_state *x = sa_entry->x;
+ struct mlx5e_ipsec_dwork *dwork;
+
+ if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+ return 0;
+
+ if (x->xso.dir != XFRM_DEV_OFFLOAD_OUT)
+ return 0;
+
+ if (x->lft.soft_packet_limit == XFRM_INF &&
+ x->lft.hard_packet_limit == XFRM_INF)
+ return 0;
+
+ dwork = kzalloc(sizeof(*dwork), GFP_KERNEL);
+ if (!dwork)
+ return -ENOMEM;
+
+ dwork->sa_entry = sa_entry;
+ INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_tx_limit);
+ sa_entry->dwork = dwork;
+ return 0;
+}
+
static int mlx5e_xfrm_add_state(struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
@@ -504,10 +553,14 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
if (err)
goto err_xfrm;
+ err = mlx5e_ipsec_create_dwork(sa_entry);
+ if (err)
+ goto release_work;
+
/* create hw context */
err = mlx5_ipsec_create_sa_ctx(sa_entry);
if (err)
- goto release_work;
+ goto release_dwork;
err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
if (err)
@@ -523,6 +576,10 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
goto err_add_rule;
mlx5e_ipsec_set_esn_ops(sa_entry);
+
+ if (sa_entry->dwork)
+ queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork,
+ MLX5_IPSEC_RESCHED);
out:
x->xso.offload_handle = (unsigned long)sa_entry;
return 0;
@@ -531,6 +588,8 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
mlx5e_accel_ipsec_fs_del_rule(sa_entry);
err_hw_ctx:
mlx5_ipsec_free_sa_ctx(sa_entry);
+release_dwork:
+ kfree(sa_entry->dwork);
release_work:
kfree(sa_entry->work);
err_xfrm:
@@ -562,8 +621,12 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
if (sa_entry->work)
cancel_work_sync(&sa_entry->work->work);
+ if (sa_entry->dwork)
+ cancel_delayed_work_sync(&sa_entry->dwork->dwork);
+
mlx5e_accel_ipsec_fs_del_rule(sa_entry);
mlx5_ipsec_free_sa_ctx(sa_entry);
+ kfree(sa_entry->dwork);
kfree(sa_entry->work);
sa_entry_free:
kfree(sa_entry);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index ab48fb9b4698..52890d7dce6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -93,6 +93,7 @@ struct mlx5_accel_esp_xfrm_attrs {
struct upspec upspec;
u8 dir : 2;
u8 type : 2;
+ u8 drop : 1;
u8 family;
struct mlx5_replay_esn replay_esn;
u32 authsize;
@@ -140,6 +141,11 @@ struct mlx5e_ipsec_work {
void *data;
};
+struct mlx5e_ipsec_dwork {
+ struct delayed_work dwork;
+ struct mlx5e_ipsec_sa_entry *sa_entry;
+};
+
struct mlx5e_ipsec_aso {
u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
dma_addr_t dma_addr;
@@ -193,6 +199,7 @@ struct mlx5e_ipsec_sa_entry {
u32 enc_key_id;
struct mlx5e_ipsec_rule ipsec_rule;
struct mlx5e_ipsec_work *work;
+ struct mlx5e_ipsec_dwork *dwork;
struct mlx5e_ipsec_limits limits;
};
@@ -235,6 +242,7 @@ int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
+void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 0539640a4d88..b47794d4146e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -926,9 +926,12 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
flow_act.flags |= FLOW_ACT_NO_APPEND;
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ if (attrs->drop)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ else
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[0].ft = rx->ft.status;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -1018,9 +1021,13 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
flow_act.flags |= FLOW_ACT_NO_APPEND;
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ if (attrs->drop)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ else
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
dest[0].ft = tx->ft.status;
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -1430,3 +1437,19 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
kfree(ipsec->tx);
return err;
}
+
+void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5e_ipsec_sa_entry sa_entry_shadow = {};
+ int err;
+
+ memcpy(&sa_entry_shadow, sa_entry, sizeof(*sa_entry));
+ memset(&sa_entry_shadow.ipsec_rule, 0x00, sizeof(sa_entry->ipsec_rule));
+
+ err = mlx5e_accel_ipsec_fs_add_rule(&sa_entry_shadow);
+ if (err)
+ return;
+
+ mlx5e_accel_ipsec_fs_del_rule(sa_entry);
+ memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
+}
--
2.39.2