Date: Sun, 31 May 2020 11:06:41 +0300
From: Oz Shlomo <ozsh@...lanox.com>
To: wenxu@...oud.cn, paulb@...lanox.com, saeedm@...lanox.com, ecree@...arflare.com
Cc: netdev@...r.kernel.org
Subject: Re: [PATCH net-next v2] net/mlx5e: add conntrack offload rules only in ct or ct_nat flow table

Hi Wenxu,

Just saw v2. Please see my comment in v1.

On 5/30/2020 9:27 AM, wenxu@...oud.cn wrote:
> From: wenxu <wenxu@...oud.cn>
>
> In ct offload, every conntrack entry offload rule is added to both
> the ct and the ct_nat flow tables, which is unnecessary.
> The driver can distinguish NAT from non-NAT conntrack entries
> through the FLOW_ACTION_MANGLE action.
>
> Signed-off-by: wenxu <wenxu@...oud.cn>
> ---
>  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 52 ++++++++++++----------
>  1 file changed, 28 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
> index 995b2ef..2281549 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
> @@ -59,7 +59,6 @@ struct mlx5_ct_zone_rule {
>  	struct mlx5_flow_handle *rule;
>  	struct mlx5_esw_flow_attr attr;
>  	int tupleid;
> -	bool nat;
>  };
>
>  struct mlx5_tc_ct_pre {
> @@ -88,7 +87,7 @@ struct mlx5_ct_entry {
>  	struct mlx5_fc *counter;
>  	unsigned long cookie;
>  	unsigned long restore_cookie;
> -	struct mlx5_ct_zone_rule zone_rules[2];
> +	struct mlx5_ct_zone_rule zone_rule;
>  };
>
>  static const struct rhashtable_params cts_ht_params = {
> @@ -238,10 +237,9 @@ struct mlx5_ct_entry {
>
>  static void
>  mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
> -			  struct mlx5_ct_entry *entry,
> -			  bool nat)
> +			  struct mlx5_ct_entry *entry)
>  {
> -	struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
> +	struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rule;
>  	struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
>  	struct mlx5_eswitch *esw = ct_priv->esw;
>
> @@ -256,8 +254,7 @@ struct mlx5_ct_entry {
>  mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv,
>  			   struct mlx5_ct_entry *entry)
>  {
> -	mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
> -	mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
> +	mlx5_tc_ct_entry_del_rule(ct_priv, entry);
>
>  	mlx5_fc_destroy(ct_priv->esw->dev, entry->counter);
>  }
> @@ -493,15 +490,13 @@ struct mlx5_ct_entry {
>  			  struct mlx5_ct_entry *entry,
>  			  bool nat)
>  {
> -	struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
> +	struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rule;
>  	struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
>  	struct mlx5_eswitch *esw = ct_priv->esw;
>  	struct mlx5_flow_spec *spec = NULL;
>  	u32 tupleid;
>  	int err;
>
> -	zone_rule->nat = nat;
> -
>  	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
>  	if (!spec)
>  		return -ENOMEM;
> @@ -562,7 +557,8 @@ struct mlx5_ct_entry {
>  static int
>  mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
>  			   struct flow_rule *flow_rule,
> -			   struct mlx5_ct_entry *entry)
> +			   struct mlx5_ct_entry *entry,
> +			   bool nat)
>  {
>  	struct mlx5_eswitch *esw = ct_priv->esw;
>  	int err;
> @@ -574,21 +570,26 @@ struct mlx5_ct_entry {
>  		return err;
>  	}
>
> -	err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false);
> +	err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, nat);
>  	if (err)
> -		goto err_orig;
> +		mlx5_fc_destroy(esw->dev, entry->counter);
>
> -	err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true);
> -	if (err)
> -		goto err_nat;
> +	return err;
> +}
>
> -	return 0;
> +static bool
> +mlx5_tc_ct_has_mangle_action(struct flow_rule *flow_rule)
> +{
> +	struct flow_action *flow_action = &flow_rule->action;
> +	struct flow_action_entry *act;
> +	int i;
>
> -err_nat:
> -	mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
> -err_orig:
> -	mlx5_fc_destroy(esw->dev, entry->counter);
> -	return err;
> +	flow_action_for_each(i, act, flow_action) {
> +		if (act->id == FLOW_ACTION_MANGLE)
> +			return true;
> +	}
> +
> +	return false;
>  }
>
>  static int
> @@ -600,6 +601,7 @@ struct mlx5_ct_entry {
>  	struct flow_action_entry *meta_action;
>  	unsigned long cookie = flow->cookie;
>  	struct mlx5_ct_entry *entry;
> +	bool nat;
>  	int err;
>
>  	meta_action = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
> @@ -619,7 +621,9 @@ struct mlx5_ct_entry {
>  	entry->cookie = flow->cookie;
>  	entry->restore_cookie = meta_action->ct_metadata.cookie;
>
> -	err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry);
> +	nat = mlx5_tc_ct_has_mangle_action(flow_rule);
> +
> +	err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry, nat);
>  	if (err)
>  		goto err_rules;
>
> @@ -1620,7 +1624,7 @@ struct mlx5_flow_handle *
>  		return false;
>
>  	entry = container_of(zone_rule, struct mlx5_ct_entry,
> -			     zone_rules[zone_rule->nat]);
> +			     zone_rule);
>  	tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);
>
>  	return true;
>
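For context, the NAT classification the patch relies on can be illustrated outside the driver: a conntrack offload entry whose rule rewrites packet headers carries a FLOW_ACTION_MANGLE action in its action list, so scanning that list is enough to decide whether the entry belongs in the ct or the ct_nat table. Below is a minimal user-space sketch of that check; the sketch_* types are simplified stand-ins for the kernel's struct flow_action and struct flow_action_entry, not the real definitions.

/*
 * Minimal user-space sketch of the NAT check used in the patch.
 * The sketch_* types only model the action id; they are not the
 * kernel's flow offload structures.
 */
#include <stdbool.h>
#include <stdio.h>

enum sketch_action_id {
	SKETCH_ACTION_CT_METADATA,	/* stand-in for FLOW_ACTION_CT_METADATA */
	SKETCH_ACTION_MANGLE,		/* stand-in for FLOW_ACTION_MANGLE (header rewrite) */
};

struct sketch_action_entry {
	enum sketch_action_id id;
};

struct sketch_flow_action {
	int num_entries;
	struct sketch_action_entry entries[8];
};

/* Same idea as mlx5_tc_ct_has_mangle_action(): any mangle action => NAT entry. */
static bool sketch_has_mangle_action(const struct sketch_flow_action *action)
{
	int i;

	for (i = 0; i < action->num_entries; i++)
		if (action->entries[i].id == SKETCH_ACTION_MANGLE)
			return true;

	return false;
}

int main(void)
{
	/* A NATed entry rewrites headers, so it carries a mangle action. */
	struct sketch_flow_action nat_entry = {
		.num_entries = 2,
		.entries = { { SKETCH_ACTION_MANGLE }, { SKETCH_ACTION_CT_METADATA } },
	};
	/* A plain (non-NAT) entry only restores conntrack metadata. */
	struct sketch_flow_action plain_entry = {
		.num_entries = 1,
		.entries = { { SKETCH_ACTION_CT_METADATA } },
	};

	printf("nat_entry   -> offload to %s\n",
	       sketch_has_mangle_action(&nat_entry) ? "ct_nat table" : "ct table");
	printf("plain_entry -> offload to %s\n",
	       sketch_has_mangle_action(&plain_entry) ? "ct_nat table" : "ct table");

	return 0;
}

In the driver itself this decision is made once per offloaded conntrack entry via mlx5_tc_ct_has_mangle_action(), and the resulting nat flag selects which of the two flow tables the rule is instantiated in, instead of adding it to both.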