Message-ID: <20190729234934.23595-9-saeedm@mellanox.com>
Date: Mon, 29 Jul 2019 23:50:27 +0000
From: Saeed Mahameed <saeedm@...lanox.com>
To: "David S. Miller" <davem@...emloft.net>
CC: "netdev@...r.kernel.org" <netdev@...r.kernel.org>,
Vlad Buslov <vladbu@...lanox.com>,
Jianbo Liu <jianbol@...lanox.com>,
Roi Dayan <roid@...lanox.com>,
Saeed Mahameed <saeedm@...lanox.com>
Subject: [net-next 08/13] net/mlx5e: Protect tc flows hashtable with rcu
From: Vlad Buslov <vladbu@...lanox.com>
In order to remove the dependency on the rtnl lock, access to the tc flows
hashtable must be explicitly protected from concurrent flow removal.
Extend the tc flow structure with an rcu head to allow concurrent access. Use
the rcu read lock to safely look up a flow in the tc flows hash table and take
a reference to it. Free flows via rcu to accommodate concurrent stats
requests.
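For illustration only, the lookup-and-hold pattern this relies on looks
roughly like the user-space sketch below (C11 atomics stand in for the
kernel's refcount_t and RCU machinery; struct tc_flow, tc_flow_get() and
tc_flow_put() are made-up names, not the driver's code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct tc_flow {
        atomic_uint refcnt;     /* stands in for the kernel's refcount_t */
};

static bool tc_flow_get(struct tc_flow *flow)
{
        unsigned int old = atomic_load(&flow->refcnt);

        /* Take a reference only while the flow is still alive (refcnt > 0),
         * the rough equivalent of refcount_inc_not_zero(): a concurrent
         * deleter may already have dropped the last reference, in which
         * case the lookup must fail instead of resurrecting the flow.
         */
        do {
                if (old == 0)
                        return false;
        } while (!atomic_compare_exchange_weak(&flow->refcnt, &old, old + 1));

        return true;
}

static bool tc_flow_put(struct tc_flow *flow)
{
        /* The last put frees the flow; with RCU the actual free is deferred
         * (kfree_rcu()) so read-side lookups never touch freed memory.
         */
        return atomic_fetch_sub(&flow->refcnt, 1) == 1;
}

int main(void)
{
        struct tc_flow flow = { .refcnt = 1 };

        printf("get while alive: %d\n", tc_flow_get(&flow));   /* 1 */
        tc_flow_put(&flow);
        tc_flow_put(&flow);                          /* refcnt hits 0 */
        printf("get after free:  %d\n", tc_flow_get(&flow));   /* 0 */
        return 0;
}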
Add a new DELETED flow flag. Implement a new flow_flag_test_and_set() helper
that sets a flag and returns its previous value. Use it to atomically set the
flag in mlx5e_delete_flower() to guarantee that a flow can only be deleted
once, even when the same flow is deleted concurrently by multiple tasks.
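The delete-once guarantee follows directly from the test-and-set semantics;
a minimal user-space sketch of the same idea (atomic_fetch_or() standing in
for the kernel's test_and_set_bit(); flow_deleted_once() is a hypothetical
helper, not part of the patch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FLOW_FLAG_DELETED (1UL << 0)

/* Atomically set the DELETED bit and report whether it was already set.
 * Exactly one caller ever sees "false", so when several tasks race to
 * delete the same flow only one of them proceeds with the removal; the
 * others bail out with -EINVAL.
 */
static bool flow_deleted_once(atomic_ulong *flags)
{
        return atomic_fetch_or(flags, FLOW_FLAG_DELETED) & FLOW_FLAG_DELETED;
}

int main(void)
{
        atomic_ulong flags = 0;

        printf("already deleted? %d\n", flow_deleted_once(&flags)); /* 0 */
        printf("already deleted? %d\n", flow_deleted_once(&flags)); /* 1 */
        return 0;
}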
Signed-off-by: Vlad Buslov <vladbu@...lanox.com>
Reviewed-by: Jianbo Liu <jianbol@...lanox.com>
Reviewed-by: Roi Dayan <roid@...lanox.com>
Signed-off-by: Saeed Mahameed <saeedm@...lanox.com>
---
.../net/ethernet/mellanox/mlx5/core/en_tc.c | 47 ++++++++++++++++---
1 file changed, 40 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 241157b699df..a39f8a07de0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -79,6 +79,7 @@ enum {
MLX5E_TC_FLOW_FLAG_SLOW = MLX5E_TC_FLOW_BASE + 3,
MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4,
MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
+ MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
};
#define MLX5E_TC_MAX_SPLITS 1
@@ -122,6 +123,7 @@ struct mlx5e_tc_flow {
struct list_head peer; /* flows with peer flow */
struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */
refcount_t refcnt;
+ struct rcu_head rcu_head;
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -201,7 +203,7 @@ static void mlx5e_flow_put(struct mlx5e_priv *priv,
{
if (refcount_dec_and_test(&flow->refcnt)) {
mlx5e_tc_del_flow(priv, flow);
- kfree(flow);
+ kfree_rcu(flow, rcu_head);
}
}
@@ -214,6 +216,17 @@ static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
+static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
+ unsigned long flag)
+{
+ /* test_and_set_bit() provides all necessary barriers */
+ return test_and_set_bit(flag, &flow->flags);
+}
+
+#define flow_flag_test_and_set(flow, flag) \
+ __flow_flag_test_and_set(flow, \
+ MLX5E_TC_FLOW_FLAG_##flag)
+
static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
/* Complete all memory stores before clearing bit. */
@@ -3451,7 +3464,9 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
int err = 0;
- flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
+ rcu_read_lock();
+ flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
+ rcu_read_unlock();
if (flow) {
NL_SET_ERR_MSG_MOD(extack,
"flow cookie already exists, ignoring");
@@ -3466,7 +3481,7 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
if (err)
goto out;
- err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
+ err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
if (err)
goto err_free;
@@ -3492,16 +3507,32 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
{
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
struct mlx5e_tc_flow *flow;
+ int err;
+ rcu_read_lock();
flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
- if (!flow || !same_flow_direction(flow, flags))
- return -EINVAL;
+ if (!flow || !same_flow_direction(flow, flags)) {
+ err = -EINVAL;
+ goto errout;
+ }
+ /* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
+ * set.
+ */
+ if (flow_flag_test_and_set(flow, DELETED)) {
+ err = -EINVAL;
+ goto errout;
+ }
rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
+ rcu_read_unlock();
mlx5e_flow_put(priv, flow);
return 0;
+
+errout:
+ rcu_read_unlock();
+ return err;
}
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
@@ -3517,8 +3548,10 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
u64 bytes = 0;
int err = 0;
- flow = mlx5e_flow_get(rhashtable_lookup_fast(tc_ht, &f->cookie,
- tc_ht_params));
+ rcu_read_lock();
+ flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
+ tc_ht_params));
+ rcu_read_unlock();
if (IS_ERR(flow))
return PTR_ERR(flow);
--
2.21.0