Message-Id: <20200221175415.390884-9-idosch@idosch.org>
Date: Fri, 21 Feb 2020 19:54:11 +0200
From: Ido Schimmel <idosch@...sch.org>
To: netdev@...r.kernel.org
Cc: davem@...emloft.net, jiri@...lanox.com, mlxsw@...lanox.com,
Ido Schimmel <idosch@...lanox.com>
Subject: [PATCH net-next 08/12] mlxsw: spectrum_dpipe: Take router lock from dpipe code

From: Ido Schimmel <idosch@...lanox.com>

The dpipe code traverses internal router structures such as neighbours
and adjacency entries and dumps them to user space via netlink. Up until
now the routing code did not have its own locks and relied on the RTNL
lock to serialize access. This is going to change with the introduction
of the router lock.

Take the router lock in the code paths where the RTNL lock is currently
taken, so that the latter can be removed by subsequent patches.
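
All converted call sites follow the same pattern: the router lock nests
inside the RTNL lock and is released in the reverse order. A minimal
sketch of the pattern (simplified; the error paths of the real call
sites are omitted):

	rtnl_lock();
	mutex_lock(&mlxsw_sp->router->lock);

	/* ... walk RIFs, neighbours or adjacency entries and dump ... */

	mutex_unlock(&mlxsw_sp->router->lock);
	rtnl_unlock();
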
Signed-off-by: Ido Schimmel <idosch@...lanox.com>
Acked-by: Jiri Pirko <jiri@...lanox.com>
---
 .../ethernet/mellanox/mlxsw/spectrum_dpipe.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 2dc0978428e6..63fc1f56ef00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -2,6 +2,7 @@
 /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
 
 #include <linux/kernel.h>
+#include <linux/mutex.h>
 #include <net/devlink.h>
 
 #include "spectrum.h"
@@ -211,6 +212,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled,
 
 	rif_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
 	rtnl_lock();
+	mutex_lock(&mlxsw_sp->router->lock);
 	i = 0;
 start_again:
 	err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
@@ -241,6 +243,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled,
 	devlink_dpipe_entry_ctx_close(dump_ctx);
 	if (i != rif_count)
 		goto start_again;
+	mutex_unlock(&mlxsw_sp->router->lock);
 	rtnl_unlock();
 
 	devlink_dpipe_entry_clear(&entry);
@@ -248,6 +251,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled,
 err_entry_append:
 err_entry_get:
 err_ctx_prepare:
+	mutex_unlock(&mlxsw_sp->router->lock);
 	rtnl_unlock();
 	devlink_dpipe_entry_clear(&entry);
 	return err;
@@ -259,6 +263,7 @@ static int mlxsw_sp_dpipe_table_erif_counters_update(void *priv, bool enable)
 	int i;
 
 	rtnl_lock();
+	mutex_lock(&mlxsw_sp->router->lock);
 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
 		struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
 
@@ -271,6 +276,7 @@ static int mlxsw_sp_dpipe_table_erif_counters_update(void *priv, bool enable)
 			mlxsw_sp_rif_counter_free(mlxsw_sp, rif,
 						  MLXSW_SP_RIF_COUNTER_EGRESS);
 	}
+	mutex_unlock(&mlxsw_sp->router->lock);
 	rtnl_unlock();
 	return 0;
 }
@@ -547,6 +553,7 @@ mlxsw_sp_dpipe_table_host_entries_get(struct mlxsw_sp *mlxsw_sp,
 	int err;
 
 	rtnl_lock();
+	mutex_lock(&mlxsw_sp->router->lock);
 	i = 0;
 	rif_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
 start_again:
@@ -602,11 +609,13 @@ mlxsw_sp_dpipe_table_host_entries_get(struct mlxsw_sp *mlxsw_sp,
 	if (i != rif_count)
 		goto start_again;
+	mutex_unlock(&mlxsw_sp->router->lock);
 	rtnl_unlock();
 
 	return 0;
 
 err_ctx_prepare:
 err_entry_append:
+	mutex_unlock(&mlxsw_sp->router->lock);
 	rtnl_unlock();
 	return err;
 }
@@ -663,6 +672,7 @@ mlxsw_sp_dpipe_table_host_counters_update(struct mlxsw_sp *mlxsw_sp,
 	int i;
 
 	rtnl_lock();
+	mutex_lock(&mlxsw_sp->router->lock);
 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
 		struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
 		struct mlxsw_sp_neigh_entry *neigh_entry;
@@ -684,6 +694,7 @@ mlxsw_sp_dpipe_table_host_counters_update(struct mlxsw_sp *mlxsw_sp,
 							    enable);
 		}
 	}
+	mutex_unlock(&mlxsw_sp->router->lock);
 	rtnl_unlock();
 }
 
@@ -702,6 +713,7 @@ mlxsw_sp_dpipe_table_host_size_get(struct mlxsw_sp *mlxsw_sp, int type)
 	int i;
 
 	rtnl_lock();
+	mutex_lock(&mlxsw_sp->router->lock);
 	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
 		struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
 		struct mlxsw_sp_neigh_entry *neigh_entry;
@@ -721,6 +733,7 @@ mlxsw_sp_dpipe_table_host_size_get(struct mlxsw_sp *mlxsw_sp, int type)
 			size++;
 		}
 	}
+	mutex_unlock(&mlxsw_sp->router->lock);
 	rtnl_unlock();
 
 	return size;
@@ -1094,6 +1107,7 @@ mlxsw_sp_dpipe_table_adj_entries_get(struct mlxsw_sp *mlxsw_sp,
 	int err;
 
 	rtnl_lock();
+	mutex_lock(&mlxsw_sp->router->lock);
 	nh_count_max = mlxsw_sp_dpipe_table_adj_size(mlxsw_sp);
 start_again:
 	err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
@@ -1130,12 +1144,14 @@ mlxsw_sp_dpipe_table_adj_entries_get(struct mlxsw_sp *mlxsw_sp,
 	devlink_dpipe_entry_ctx_close(dump_ctx);
 	if (nh_count != nh_count_max)
 		goto start_again;
+	mutex_unlock(&mlxsw_sp->router->lock);
 	rtnl_unlock();
 
 	return 0;
 
 err_ctx_prepare:
 err_entry_append:
+	mutex_unlock(&mlxsw_sp->router->lock);
 	rtnl_unlock();
 	return err;
 }
@@ -1207,7 +1223,9 @@ mlxsw_sp_dpipe_table_adj_size_get(void *priv)
 	u64 size;
 
 	rtnl_lock();
+	mutex_lock(&mlxsw_sp->router->lock);
 	size = mlxsw_sp_dpipe_table_adj_size(mlxsw_sp);
+	mutex_unlock(&mlxsw_sp->router->lock);
 	rtnl_unlock();
 
 	return size;
-- 
2.24.1