Date:   Wed, 27 Oct 2021 18:19:59 +0300
From:   Ido Schimmel <idosch@...sch.org>
To:     netdev@...r.kernel.org
Cc:     davem@...emloft.net, kuba@...nel.org, petrm@...dia.com,
        jiri@...dia.com, mlxsw@...dia.com, Ido Schimmel <idosch@...dia.com>
Subject: [PATCH net-next 1/3] mlxsw: spectrum_qdisc: Offload root TBF as port shaper

From: Petr Machata <petrm@...dia.com>

The Spectrum ASIC allows a maximum shaper to be configured at all levels of
the scheduling hierarchy: TCs, subgroups, groups and also ports. Currently,
TBF always configures a subgroup shaper. But a user could reasonably express
the intent to configure a port shaper by putting TBF at the root position,
around ETS / PRIO. Accept this usage and offload it appropriately.
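
For example, with an illustrative port name (swp1) and illustrative rates,
the intent described above could be expressed along these lines; the root
TBF would now be offloaded as a port shaper, while a TBF attached to an ETS
band keeps being offloaded as a subgroup shaper for that traffic class:

  # Root TBF around ETS, offloaded as a port maximum shaper.
  $ tc qdisc replace dev swp1 root handle 1: \
        tbf rate 800mbit burst 16kb limit 1m
  $ tc qdisc replace dev swp1 parent 1:1 handle 11: \
        ets strict 8 priomap 7 6 5 4 3 2 1 0

  # Per-band TBF, still offloaded as a subgroup shaper.
  $ tc qdisc replace dev swp1 parent 11:8 handle 111: \
        tbf rate 100mbit burst 16kb limit 1m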

Signed-off-by: Petr Machata <petrm@...dia.com>
Signed-off-by: Ido Schimmel <idosch@...dia.com>
---
 .../ethernet/mellanox/mlxsw/spectrum_qdisc.c  | 55 +++++++++++++------
 1 file changed, 37 insertions(+), 18 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index ddb5ad88b350..4243d3b883ff 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -271,6 +271,7 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
 
 struct mlxsw_sp_qdisc_tree_validate {
 	bool forbid_ets;
+	bool forbid_root_tbf;
 	bool forbid_tbf;
 	bool forbid_red;
 };
@@ -310,18 +311,26 @@ __mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
 		if (validate.forbid_red)
 			return -EINVAL;
 		validate.forbid_red = true;
+		validate.forbid_root_tbf = true;
 		validate.forbid_ets = true;
 		break;
 	case MLXSW_SP_QDISC_TBF:
-		if (validate.forbid_tbf)
-			return -EINVAL;
-		validate.forbid_tbf = true;
-		validate.forbid_ets = true;
+		if (validate.forbid_root_tbf) {
+			if (validate.forbid_tbf)
+				return -EINVAL;
+			/* This is a TC TBF. */
+			validate.forbid_tbf = true;
+			validate.forbid_ets = true;
+		} else {
+			/* This is root TBF. */
+			validate.forbid_root_tbf = true;
+		}
 		break;
 	case MLXSW_SP_QDISC_PRIO:
 	case MLXSW_SP_QDISC_ETS:
 		if (validate.forbid_ets)
 			return -EINVAL;
+		validate.forbid_root_tbf = true;
 		validate.forbid_ets = true;
 		break;
 	default:
@@ -905,16 +914,34 @@ mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
 	mlxsw_sp_qdisc->stats_base.backlog = 0;
 }
 
+static enum mlxsw_reg_qeec_hr
+mlxsw_sp_qdisc_tbf_hr(struct mlxsw_sp_port *mlxsw_sp_port,
+		      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+	if (mlxsw_sp_qdisc == &mlxsw_sp_port->qdisc->root_qdisc)
+		return MLXSW_REG_QEEC_HR_PORT;
+
+	/* Configure subgroup shaper, so that both UC and MC traffic is subject
+	 * to shaping. That is unlike RED, however UC queue lengths are going to
+	 * be different than MC ones due to different pool and quota
+	 * configurations, so the configuration is not applicable. For shaper on
+	 * the other hand, subjecting the overall stream to the configured
+	 * shaper makes sense. Also note that that is what we do for
+	 * ieee_setmaxrate().
+	 */
+	return MLXSW_REG_QEEC_HR_SUBGROUP;
+}
+
 static int
 mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
 			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
 {
+	enum mlxsw_reg_qeec_hr hr = mlxsw_sp_qdisc_tbf_hr(mlxsw_sp_port,
+							  mlxsw_sp_qdisc);
 	int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
 						       mlxsw_sp_qdisc);
 
-	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
-					     MLXSW_REG_QEEC_HR_SUBGROUP,
-					     tclass_num, 0,
+	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, hr, tclass_num, 0,
 					     MLXSW_REG_QEEC_MAS_DIS, 0);
 }
 
@@ -996,6 +1023,8 @@ mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
 			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
 			   void *params)
 {
+	enum mlxsw_reg_qeec_hr hr = mlxsw_sp_qdisc_tbf_hr(mlxsw_sp_port,
+							  mlxsw_sp_qdisc);
 	struct tc_tbf_qopt_offload_replace_params *p = params;
 	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
 	int tclass_num;
@@ -1016,17 +1045,7 @@ mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
 		/* check_params above was supposed to reject this value. */
 		return -EINVAL;
 
-	/* Configure subgroup shaper, so that both UC and MC traffic is subject
-	 * to shaping. That is unlike RED, however UC queue lengths are going to
-	 * be different than MC ones due to different pool and quota
-	 * configurations, so the configuration is not applicable. For shaper on
-	 * the other hand, subjecting the overall stream to the configured
-	 * shaper makes sense. Also note that that is what we do for
-	 * ieee_setmaxrate().
-	 */
-	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
-					     MLXSW_REG_QEEC_HR_SUBGROUP,
-					     tclass_num, 0,
+	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, hr, tclass_num, 0,
 					     rate_kbps, burst_size);
 }
 
-- 
2.31.1
