Date:   Thu, 25 Nov 2021 16:48:12 +0100
From:   Maxime Chevallier <maxime.chevallier@...tlin.com>
To:     davem@...emloft.net
Cc:     Maxime Chevallier <maxime.chevallier@...tlin.com>,
        netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
        thomas.petazzoni@...tlin.com, gregory.clement@...tlin.com,
        Andrew Lunn <andrew@...n.ch>,
        Pali Rohár <pali@...nel.org>
Subject: [PATCH net-next 3/4] net: mvneta: Allow having more than one queue per TC

The current mqprio implementation assumes that we are only using one
queue per TC. Use the offset and count parameters to allow using
multiple queues per TC. In that case, the controller will use a
standard round-robin algorithm to pick between the queues assigned to
the same TC, which all share the same priority.

This only applies to VLAN priorities in ingress traffic, each TC
corresponding to a VLAN priority.
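
As an illustration (hypothetical, not part of this patch: the device
name and queue layout below are made up), such a configuration could
be requested through the mqprio qdisc, e.g.:

  tc qdisc add dev eth0 root handle 100: mqprio \
          num_tc 2 map 0 0 0 0 1 1 1 1 queues 2@0 2@2 hw 1

With this patch applied, ingress frames tagged with VLAN priority 0
(TC 0) would then be spread round-robin across RX queues 0-1, and
frames tagged with VLAN priority 1 (TC 1) across RX queues 2-3.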

Signed-off-by: Maxime Chevallier <maxime.chevallier@...tlin.com>
---
 drivers/net/ethernet/marvell/mvneta.c | 35 +++++++++++++++------------
 1 file changed, 20 insertions(+), 15 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d3ce87e69d2a..aba452e8abfe 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -493,7 +493,6 @@ struct mvneta_port {
 	u8 mcast_count[256];
 	u16 tx_ring_size;
 	u16 rx_ring_size;
-	u8 prio_tc_map[8];
 
 	phy_interface_t phy_interface;
 	struct device_node *dn;
@@ -4897,13 +4896,12 @@ static void mvneta_clear_rx_prio_map(struct mvneta_port *pp)
 	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
 }
 
-static void mvneta_setup_rx_prio_map(struct mvneta_port *pp)
+static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
 {
-	u32 val = 0;
-	int i;
+	u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ);
 
-	for (i = 0; i < rxq_number; i++)
-		val |= MVNETA_VLAN_PRIO_RXQ_MAP(i, pp->prio_tc_map[i]);
+	val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7);
+	val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq);
 
 	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
 }
@@ -4912,8 +4910,8 @@ static int mvneta_setup_mqprio(struct net_device *dev,
 			       struct tc_mqprio_qopt_offload *mqprio)
 {
 	struct mvneta_port *pp = netdev_priv(dev);
+	int rxq, tc;
 	u8 num_tc;
-	int i;
 
 	if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS)
 		return 0;
@@ -4923,21 +4921,28 @@ static int mvneta_setup_mqprio(struct net_device *dev,
 	if (num_tc > rxq_number)
 		return -EINVAL;
 
+	mvneta_clear_rx_prio_map(pp);
+
 	if (!num_tc) {
-		mvneta_clear_rx_prio_map(pp);
 		netdev_reset_tc(dev);
 		return 0;
 	}
 
-	memcpy(pp->prio_tc_map, mqprio->qopt.prio_tc_map,
-	       sizeof(pp->prio_tc_map));
+	netdev_set_num_tc(dev, mqprio->qopt.num_tc);
+
+	for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
+		netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc],
+				    mqprio->qopt.offset[tc]);
 
-	mvneta_setup_rx_prio_map(pp);
+		for (rxq = mqprio->qopt.offset[tc];
+		     rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
+		     rxq++) {
+			if (rxq >= rxq_number)
+				return -EINVAL;
 
-	netdev_set_num_tc(dev, mqprio->qopt.num_tc);
-	for (i = 0; i < mqprio->qopt.num_tc; i++)
-		netdev_set_tc_queue(dev, i, mqprio->qopt.count[i],
-				    mqprio->qopt.offset[i]);
+			mvneta_map_vlan_prio_to_rxq(pp, tc, rxq);
+		}
+	}
 
 	return 0;
 }
-- 
2.25.4
