Message-Id: <20181202124749.21420-5-jerinj@marvell.com>
Date:   Sun,  2 Dec 2018 18:17:39 +0530
From:   Jerin Jacob <jerinjacobk@...il.com>
To:     netdev@...r.kernel.org, davem@...emloft.net
Cc:     sgoutham@...vell.com, lcherian@...vell.com, gakula@...vell.com,
        jerinj@...vell.com, jerinjacobk@...il.com
Subject: [PATCH net-next v2 04/14] octeontx2-af: Add support for runtime RSS algo index reservation

Introduce reserve_flowkey_alg_idx() to reserve an RSS algorithm index;
it internally uses set_flowkey_fields() to generate the hash fields
dynamically based on the given flow key.

On AF driver init, a predefined set of RSS algo indexes is reserved,
which will be available at all times to all AF driver consumers.
The leftover algo indexes can be reserved at runtime through the
existing nix_rss_flowkey_cfg mailbox message.

NIX_FLOW_KEY_TYPE_PORT is removed from the predefined set of RSS flow
types as it is not used by any consumer.
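
As an illustration (not part of this patch), below is a minimal sketch
of the consumer side of this mailbox flow. struct otx2_nic and the
otx2_mbox_alloc_msg_nix_rss_flowkey_cfg()/otx2_sync_mbox_msg() helpers
are hypothetical stand-ins for a consumer driver's mbox plumbing; only
the message fields, flow key type flags and error code come from this
series:

	static int example_rss_flowkey_cfg(struct otx2_nic *pfvf)
	{
		struct nix_rss_flowkey_cfg *req;

		/* Hash on IPv4/IPv6 SIP/DIP plus TCP src/dst ports.
		 * Hypothetical alloc helper for the existing
		 * nix_rss_flowkey_cfg message.
		 */
		req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox);
		if (!req)
			return -ENOMEM;

		req->flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 |
				   NIX_FLOW_KEY_TYPE_IPV6 |
				   NIX_FLOW_KEY_TYPE_TCP;
		req->group = DEFAULT_RSS_CONTEXT_GROUP;
		req->mcam_index = -1;	/* default MCAM entry */

		/* AF returns the index of an already reserved matching
		 * algo if one exists, else reserves a free one, and fails
		 * with NIX_AF_ERR_RSS_NOSPC_ALGO once all
		 * NIX_FLOW_KEY_ALG_MAX (32) entries are in use.
		 */
		return otx2_sync_mbox_msg(&pfvf->mbox);
	}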

Signed-off-by: Jerin Jacob <jerinj@...vell.com>
---
 .../ethernet/marvell/octeontx2/af/common.h    |  14 --
 .../net/ethernet/marvell/octeontx2/af/mbox.h  |   1 +
 .../net/ethernet/marvell/octeontx2/af/rvu.h   |   7 +
 .../ethernet/marvell/octeontx2/af/rvu_nix.c   | 193 ++++++++++--------
 4 files changed, 120 insertions(+), 95 deletions(-)

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index ceb3f1dbede5..72ad04eb8769 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -193,18 +193,4 @@ enum nix_scheduler {
 #define DEFAULT_RSS_CONTEXT_GROUP	0
 #define MAX_RSS_INDIR_TBL_SIZE		256 /* 1 << Max adder bits */
 
-/* NIX flow tag algorithm indices, max is 31 */
-enum {
-	NIX_FLOW_KEY_ALG_PORT,
-	NIX_FLOW_KEY_ALG_IP,
-	NIX_FLOW_KEY_ALG_TCP,
-	NIX_FLOW_KEY_ALG_UDP,
-	NIX_FLOW_KEY_ALG_SCTP,
-	NIX_FLOW_KEY_ALG_TCP_UDP,
-	NIX_FLOW_KEY_ALG_TCP_SCTP,
-	NIX_FLOW_KEY_ALG_UDP_SCTP,
-	NIX_FLOW_KEY_ALG_TCP_UDP_SCTP,
-	NIX_FLOW_KEY_ALG_MAX,
-};
-
 #endif /* COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index a293091cb414..640a230c43a2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -415,6 +415,7 @@ enum nix_af_status {
 	NIX_AF_SMQ_FLUSH_FAILED     = -413,
 	NIX_AF_ERR_LF_RESET         = -414,
 	NIX_AF_ERR_RSS_NOSPC_FIELD  = -415,
+	NIX_AF_ERR_RSS_NOSPC_ALGO   = -416,
 	NIX_AF_INVAL_NPA_PF_FUNC    = -419,
 	NIX_AF_INVAL_SSO_PF_FUNC    = -420,
 };
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 48dbbaa9dd4d..a0e445d8eb2d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -164,9 +164,16 @@ struct npc_pkind {
 	u32	*pfchan_map;
 };
 
+struct nix_flowkey {
+#define NIX_FLOW_KEY_ALG_MAX 32
+	u32 flowkey[NIX_FLOW_KEY_ALG_MAX];
+	int in_use;
+};
+
 struct nix_hw {
 	struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
 	struct nix_mcast mcast;
+	struct nix_flowkey flowkey;
 };
 
 struct rvu_hwinfo {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index c9f51ab4d875..a1f268652b04 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -1597,57 +1597,16 @@ int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
 }
 
 /* Returns the ALG index to be set into NPC_RX_ACTION */
-static int get_flowkey_alg_idx(u32 flow_cfg)
+static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
 {
-	u32 ip_cfg;
+	int i;
 
-	flow_cfg &= ~NIX_FLOW_KEY_TYPE_PORT;
-	ip_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
-	if (flow_cfg == ip_cfg)
-		return NIX_FLOW_KEY_ALG_IP;
-	else if (flow_cfg == (ip_cfg | NIX_FLOW_KEY_TYPE_TCP))
-		return NIX_FLOW_KEY_ALG_TCP;
-	else if (flow_cfg == (ip_cfg | NIX_FLOW_KEY_TYPE_UDP))
-		return NIX_FLOW_KEY_ALG_UDP;
-	else if (flow_cfg == (ip_cfg | NIX_FLOW_KEY_TYPE_SCTP))
-		return NIX_FLOW_KEY_ALG_SCTP;
-	else if (flow_cfg == (ip_cfg | NIX_FLOW_KEY_TYPE_TCP |
-			      NIX_FLOW_KEY_TYPE_UDP))
-		return NIX_FLOW_KEY_ALG_TCP_UDP;
-	else if (flow_cfg == (ip_cfg | NIX_FLOW_KEY_TYPE_TCP |
-			      NIX_FLOW_KEY_TYPE_SCTP))
-		return NIX_FLOW_KEY_ALG_TCP_SCTP;
-	else if (flow_cfg == (ip_cfg | NIX_FLOW_KEY_TYPE_UDP |
-			      NIX_FLOW_KEY_TYPE_SCTP))
-		return NIX_FLOW_KEY_ALG_UDP_SCTP;
-	else if (flow_cfg == (ip_cfg | NIX_FLOW_KEY_TYPE_TCP |
-			      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP))
-		return NIX_FLOW_KEY_ALG_TCP_UDP_SCTP;
+	/* Scan over existing algo entries to find a match */
+	for (i = 0; i < nix_hw->flowkey.in_use; i++)
+		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
+			return i;
 
-	return NIX_FLOW_KEY_ALG_PORT;
-}
-
-int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
-					 struct nix_rss_flowkey_cfg *req,
-					 struct nix_rss_flowkey_cfg_rsp *rsp)
-{
-	struct rvu_hwinfo *hw = rvu->hw;
-	u16 pcifunc = req->hdr.pcifunc;
-	int alg_idx, nixlf, blkaddr;
-
-	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
-	if (blkaddr < 0)
-		return NIX_AF_ERR_AF_LF_INVALID;
-
-	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
-	if (nixlf < 0)
-		return NIX_AF_ERR_AF_LF_INVALID;
-
-	alg_idx = get_flowkey_alg_idx(req->flowkey_cfg);
-	rsp->alg_idx = alg_idx;
-	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
-				       alg_idx, req->mcam_index);
-	return 0;
+	return -ERANGE;
 }
 
 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
@@ -1781,71 +1740,141 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
 		return NIX_AF_ERR_RSS_NOSPC_FIELD;
 }
 
-static void nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
+static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
+{
+	u64 field[FIELDS_PER_ALG];
+	struct nix_hw *hw;
+	int fid, rc;
+
+	hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!hw)
+		return -EINVAL;
+
+	/* No room to add a new flow hash algorithm */
+	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
+		return NIX_AF_ERR_RSS_NOSPC_ALGO;
+
+	/* Generate algo fields for the given flow_cfg */
+	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
+	if (rc)
+		return rc;
+
+	/* Update ALGX_FIELDX register with generated fields */
+	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
+		rvu_write64(rvu, blkaddr,
+			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
+							   fid), field[fid]);
+
+	/* Store the flow_cfg for further lookup */
+	rc = hw->flowkey.in_use;
+	hw->flowkey.flowkey[rc] = flow_cfg;
+	hw->flowkey.in_use++;
+
+	return rc;
+}
+
+int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
+					 struct nix_rss_flowkey_cfg *req,
+					 struct nix_rss_flowkey_cfg_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	int alg_idx, nixlf, blkaddr;
+	struct nix_hw *nix_hw;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+	if (nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -EINVAL;
+
+	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
+	/* Failed to get algo index from the existing list, reserve a new one */
+	if (alg_idx < 0) {
+		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
+						  req->flowkey_cfg);
+		if (alg_idx < 0)
+			return alg_idx;
+	}
+	rsp->alg_idx = alg_idx;
+	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
+				       alg_idx, req->mcam_index);
+	return 0;
+}
+
+static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
 {
-	u64 field[NIX_FLOW_KEY_ALG_MAX][FIELDS_PER_ALG];
 	u32 flowkey_cfg, minkey_cfg;
-	int alg, fid;
+	int alg, fid, rc;
 
-	memset(&field, 0, sizeof(u64) * NIX_FLOW_KEY_ALG_MAX * FIELDS_PER_ALG);
-
-	/* Only incoming channel number */
-	flowkey_cfg = NIX_FLOW_KEY_TYPE_PORT;
-	set_flowkey_fields((void *)&field[NIX_FLOW_KEY_ALG_PORT], flowkey_cfg);
-
-	/* For a incoming pkt if none of the fields match then flowkey
-	 * will be zero, hence tag generated will also be zero.
-	 * RSS entry at rsse_index = NIX_AF_LF()_RSS_GRP()[OFFSET] will
-	 * be used to queue the packet.
-	 */
+	/* Disable all flow key algx fieldx */
+	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
+		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
+			rvu_write64(rvu, blkaddr,
+				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
+				    0);
+	}
 
 	/* IPv4/IPv6 SIP/DIPs */
 	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
-	set_flowkey_fields((void *)&field[NIX_FLOW_KEY_ALG_IP], flowkey_cfg);
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
 
 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
 	minkey_cfg = flowkey_cfg;
 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
-	set_flowkey_fields((void *)&field[NIX_FLOW_KEY_ALG_TCP], flowkey_cfg);
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
 
 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
-	set_flowkey_fields((void *)&field[NIX_FLOW_KEY_ALG_UDP], flowkey_cfg);
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
 
 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
-	set_flowkey_fields((void *)&field[NIX_FLOW_KEY_ALG_SCTP], flowkey_cfg);
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
 
 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
 			NIX_FLOW_KEY_TYPE_UDP;
-	set_flowkey_fields((void *)&field[NIX_FLOW_KEY_ALG_TCP_UDP],
-			   flowkey_cfg);
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
 
 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
 			NIX_FLOW_KEY_TYPE_SCTP;
-	set_flowkey_fields((void *)&field[NIX_FLOW_KEY_ALG_TCP_SCTP],
-			   flowkey_cfg);
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
 
 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
 			NIX_FLOW_KEY_TYPE_SCTP;
-	set_flowkey_fields((void *)&field[NIX_FLOW_KEY_ALG_UDP_SCTP],
-			   flowkey_cfg);
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
 
 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
 		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
-	set_flowkey_fields((void *)&field[NIX_FLOW_KEY_ALG_TCP_UDP_SCTP],
-			   flowkey_cfg);
+	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+	if (rc < 0)
+		return rc;
 
-	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
-		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
-			rvu_write64(rvu, blkaddr,
-				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
-				    field[alg][fid]);
-	}
+	return 0;
 }
 
 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
@@ -2314,7 +2343,9 @@ int rvu_nix_init(struct rvu *rvu)
 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
 			    (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
 
-		nix_rx_flowkey_alg_cfg(rvu, blkaddr);
+		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
+		if (err)
+			return err;
 
 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
 		nix_link_config(rvu, blkaddr);
-- 
2.19.2
