Message-Id: <1573709193-15446-18-git-send-email-sunil.kovvuri@gmail.com>
Date:   Thu, 14 Nov 2019 10:56:32 +0530
From:   sunil.kovvuri@...il.com
To:     netdev@...r.kernel.org
Cc:     davem@...emloft.net, Sunil Goutham <sgoutham@...vell.com>
Subject: [PATCH v2 17/18] octeontx2-af: Add option to disable dynamic entry caching in NDC

From: Sunil Goutham <sgoutham@...vell.com>

A config option is added to disable caching of dynamic entries
like SQEs and stack pages. It also locks down all HW contexts in NDC,
preventing them from being evicted.

This option is useful when the queue count is large and NDC cache
misses are high. It is a trade-off between SQ context misses and
caching of dynamically changing entries like SQE and stack page
pointers.

Signed-off-by: Sunil Goutham <sgoutham@...vell.com>
---
 drivers/net/ethernet/marvell/octeontx2/Kconfig     | 10 ++++
 .../net/ethernet/marvell/octeontx2/af/rvu_nix.c    | 64 +++++++++++++++++++++-
 .../net/ethernet/marvell/octeontx2/af/rvu_npa.c    | 42 ++++++++++++++
 3 files changed, 113 insertions(+), 3 deletions(-)
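
A condensed sketch of the two pieces of this change, restated from the
hunks below (the function, register, op and bit names are the driver's
own; the NPA side mirrors the NIX side shown here):

	/* 1) With CONFIG_NDC_DIS_DYNAMIC_CACHING enabled, every successful
	 *    INIT enqueue is followed by a LOCK enqueue for the same context,
	 *    so NIX CQ/SQ/RQ/RSS (and NPA Aura/Pool) contexts cannot be
	 *    evicted from NDC.
	 */
	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
	if (!err)
		err = nix_lf_hwctx_lockdown(rvu, req); /* enqueues NIX_AQ_INSTOP_LOCK */

	/* 2) At AQ init time, NDC_CFG additionally gets the bit that stops
	 *    NDC from caching the dynamic entries themselves.
	 */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
	cfg &= ~0x3FFEULL;		/* do not bypass NDC cache */
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
	cfg |= 0x04ULL;			/* NIX: no caching of SQBs/SQEs      */
#endif					/* NPA: cfg |= 0x10ULL (stack pages) */
	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);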

diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 711ada7..fb34f61 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -16,3 +16,13 @@ config OCTEONTX2_AF
 	  Unit's admin function manager which manages all RVU HW resources
 	  and provides a medium to other PF/VFs to configure HW. Should be
 	  enabled for other RVU device drivers to work.
+
+config NDC_DIS_DYNAMIC_CACHING
+	bool "Disable caching of dynamic entries in NDC"
+	depends on OCTEONTX2_AF
+	default n
+	---help---
+	  This config option disables caching of dynamic entries such as NIX
+	  SQEs and NPA stack pages in NDC. It also locks down NIX SQ/CQ/RQ/RSS
+	  and NPA Aura/Pool contexts.
+
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 86042a7..63190b8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -661,6 +661,21 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
 	return 0;
 }
 
+static const char *nix_get_ctx_name(int ctype)
+{
+	switch (ctype) {
+	case NIX_AQ_CTYPE_CQ:
+		return "CQ";
+	case NIX_AQ_CTYPE_SQ:
+		return "SQ";
+	case NIX_AQ_CTYPE_RQ:
+		return "RQ";
+	case NIX_AQ_CTYPE_RSS:
+		return "RSS";
+	}
+	return "";
+}
+
 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
 {
 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
@@ -705,21 +720,60 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
 		if (rc) {
 			err = rc;
 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
-				(req->ctype == NIX_AQ_CTYPE_CQ) ?
-				"CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
-				"RQ" : "SQ"), qidx);
+				nix_get_ctx_name(req->ctype), qidx);
 		}
 	}
 
 	return err;
 }
 
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
+{
+	struct nix_aq_enq_req lock_ctx_req;
+	int err;
+
+	if (req->op != NIX_AQ_INSTOP_INIT)
+		return 0;
+
+	if (req->ctype == NIX_AQ_CTYPE_MCE ||
+	    req->ctype == NIX_AQ_CTYPE_DYNO)
+		return 0;
+
+	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
+	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
+	lock_ctx_req.ctype = req->ctype;
+	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
+	lock_ctx_req.qidx = req->qidx;
+	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
+	if (err)
+		dev_err(rvu->dev,
+			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
+			req->hdr.pcifunc,
+			nix_get_ctx_name(req->ctype), req->qidx);
+	return err;
+}
+
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
+				struct nix_aq_enq_req *req,
+				struct nix_aq_enq_rsp *rsp)
+{
+	int err;
+
+	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
+	if (!err)
+		err = nix_lf_hwctx_lockdown(rvu, req);
+	return err;
+}
+#else
+
 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
 				struct nix_aq_enq_req *req,
 				struct nix_aq_enq_rsp *rsp)
 {
 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
 }
+#endif
 
 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
 				       struct hwctx_disable_req *req,
@@ -2871,6 +2925,10 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
 	/* Do not bypass NDC cache */
 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
 	cfg &= ~0x3FFEULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+	/* Disable caching of SQB aka SQEs */
+	cfg |= 0x04ULL;
+#endif
 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
 
 	/* Result structure can be followed by RQ/SQ/CQ context at
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index a8f9376..6e7c7f4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -241,12 +241,50 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
 	return err;
 }
 
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
+{
+	struct npa_aq_enq_req lock_ctx_req;
+	int err;
+
+	if (req->op != NPA_AQ_INSTOP_INIT)
+		return 0;
+
+	memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
+	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
+	lock_ctx_req.ctype = req->ctype;
+	lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
+	lock_ctx_req.aura_id = req->aura_id;
+	err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
+	if (err)
+		dev_err(rvu->dev,
+			"PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
+			req->hdr.pcifunc,
+			(req->ctype == NPA_AQ_CTYPE_AURA) ?
+			"Aura" : "Pool", req->aura_id);
+	return err;
+}
+
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
+				struct npa_aq_enq_req *req,
+				struct npa_aq_enq_rsp *rsp)
+{
+	int err;
+
+	err = rvu_npa_aq_enq_inst(rvu, req, rsp);
+	if (!err)
+		err = npa_lf_hwctx_lockdown(rvu, req);
+	return err;
+}
+#else
+
 int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
 				struct npa_aq_enq_req *req,
 				struct npa_aq_enq_rsp *rsp)
 {
 	return rvu_npa_aq_enq_inst(rvu, req, rsp);
 }
+#endif
 
 int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
 				       struct hwctx_disable_req *req,
@@ -427,6 +465,10 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
 	/* Do not bypass NDC cache */
 	cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
 	cfg &= ~0x03DULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+	/* Disable caching of stack pages */
+	cfg |= 0x10ULL;
+#endif
 	rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
 
 	/* Result structure can be followed by Aura/Pool context at
-- 
2.7.4
