Message-ID: <20251026150916.352061-13-tanmay@marvell.com>
Date: Sun, 26 Oct 2025 20:39:07 +0530
From: Tanmay Jagdale <tanmay@...vell.com>
To: <davem@...emloft.net>, <horms@...nel.org>, <leon@...nel.org>,
        <herbert@...dor.apana.org.au>, <bbhushan2@...vell.com>,
        <sgoutham@...vell.com>
CC: <linux-crypto@...r.kernel.org>, <netdev@...r.kernel.org>,
        Tanmay Jagdale
	<tanmay@...vell.com>
Subject: [PATCH net-next v5 12/15] octeontx2-pf: ipsec: Configure backpressure

Backpressure can be asserted by the NIX and CPT hardware blocks during both
the first and the second pass. In the first pass, backpressure can be
asserted by CPT. Since a single global CPT LF handles IPsec traffic for the
whole system, allocate a global backpressure ID and configure the NIX RX
channel to listen to this ID in addition to its existing ones. Define a
'bpid' to refer to this first-pass backpressure and configure the
corresponding NIX RX channels.

During the second pass, NIX can assert backpressure towards CPT when the
buffer pool reaches its threshold. Hence, allocate another BPID for the
second pass and configure the CPT X2P link to listen to this backpressure
ID. Additionally, the CQs can assert backpressure in the second pass, so
configure the CPT X2P link to listen to those BPIDs as well.

Signed-off-by: Tanmay Jagdale <tanmay@...vell.com>
---
Changes in V5:
- New patch in this series: the backpressure-related changes were split
  out of the previous patch (11/15) and additional BPID-related
  configuration was added.
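
For reviewers, a condensed sketch of the first-pass setup implemented by
cn10k_ipsec_configure_cpt_bpid() in the diff below: two BPIDs are allocated
from the AF (one per pass) and the first-pass BPID is programmed into the
first free BPx slot of the PF's NIX RX channel. Identifiers are taken from
the patch; error handling and mailbox locking are omitted for brevity.

	/* Sketch only -- mirrors cn10k_ipsec_configure_cpt_bpid() below */
	req = otx2_mbox_alloc_msg_nix_alloc_bpids(&pfvf->mbox);
	req->bpid_cnt = 2;		/* 1st pass LPB + 2nd pass SPB */
	req->type = NIX_INTF_TYPE_CPT;
	otx2_sync_mbox_msg(&pfvf->mbox);
	rsp = (struct nix_bpids *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	pfvf->ipsec.bpid = rsp->bpids[0];
	pfvf->ipsec.spb_bpid = rsp->bpids[1];

	/* Claim a free BPx slot in the PF RX channel for the 1st pass BPID */
	if (!FIELD_GET(NIX_AF_RX_CHANX_CFG_BP1_ENA, rx_chan_cfg))
		rx_chan_cfg |= FIELD_PREP(NIX_AF_RX_CHANX_CFG_BP1_ENA, 1) |
			       FIELD_PREP(NIX_AF_RX_CHANX_CFG_BP1, pfvf->ipsec.bpid);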

 .../ethernet/marvell/octeontx2/af/rvu_cpt.c   |   1 +
 .../marvell/octeontx2/nic/cn10k_ipsec.c       | 175 +++++++++++++++++-
 .../marvell/octeontx2/nic/cn10k_ipsec.h       |   3 +
 .../ethernet/marvell/octeontx2/nic/otx2_reg.h |   9 +
 4 files changed, 185 insertions(+), 3 deletions(-)
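
For the second pass, each CPT X2P link RX channel is configured to listen
both to the CQ BPID the PF already owns (pfvf->bpid[chan]) and to the newly
allocated SPB pool BPID. A condensed sketch of the per-channel value sent
via the nix_rx_chan_cfg mailbox message, again taken from the patch below
with error handling omitted:

	/* Sketch only -- mirrors the per-channel loop in
	 * cn10k_ipsec_configure_cpt_bpid() below
	 */
	chan_cfg->chan = chan;
	chan_cfg->type = NIX_INTF_TYPE_CPT;
	chan_cfg->val  = FIELD_PREP(NIX_AF_RX_CHANX_CFG_BP0_ENA, 1) |
			 FIELD_PREP(NIX_AF_RX_CHANX_CFG_BP0, pfvf->bpid[chan]) |
			 FIELD_PREP(NIX_AF_RX_CHANX_CFG_BP1_ENA, 1) |
			 FIELD_PREP(NIX_AF_RX_CHANX_CFG_BP1, pfvf->ipsec.spb_bpid);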

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index edc3c356dba3..797e2dd83d29 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -609,6 +609,7 @@ static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
 	if (!is_rvu_otx2(rvu)) {
 		val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
 		val |= (u64)rvu->hw->cpt_chan_base;
+		val |= 0x2 << 20;
 
 		rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
 		rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
index 664ccfc7e80d..d545e56e0b6d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -439,6 +439,7 @@ static int cn10k_inb_nix_inline_ipsec_cfg(struct otx2_nic *pfvf)
 	req->opcode = CN10K_IPSEC_MAJOR_OP_INB_IPSEC | (1 << 6);
 	req->param1 = 7; /* bit 0:ip_csum_dis 1:tcp_csum_dis 2:esp_trailer_dis */
 	req->param2 = 0;
+	req->bpid = pfvf->ipsec.bpid;
 	req->credit = (pfvf->qset.rqe_cnt * 3) / 4;
 	req->credit_th = pfvf->qset.rqe_cnt / 10;
 	req->ctx_ilen_valid = 1;
@@ -485,7 +486,35 @@ static int cn10k_ipsec_ingress_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_
 	return otx2_sync_mbox_msg(&pfvf->mbox);
 }
 
-static int cn10k_ipsec_aura_and_pool_init(struct otx2_nic *pfvf, int pool_id)
+/* Enable backpressure for the specified aura since
+ * it cannot be enabled during aura initialization.
+ */
+static int cn10k_ipsec_enable_aura_backpressure(struct otx2_nic *pfvf,
+						int aura_id, int bpid)
+{
+	struct npa_aq_enq_req *npa_aq;
+
+	npa_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+	if (!npa_aq)
+		return -ENOMEM;
+
+	npa_aq->aura.bp_ena = 1;
+	npa_aq->aura_mask.bp_ena = 1;
+	npa_aq->aura.nix0_bpid = bpid;
+	npa_aq->aura_mask.nix0_bpid = GENMASK(8, 0);
+	npa_aq->aura.bp = (255 - ((50 * 256) / 100));
+	npa_aq->aura_mask.bp = GENMASK(7, 0);
+
+	/* Fill NPA AQ info */
+	npa_aq->aura_id = aura_id;
+	npa_aq->ctype = NPA_AQ_CTYPE_AURA;
+	npa_aq->op = NPA_AQ_INSTOP_WRITE;
+
+	return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+static int cn10k_ipsec_aura_and_pool_init(struct otx2_nic *pfvf, int pool_id,
+					  int bpid)
 {
 	struct otx2_hw *hw = &pfvf->hw;
 	struct otx2_pool *pool = NULL;
@@ -523,6 +552,11 @@ static int cn10k_ipsec_aura_and_pool_init(struct otx2_nic *pfvf, int pool_id)
 		pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr + OTX2_HEAD_ROOM);
 	}
 
+	/* Enable backpressure for the aura */
+	err = cn10k_ipsec_enable_aura_backpressure(pfvf, pool_id, bpid);
+	if (err)
+		goto free_auras;
+
 	return err;
 
 free_auras:
@@ -539,7 +573,8 @@ static int cn10k_ipsec_setup_nix_rx_hw_resources(struct otx2_nic *pfvf)
 	mutex_lock(&pfvf->mbox.lock);
 
 	/* Initialize Pool for first pass */
-	err = cn10k_ipsec_aura_and_pool_init(pfvf, pfvf->ipsec.inb_ipsec_pool);
+	err = cn10k_ipsec_aura_and_pool_init(pfvf, pfvf->ipsec.inb_ipsec_pool,
+					     pfvf->ipsec.bpid);
 	if (err)
 		return err;
 
@@ -555,7 +590,8 @@ static int cn10k_ipsec_setup_nix_rx_hw_resources(struct otx2_nic *pfvf)
 
 	for (pool = pfvf->ipsec.inb_ipsec_spb_pool;
 	     pool < pfvf->hw.rx_queues + pfvf->ipsec.inb_ipsec_spb_pool; pool++) {
-		err = cn10k_ipsec_aura_and_pool_init(pfvf, pool);
+		err = cn10k_ipsec_aura_and_pool_init(pfvf, pool,
+						     pfvf->ipsec.spb_bpid);
 		if (err)
 			goto free_auras;
 	}
@@ -1166,6 +1202,29 @@ void cn10k_ipsec_free_aura_ptrs(struct otx2_nic *pfvf)
 	}
 }
 
+static int cn10k_ipsec_free_cpt_bpid(struct otx2_nic *pfvf)
+{
+	struct nix_bpids *req;
+	int rc;
+
+	req = otx2_mbox_alloc_msg_nix_free_bpids(&pfvf->mbox);
+	if (!req)
+		return -ENOMEM;
+
+	req->bpid_cnt = 2;
+	req->bpids[0] = pfvf->ipsec.bpid;
+	req->bpids[1] = pfvf->ipsec.spb_bpid;
+
+	rc = otx2_sync_mbox_msg(&pfvf->mbox);
+	if (rc)
+		return rc;
+
+	/* Clear the bpids */
+	pfvf->ipsec.bpid = 0;
+	pfvf->ipsec.spb_bpid = 0;
+	return 0;
+}
+
 static void cn10k_ipsec_free_hw_resources(struct otx2_nic *pfvf)
 {
 	int vec;
@@ -1187,6 +1246,111 @@ static void cn10k_ipsec_free_hw_resources(struct otx2_nic *pfvf)
 
 	vec = pci_irq_vector(pfvf->pdev, pfvf->hw.npa_msixoff);
 	free_irq(vec, pfvf);
+
+	if (pfvf->ipsec.bpid && pfvf->ipsec.spb_bpid)
+		cn10k_ipsec_free_cpt_bpid(pfvf);
+}
+
+static int cn10k_ipsec_configure_cpt_bpid(struct otx2_nic *pfvf)
+{
+	struct nix_rx_chan_cfg *chan_cfg, *chan_cfg_rsp;
+	struct nix_alloc_bpid_req *req;
+	int chan, chan_cnt = 1;
+	struct nix_bpids *rsp;
+	u64 rx_chan_cfg;
+	int rc;
+
+	req = otx2_mbox_alloc_msg_nix_alloc_bpids(&pfvf->mbox);
+	if (!req)
+		return -ENOMEM;
+
+	/* Request 2 BPIDs:
+	 * One for 1st pass LPB pool and another for 2nd pass SPB pool
+	 */
+	req->bpid_cnt = 2;
+	req->type = NIX_INTF_TYPE_CPT;
+
+	rc = otx2_sync_mbox_msg(&pfvf->mbox);
+	if (rc)
+		return rc;
+
+	rsp = (struct nix_bpids *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+	if (IS_ERR(rsp))
+		return PTR_ERR(rsp);
+
+	/* Store the bpid for configuring it in the future */
+	pfvf->ipsec.bpid = rsp->bpids[0];
+	pfvf->ipsec.spb_bpid = rsp->bpids[1];
+
+	/* Get the default RX channel configuration */
+	chan_cfg = otx2_mbox_alloc_msg_nix_rx_chan_cfg(&pfvf->mbox);
+	if (!chan_cfg)
+		return -ENOMEM;
+
+	chan_cfg->read = true;
+	rc = otx2_sync_mbox_msg(&pfvf->mbox);
+	if (rc)
+		return rc;
+
+	/* Get the response */
+	chan_cfg_rsp = (struct nix_rx_chan_cfg *)
+			otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &chan_cfg->hdr);
+
+	rx_chan_cfg = chan_cfg_rsp->val;
+	/* Find a free backpressure ID slot to configure */
+	if (!FIELD_GET(NIX_AF_RX_CHANX_CFG_BP1_ENA, rx_chan_cfg)) {
+		rx_chan_cfg |= FIELD_PREP(NIX_AF_RX_CHANX_CFG_BP1_ENA, 1) |
+			       FIELD_PREP(NIX_AF_RX_CHANX_CFG_BP1, pfvf->ipsec.bpid);
+	} else if (!FIELD_GET(NIX_AF_RX_CHANX_CFG_BP2_ENA, rx_chan_cfg)) {
+		rx_chan_cfg |= FIELD_PREP(NIX_AF_RX_CHANX_CFG_BP2_ENA, 1) |
+			       FIELD_PREP(NIX_AF_RX_CHANX_CFG_BP2, pfvf->ipsec.bpid);
+	} else if (!FIELD_GET(NIX_AF_RX_CHANX_CFG_BP3_ENA, rx_chan_cfg)) {
+		rx_chan_cfg |= FIELD_PREP(NIX_AF_RX_CHANX_CFG_BP3_ENA, 1) |
+			       FIELD_PREP(NIX_AF_RX_CHANX_CFG_BP3, pfvf->ipsec.bpid);
+	} else {
+		netdev_err(pfvf->netdev, "No BPID available in RX channel\n");
+		return -ENONET;
+	}
+
+	/* Update the RX_CHAN_CFG to listen to backpressure due to IPsec traffic */
+	chan_cfg = otx2_mbox_alloc_msg_nix_rx_chan_cfg(&pfvf->mbox);
+	if (!chan_cfg)
+		return -ENOMEM;
+
+	/* Configure BPID for PF RX channel */
+	chan_cfg->val = rx_chan_cfg;
+	rc = otx2_sync_mbox_msg(&pfvf->mbox);
+	if (rc)
+		return rc;
+
+	/* Enable backpressure in CPT Link's RX Channel(s) */
+#ifdef CONFIG_DCB
+	chan_cnt = IEEE_8021QAZ_MAX_TCS;
+#endif
+	for (chan = 0; chan < chan_cnt; chan++) {
+		chan_cfg = otx2_mbox_alloc_msg_nix_rx_chan_cfg(&pfvf->mbox);
+		if (!chan_cfg)
+			return -ENOMEM;
+
+		/* CPT Link can be backpressured due to buffers reaching the
+		 * threshold in SPB pool (pfvf->ipsec.spb_bpid) or due to CQ
+		 * (pfvf->bpid[chan]) entries crossing the configured threshold
+		 */
+		chan_cfg->chan = chan;
+		chan_cfg->type = NIX_INTF_TYPE_CPT;
+		chan_cfg->val = FIELD_PREP(NIX_AF_RX_CHANX_CFG_BP0_ENA, 1) |
+				FIELD_PREP(NIX_AF_RX_CHANX_CFG_BP0, pfvf->bpid[chan]) |
+				FIELD_PREP(NIX_AF_RX_CHANX_CFG_BP1_ENA, 1) |
+				FIELD_PREP(NIX_AF_RX_CHANX_CFG_BP1, pfvf->ipsec.spb_bpid);
+
+		rc = otx2_sync_mbox_msg(&pfvf->mbox);
+		if (rc)
+			netdev_err(pfvf->netdev,
+				   "Failed to enable backpressure on CPT channel %d\n",
+				   chan);
+	}
+
+	return 0;
 }
 
 int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
@@ -1204,6 +1368,11 @@ int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
 		if (ret)
 			return ret;
 
+		/* Configure NIX <-> CPT backpressure */
+		ret = cn10k_ipsec_configure_cpt_bpid(pf);
+		if (ret)
+			goto out;
+
 		ret = cn10k_inb_cpt_init(netdev);
 		if (ret)
 			goto out;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
index 34154f002d22..a7d82757ff90 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
@@ -104,6 +104,9 @@ struct cn10k_ipsec {
 	atomic_t cpt_state;
 	struct cn10k_cpt_inst_queue iq;
 
+	u32 bpid;	/* Backpressure ID for 1st pass CPT -> NIX */
+	u32 spb_bpid;	/* Backpressure ID for 2nd pass NIX -> CPT */
+
 	/* SA info */
 	u32 sa_size;
 	u32 outb_sa_count;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index e05763fbb559..209a35299061 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -168,6 +168,15 @@
 #define LMT_LF_LMTLINEX(a)		(LMT_LFBASE | 0x000 | (a) << 12)
 #define LMT_LF_LMTCANCEL		(LMT_LFBASE | 0x400)
 
+#define NIX_AF_RX_CHANX_CFG_BP0_ENA     BIT_ULL(16)
+#define NIX_AF_RX_CHANX_CFG_BP0         GENMASK_ULL(8, 0)
+#define NIX_AF_RX_CHANX_CFG_BP1_ENA     BIT_ULL(15)
+#define NIX_AF_RX_CHANX_CFG_BP1         GENMASK_ULL(28, 20)
+#define NIX_AF_RX_CHANX_CFG_BP2_ENA     BIT_ULL(14)
+#define NIX_AF_RX_CHANX_CFG_BP2         GENMASK_ULL(40, 32)
+#define NIX_AF_RX_CHANX_CFG_BP3_ENA     BIT_ULL(13)
+#define NIX_AF_RX_CHANX_CFG_BP3         GENMASK_ULL(52, 44)
+
 /* CN20K registers */
 #define RVU_PF_DISC			(0x0)
 
-- 
2.43.0

