[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250502132005.611698-13-tanmay@marvell.com>
Date: Fri, 2 May 2025 18:49:53 +0530
From: Tanmay Jagdale <tanmay@...vell.com>
To: <bbrezillon@...nel.org>, <arno@...isbad.org>, <schalla@...vell.com>,
<herbert@...dor.apana.org.au>, <davem@...emloft.net>,
<sgoutham@...vell.com>, <lcherian@...vell.com>, <gakula@...vell.com>,
<jerinj@...vell.com>, <hkelam@...vell.com>, <sbhatta@...vell.com>,
<andrew+netdev@...n.ch>, <edumazet@...gle.com>, <kuba@...nel.org>,
<pabeni@...hat.com>, <bbhushan2@...vell.com>, <bhelgaas@...gle.com>,
<pstanner@...hat.com>, <gregkh@...uxfoundation.org>,
<peterz@...radead.org>, <linux@...blig.org>,
<krzysztof.kozlowski@...aro.org>, <giovanni.cabiddu@...el.com>
CC: <linux-crypto@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<netdev@...r.kernel.org>, <rkannoth@...vell.com>, <sumang@...vell.com>,
<gcherian@...vell.com>, Tanmay Jagdale <tanmay@...vell.com>
Subject: [net-next PATCH v1 12/15] octeontx2-pf: ipsec: Initialize ingress IPsec
Initialize ingress inline IPsec offload when the ESP offload feature
is enabled via ethtool. As part of initialization, the following
mailbox messages must be sent to configure inline IPsec:
NIX_INLINE_IPSEC_LF_CFG - Every NIX LF has the provision to maintain a
contiguous SA table. This mailbox configures
the SA table base address, the size of each SA,
and the maximum number of entries in the table.
Currently, we support a 128-entry table with
each SA of size 1024 bytes.
NIX_LF_INLINE_RQ_CFG - Post decryption, CPT sends a metapacket of 256
bytes which has enough packet headers to help
NIX RX classify it. However, since the packet is
not complete, we cannot perform checksum and
packet length verification. Hence, configure the
RQ context to disable L3, L4 checksum and length
verification for packets coming from CPT.
NIX_INLINE_IPSEC_CFG - RVU hardware supports 1 common CPT LF for inbound
ingress IPsec flows. This CPT LF is configured via
this mailbox and is a one time system-wide
configuration.
NIX_ALLOC_BPID - Configure backpressure between the NIX and CPT
blocks by allocating a backpressure ID using this
mailbox for ingress inline IPsec flows.
NIX_FREE_BPID - Free this BPID when ESP offload is disabled via
ethtool.
Signed-off-by: Tanmay Jagdale <tanmay@...vell.com>
---
.../marvell/octeontx2/nic/cn10k_ipsec.c | 167 ++++++++++++++++++
.../marvell/octeontx2/nic/cn10k_ipsec.h | 2 +
2 files changed, 169 insertions(+)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
index 365327ab9079..c6f408007511 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -346,6 +346,97 @@ static int cn10k_outb_cpt_init(struct net_device *netdev)
return ret;
}
+static int cn10k_inb_nix_inline_lf_cfg(struct otx2_nic *pfvf)
+{
+ struct nix_inline_ipsec_lf_cfg *req;
+ int ret = 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_inline_ipsec_lf_cfg(&pfvf->mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ req->sa_base_addr = pfvf->ipsec.inb_sa->iova;
+ req->ipsec_cfg0.tag_const = 0;
+ req->ipsec_cfg0.tt = 0;
+ req->ipsec_cfg0.lenm1_max = 11872; /* (Max packet size - 128 (first skip)) */
+ req->ipsec_cfg0.sa_pow2_size = 0xb; /* 2048 */
+ req->ipsec_cfg1.sa_idx_max = CN10K_IPSEC_INB_MAX_SA - 1;
+ req->ipsec_cfg1.sa_idx_w = 0x7;
+ req->enable = 1;
+
+ ret = otx2_sync_mbox_msg(&pfvf->mbox);
+error:
+ mutex_unlock(&pfvf->mbox.lock);
+ return ret;
+}
+
+static int cn10k_inb_nix_inline_lf_rq_cfg(struct otx2_nic *pfvf)
+{
+ struct nix_rq_cpt_field_mask_cfg_req *req;
+ int ret = 0, i;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_lf_inline_rq_cfg(&pfvf->mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ for (i = 0; i < RQ_CTX_MASK_MAX; i++)
+ req->rq_ctx_word_mask[i] = 0xffffffffffffffff;
+
+ req->rq_set.len_ol3_dis = 1;
+ req->rq_set.len_ol4_dis = 1;
+ req->rq_set.len_il3_dis = 1;
+
+ req->rq_set.len_il4_dis = 1;
+ req->rq_set.csum_ol4_dis = 1;
+ req->rq_set.csum_il4_dis = 1;
+
+ req->rq_set.lenerr_dis = 1;
+ req->rq_set.port_ol4_dis = 1;
+ req->rq_set.port_il4_dis = 1;
+
+ req->ipsec_cfg1.rq_mask_enable = 1;
+ req->ipsec_cfg1.spb_cpt_enable = 0;
+
+ ret = otx2_sync_mbox_msg(&pfvf->mbox);
+error:
+ mutex_unlock(&pfvf->mbox.lock);
+ return ret;
+}
+
+static int cn10k_inb_nix_inline_ipsec_cfg(struct otx2_nic *pfvf)
+{
+ struct cpt_rx_inline_lf_cfg_msg *req;
+ int ret = 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cpt_rx_inline_lf_cfg(&pfvf->mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ req->sso_pf_func = 0;
+ req->opcode = CN10K_IPSEC_MAJOR_OP_INB_IPSEC | (1 << 6);
+ req->param1 = 7; /* bit 0:ip_csum_dis 1:tcp_csum_dis 2:esp_trailer_dis */
+ req->param2 = 0;
+ req->bpid = pfvf->ipsec.bpid;
+ req->credit = 8160;
+ req->credit_th = 100;
+ req->ctx_ilen_valid = 1;
+ req->ctx_ilen = 5;
+
+ ret = otx2_sync_mbox_msg(&pfvf->mbox);
+error:
+ mutex_unlock(&pfvf->mbox.lock);
+ return ret;
+}
+
static int cn10k_ipsec_ingress_aura_init(struct otx2_nic *pfvf, int aura_id,
int pool_id, int numptrs)
{
@@ -625,6 +716,28 @@ static int cn10k_inb_cpt_init(struct net_device *netdev)
/* Enable interrupt */
otx2_write64(pfvf, NPA_LF_QINTX_ENA_W1S(0), BIT_ULL(0));
+ /* Enable inbound inline IPSec in NIX LF */
+ ret = cn10k_inb_nix_inline_lf_cfg(pfvf);
+ if (ret) {
+ netdev_err(netdev, "Error configuring NIX for Inline IPSec\n");
+ goto out;
+ }
+
+ /* IPsec specific RQ settings in NIX LF */
+ ret = cn10k_inb_nix_inline_lf_rq_cfg(pfvf);
+ if (ret) {
+ netdev_err(netdev, "Error configuring NIX for Inline IPSec\n");
+ goto out;
+ }
+
+ /* One-time configuration to enable CPT LF for inline inbound IPSec */
+ ret = cn10k_inb_nix_inline_ipsec_cfg(pfvf);
+ if (ret && ret != -EEXIST)
+ netdev_err(netdev, "CPT LF configuration error\n");
+ else
+ ret = 0;
+
+out:
return ret;
}
@@ -1044,6 +1157,53 @@ static void cn10k_ipsec_sa_wq_handler(struct work_struct *work)
rtnl_unlock();
}
+static int cn10k_ipsec_configure_cpt_bpid(struct otx2_nic *pfvf)
+{
+ struct nix_alloc_bpid_req *req;
+ struct nix_bpids *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_nix_alloc_bpids(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+ req->bpid_cnt = 1;
+ req->type = NIX_INTF_TYPE_CPT;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (rc)
+ return rc;
+
+ rsp = (struct nix_bpids *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ /* Store the bpid for configuring it in the future */
+ pfvf->ipsec.bpid = rsp->bpids[0];
+
+ return 0;
+}
+
+static int cn10k_ipsec_free_cpt_bpid(struct otx2_nic *pfvf)
+{
+ struct nix_bpids *req;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_nix_free_bpids(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->bpid_cnt = 1;
+ req->bpids[0] = pfvf->ipsec.bpid;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (rc)
+ return rc;
+
+ /* Clear the bpid */
+ pfvf->ipsec.bpid = 0;
+ return 0;
+}
+
int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
{
struct otx2_nic *pf = netdev_priv(netdev);
@@ -1062,6 +1222,10 @@ int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
ret = cn10k_inb_cpt_init(netdev);
if (ret)
return ret;
+
+ /* Configure NIX <-> CPT backpressure */
+ ret = cn10k_ipsec_configure_cpt_bpid(pf);
+ return ret;
}
/* Don't do CPT cleanup if SA installed */
@@ -1070,6 +1234,7 @@ int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
return -EBUSY;
}
+ cn10k_ipsec_free_cpt_bpid(pf);
return cn10k_outb_cpt_clean(pf);
}
@@ -1143,6 +1308,8 @@ void cn10k_ipsec_clean(struct otx2_nic *pf)
vec = pci_irq_vector(pf->pdev, pf->hw.npa_msixoff);
free_irq(vec, pf);
+
+ cn10k_ipsec_free_cpt_bpid(pf);
}
EXPORT_SYMBOL(cn10k_ipsec_clean);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
index 30d5812d52ad..f042cbadf054 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
@@ -104,6 +104,8 @@ struct cn10k_ipsec {
atomic_t cpt_state;
struct cn10k_cpt_inst_queue iq;
+ u32 bpid; /* Backpressure ID for NIX <-> CPT */
+
/* SA info */
u32 sa_size;
u32 outb_sa_count;
--
2.43.0
Powered by blists - more mailing lists