Message-ID: <20250502132005.611698-12-tanmay@marvell.com>
Date: Fri, 2 May 2025 18:49:52 +0530
From: Tanmay Jagdale <tanmay@...vell.com>
To: <bbrezillon@...nel.org>, <arno@...isbad.org>, <schalla@...vell.com>,
<herbert@...dor.apana.org.au>, <davem@...emloft.net>,
<sgoutham@...vell.com>, <lcherian@...vell.com>, <gakula@...vell.com>,
<jerinj@...vell.com>, <hkelam@...vell.com>, <sbhatta@...vell.com>,
<andrew+netdev@...n.ch>, <edumazet@...gle.com>, <kuba@...nel.org>,
<pabeni@...hat.com>, <bbhushan2@...vell.com>, <bhelgaas@...gle.com>,
<pstanner@...hat.com>, <gregkh@...uxfoundation.org>,
<peterz@...radead.org>, <linux@...blig.org>,
<krzysztof.kozlowski@...aro.org>, <giovanni.cabiddu@...el.com>
CC: <linux-crypto@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<netdev@...r.kernel.org>, <rkannoth@...vell.com>, <sumang@...vell.com>,
<gcherian@...vell.com>, Tanmay Jagdale <tanmay@...vell.com>
Subject: [net-next PATCH v1 11/15] octeontx2-pf: ipsec: Handle NPA threshold interrupt

The NPA aura pool dedicated to first-pass inline IPsec flows raises an
interrupt when the buffer count for that aura_id drops below a
threshold value.

Handle this interrupt with the following changes:
- Increase the number of MSI-X vectors requested for the PF/VF to
  include the NPA vector.
- Add a work item (refill_npa_inline_ipsecq) that allocates buffers
  and refills the pool.
- When the interrupt is raised, schedule the work item,
  cn10k_ipsec_npa_refill_inb_ipsecq(), which reads the number of
  consumed buffers via NPA_LF_AURA_OP_CNT and then replenishes them
  (see the sketch below).
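
For context, the refill path reduces to the pattern below. This is a
condensed sketch of the worker added by this patch, not the literal
implementation; the helper names, the register encoding (aura id in
bits [63:44] of the atomic op word) and the GENMASK width are taken
from the diff, while refill_inb_ipsec_aura() is only an illustrative
name:

  static void refill_inb_ipsec_aura(struct otx2_nic *pfvf)
  {
  	int pool_id = pfvf->ipsec.inb_ipsec_pool;
  	struct otx2_pool *pool = &pfvf->qset.pool[pool_id];
  	dma_addr_t bufptr;
  	u64 *ptr, count;

  	/* Read how many buffers were consumed from this aura */
  	ptr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_CNT);
  	count = otx2_atomic64_add((u64)pool_id << 44, ptr);
  	count &= GENMASK_ULL(35, 0);

  	/* Allocate and return that many buffers to the aura */
  	while (count--) {
  		if (otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, count))
  			break;
  		pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
  					   bufptr + OTX2_HEAD_ROOM);
  	}
  }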
Signed-off-by: Tanmay Jagdale <tanmay@...vell.com>
---
.../marvell/octeontx2/nic/cn10k_ipsec.c | 102 +++++++++++++++++-
.../marvell/octeontx2/nic/cn10k_ipsec.h | 1 +
.../ethernet/marvell/octeontx2/nic/otx2_pf.c | 4 +
.../ethernet/marvell/octeontx2/nic/otx2_vf.c | 4 +
4 files changed, 110 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
index b88c1b4c5839..365327ab9079 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -519,10 +519,77 @@ static int cn10k_ipsec_setup_nix_rx_hw_resources(struct otx2_nic *pfvf)
return err;
}
+static void cn10k_ipsec_npa_refill_inb_ipsecq(struct work_struct *work)
+{
+ struct cn10k_ipsec *ipsec = container_of(work, struct cn10k_ipsec,
+ refill_npa_inline_ipsecq);
+ struct otx2_nic *pfvf = container_of(ipsec, struct otx2_nic, ipsec);
+ struct otx2_pool *pool;
+ u64 val, *ptr, op_int = 0, count;
+ int err, pool_id, idx;
+ dma_addr_t bufptr;
+
+ val = otx2_read64(pfvf, NPA_LF_QINTX_INT(0));
+ if (!(val & 1))
+ return;
+
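+ /* Bits [63:44] of the atomic op word select the aura; the value
+  * returned by the atomic add carries that aura's interrupt status.
+  */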
+ ptr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_INT);
+ val = otx2_atomic64_add(((u64)pfvf->ipsec.inb_ipsec_pool << 44), ptr);
+
+ /* Latch any error interrupt bits so they get acked below */
+ op_int = val & 0xff;
+
+ /* Refill buffers on a threshold interrupt */
+ if (val & BIT_ULL(16)) {
+ /* Get the current number of buffers consumed */
+ ptr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_CNT);
+ count = otx2_atomic64_add(((u64)pfvf->ipsec.inb_ipsec_pool << 44), ptr);
+ count &= GENMASK_ULL(35, 0);
+
+ /* Refill */
+ pool_id = pfvf->ipsec.inb_ipsec_pool;
+ pool = &pfvf->qset.pool[pool_id];
+
+ for (idx = 0; idx < count; idx++) {
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, idx);
+ if (err) {
+ netdev_err(pfvf->netdev,
+ "Insufficient memory for IPsec pool buffers\n");
+ break;
+ }
+ pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
+ bufptr + OTX2_HEAD_ROOM);
+ }
+
+ op_int |= BIT_ULL(16);
+ }
+
+ /* Clear/ACK Interrupt */
+ if (op_int)
+ otx2_write64(pfvf, NPA_LF_AURA_OP_INT,
+ ((u64)pfvf->ipsec.inb_ipsec_pool << 44) | op_int);
+}
+
+static irqreturn_t cn10k_ipsec_npa_inb_ipsecq_intr_handler(int irq, void *data)
+{
+ struct otx2_nic *pf = data;
+
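+ /* Refilling allocates buffers and may sleep, so defer it to
+  * process context via the work item.
+  */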
+ schedule_work(&pf->ipsec.refill_npa_inline_ipsecq);
+
+ return IRQ_HANDLED;
+}
+
static int cn10k_inb_cpt_init(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- int ret = 0;
+ int ret = 0, vec;
+ char *irq_name;
+ u64 val;
ret = cn10k_ipsec_setup_nix_rx_hw_resources(pfvf);
if (ret) {
@@ -530,6 +597,34 @@ static int cn10k_inb_cpt_init(struct net_device *netdev)
return ret;
}
+ /* Work item for refilling the NPA queue for ingress inline IPsec */
+ INIT_WORK(&pfvf->ipsec.refill_npa_inline_ipsecq,
+ cn10k_ipsec_npa_refill_inb_ipsecq);
+
+ /* Register NPA interrupt */
+ vec = pfvf->hw.npa_msixoff;
+ irq_name = &pfvf->hw.irq_name[vec * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "%s-npa-qint", pfvf->netdev->name);
+
+ ret = request_irq(pci_irq_vector(pfvf->pdev, vec),
+ cn10k_ipsec_npa_inb_ipsecq_intr_handler, 0,
+ irq_name, pfvf);
+ if (ret) {
+ dev_err(pfvf->dev,
+ "RVUPF%d: IRQ registration failed for NPA QINT%d\n",
+ rvu_get_pf(pfvf->pcifunc), 0);
+ return ret;
+ }
+
+ /* Enable NPA threshold interrupt */
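+ /* Assumed encoding (mirrors the ack path in the refill worker):
+  * BIT(17) enables the threshold interrupt for the selected aura,
+  * BIT(43) selects write-1-to-set semantics for the enable bits.
+  */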
+ val = BIT_ULL(43) | BIT_ULL(17);
+ otx2_write64(pfvf, NPA_LF_AURA_OP_INT,
+ ((u64)pfvf->ipsec.inb_ipsec_pool << 44) | val);
+
+ /* Enable NPA queue interrupt (QINT) 0 */
+ otx2_write64(pfvf, NPA_LF_QINTX_ENA_W1S(0), BIT_ULL(0));
+
return ret;
}
@@ -1028,6 +1123,8 @@ EXPORT_SYMBOL(cn10k_ipsec_init);
void cn10k_ipsec_clean(struct otx2_nic *pf)
{
+ int vec;
+
if (!is_dev_support_ipsec_offload(pf->pdev))
return;
@@ -1043,6 +1140,9 @@ void cn10k_ipsec_clean(struct otx2_nic *pf)
/* Free Ingress SA table */
qmem_free(pf->dev, pf->ipsec.inb_sa);
+
+ vec = pci_irq_vector(pf->pdev, pf->hw.npa_msixoff);
+ free_irq(vec, pf);
+
+ /* Make sure a queued refill worker is not left running */
+ cancel_work_sync(&pf->ipsec.refill_npa_inline_ipsecq);
}
EXPORT_SYMBOL(cn10k_ipsec_clean);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
index 5b7b8f3db913..30d5812d52ad 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
@@ -117,6 +117,7 @@ struct cn10k_ipsec {
struct qmem *inb_sa;
struct list_head inb_sw_ctx_list;
DECLARE_BITMAP(inb_sa_table, CN10K_IPSEC_INB_MAX_SA);
+ struct work_struct refill_npa_inline_ipsecq;
};
/* CN10K IPSEC Security Association (SA) */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 8f1c17fa5a0b..0ffc56efcc23 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -2909,6 +2909,10 @@ int otx2_realloc_msix_vectors(struct otx2_nic *pf)
num_vec = hw->nix_msixoff;
num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
+ /* Update number of vectors to include NPA */
+ if (hw->nix_msixoff < hw->npa_msixoff)
+ num_vec = hw->npa_msixoff + 1;
+
otx2_disable_mbox_intr(pf);
pci_free_irq_vectors(hw->pdev);
err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
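
As an illustration (numbers are hypothetical, not from a specific
device): with hw->nix_msixoff = 0, the NIX block alone needs
NIX_LF_CINT_VEC_START + max_queues vectors, but if the device reports
hw->npa_msixoff = 64, num_vec is raised to 65 so that vector 64 (the
NPA QINT requested in cn10k_inb_cpt_init()) is actually allocated. The
VF path below applies the same adjustment.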
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index fb4da816d218..0b0f8a94ca41 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -521,6 +521,10 @@ static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
num_vec = hw->nix_msixoff;
num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
+ /* Update number of vectors to include NPA */
+ if (hw->nix_msixoff < hw->npa_msixoff)
+ num_vec = hw->npa_msixoff + 1;
+
otx2vf_disable_mbox_intr(vf);
pci_free_irq_vectors(hw->pdev);
err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
--
2.43.0