Message-ID: <1752598924-32705-8-git-send-email-sbhatta@marvell.com>
Date: Tue, 15 Jul 2025 22:32:00 +0530
From: Subbaraya Sundeep <sbhatta@...vell.com>
To: <andrew+netdev@...n.ch>, <davem@...emloft.net>, <edumazet@...gle.com>,
 <kuba@...nel.org>, <pabeni@...hat.com>,
<horms@...nel.org>
CC: <gakula@...vell.com>, <hkelam@...vell.com>, <bbhushan2@...vell.com>,
<jerinj@...vell.com>, <lcherian@...vell.com>, <sgoutham@...vell.com>,
<netdev@...r.kernel.org>, Subbaraya Sundeep <sbhatta@...vell.com>
Subject: [net-next PATCH v2 07/11] octeontx2-pf: Initialize cn20k specific aura and pool contexts

From: Linu Cherian <lcherian@...vell.com>

Now that the new CN20K NPA pool and aura contexts are supported in the
AF driver, modify the PF driver to use them. Implement new hw_ops for
initializing aura and pool contexts on all the silicons.

Signed-off-by: Linu Cherian <lcherian@...vell.com>
Signed-off-by: Subbaraya Sundeep <sbhatta@...vell.com>
---
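For context, a minimal standalone sketch of the hw_ops dispatch this
patch extends: otx2_aura_init()/otx2_pool_init() become thin wrappers
that call through pfvf->hw_ops, so the CN20K callbacks are selected at
probe time while otx2/cn10k silicons keep the existing initializers.
The names and types below are simplified stand-ins for illustration,
not the driver's real structures.

	/* Simplified model of the dev_hw_ops dispatch; the real
	 * definitions live in otx2_common.h.
	 */
	#include <stdio.h>

	struct nic;

	struct dev_hw_ops {
		int (*aura_aq_init)(struct nic *nic, int aura_id,
				    int pool_id, int numptrs);
		int (*pool_aq_init)(struct nic *nic, int pool_id,
				    int stack_pages, int numptrs);
	};

	struct nic {
		const struct dev_hw_ops *hw_ops;
	};

	/* otx2/cn10k-style initializer (existing NPA context layout) */
	static int legacy_aura_aq_init(struct nic *nic, int aura_id,
				       int pool_id, int numptrs)
	{
		printf("legacy aura init: aura %d pool %d ptrs %d\n",
		       aura_id, pool_id, numptrs);
		return 0;
	}

	/* cn20k-style initializer (new NPA context layout) */
	static int cn20k_aura_aq_init(struct nic *nic, int aura_id,
				      int pool_id, int numptrs)
	{
		printf("cn20k aura init: aura %d pool %d ptrs %d\n",
		       aura_id, pool_id, numptrs);
		return 0;
	}

	static const struct dev_hw_ops legacy_ops = {
		.aura_aq_init = legacy_aura_aq_init,
	};

	static const struct dev_hw_ops cn20k_ops = {
		.aura_aq_init = cn20k_aura_aq_init,
	};

	/* Generic wrapper: callers never care which silicon is present */
	static int aura_init(struct nic *nic, int aura_id, int pool_id,
			     int numptrs)
	{
		return nic->hw_ops->aura_aq_init(nic, aura_id, pool_id,
						 numptrs);
	}

	int main(void)
	{
		struct nic legacy_nic = { .hw_ops = &legacy_ops };
		struct nic cn20k_nic = { .hw_ops = &cn20k_ops }; /* picked at probe */

		aura_init(&legacy_nic, 0, 0, 256);
		aura_init(&cn20k_nic, 0, 0, 256);
		return 0;
	}
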
.../ethernet/marvell/octeontx2/nic/cn10k.c | 4 +
.../ethernet/marvell/octeontx2/nic/cn20k.c | 178 ++++++++++++++++--
.../marvell/octeontx2/nic/otx2_common.c | 14 ++
.../marvell/octeontx2/nic/otx2_common.h | 10 +
 4 files changed, 195 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index bec7d5b4d7cc..cab157aac251 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -15,6 +15,8 @@ static struct dev_hw_ops otx2_hw_ops = {
.aura_freeptr = otx2_aura_freeptr,
.refill_pool_ptrs = otx2_refill_pool_ptrs,
.pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler,
+ .aura_aq_init = otx2_aura_aq_init,
+ .pool_aq_init = otx2_pool_aq_init,
};
static struct dev_hw_ops cn10k_hw_ops = {
@@ -23,6 +25,8 @@ static struct dev_hw_ops cn10k_hw_ops = {
.aura_freeptr = cn10k_aura_freeptr,
.refill_pool_ptrs = cn10k_refill_pool_ptrs,
.pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler,
+ .aura_aq_init = otx2_aura_aq_init,
+ .pool_aq_init = otx2_pool_aq_init,
};
void otx2_init_hw_ops(struct otx2_nic *pfvf)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
index ec8cde98076d..037548f36940 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
@@ -10,17 +10,6 @@
#include "otx2_struct.h"
#include "cn10k.h"
-static struct dev_hw_ops cn20k_hw_ops = {
- .pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler,
- .vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler,
- .pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler,
-};
-
-void cn20k_init(struct otx2_nic *pfvf)
-{
- pfvf->hw_ops = &cn20k_hw_ops;
-}
-EXPORT_SYMBOL(cn20k_init);
/* CN20K mbox AF => PFx irq handler */
irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq)
{
@@ -250,3 +239,170 @@ int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
return 0;
}
+
+#define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */
+
+static int cn20k_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
+ int pool_id, int numptrs)
+{
+ struct npa_cn20k_aq_enq_req *aq;
+ struct otx2_pool *pool;
+ int err;
+
+ pool = &pfvf->qset.pool[pool_id];
+
+ /* Allocate memory for HW to update Aura count.
+ * Alloc one cache line, so that it fits all FC_STYPE modes.
+ */
+ if (!pool->fc_addr) {
+ err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
+ if (err)
+ return err;
+ }
+
+ /* Initialize this aura's context via AF */
+ aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ /* Shared mbox memory buffer is full, flush it and retry */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ return err;
+ aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+ }
+
+ aq->aura_id = aura_id;
+
+ /* Will be filled by AF with correct pool context address */
+ aq->aura.pool_addr = pool_id;
+ aq->aura.pool_caching = 1;
+ aq->aura.shift = ilog2(numptrs) - 8;
+ aq->aura.count = numptrs;
+ aq->aura.limit = numptrs;
+ aq->aura.avg_level = 255;
+ aq->aura.ena = 1;
+ aq->aura.fc_ena = 1;
+ aq->aura.fc_addr = pool->fc_addr->iova;
+ aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
+
+ /* Enable backpressure for RQ aura */
+ if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
+ aq->aura.bp_ena = 0;
+ /* If NIX1 LF is attached then specify NIX1_RX.
+ *
+ * Below NPA_AURA_S[BP_ENA] is set according to the
+ * NPA_BPINTF_E enumeration given as:
+ * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so
+ * NIX0_RX is 0x0 + 0*0x1 = 0
+ * NIX1_RX is 0x0 + 1*0x1 = 1
+ * But in HRM it is given that
+ * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to
+ * NIX-RX based on [BP] level. One bit per NIX-RX; index
+ * enumerated by NPA_BPINTF_E."
+ */
+ if (pfvf->nix_blkaddr == BLKADDR_NIX1)
+ aq->aura.bp_ena = 1;
+#ifdef CONFIG_DCB
+ aq->aura.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]];
+#else
+ aq->aura.bpid = pfvf->bpid[0];
+#endif
+
+ /* Set backpressure level for RQ's Aura */
+ aq->aura.bp = RQ_BP_LVL_AURA;
+ }
+
+ /* Fill AQ info */
+ aq->ctype = NPA_AQ_CTYPE_AURA;
+ aq->op = NPA_AQ_INSTOP_INIT;
+
+ return 0;
+}
+
+static int cn20k_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size,
+ int type)
+{
+ struct page_pool_params pp_params = { 0 };
+ struct npa_cn20k_aq_enq_req *aq;
+ struct otx2_pool *pool;
+ int err, sz;
+
+ pool = &pfvf->qset.pool[pool_id];
+ /* Alloc memory for stack which is used to store buffer pointers */
+ err = qmem_alloc(pfvf->dev, &pool->stack,
+ stack_pages, pfvf->hw.stack_pg_bytes);
+ if (err)
+ return err;
+
+ pool->rbsize = buf_size;
+
+ /* Initialize this pool's context via AF */
+ aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ /* Shared mbox memory buffer is full, flush it and retry */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ qmem_free(pfvf->dev, pool->stack);
+ return err;
+ }
+ aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ qmem_free(pfvf->dev, pool->stack);
+ return -ENOMEM;
+ }
+ }
+
+ aq->aura_id = pool_id;
+ aq->pool.stack_base = pool->stack->iova;
+ aq->pool.stack_caching = 1;
+ aq->pool.ena = 1;
+ aq->pool.buf_size = buf_size / 128;
+ aq->pool.stack_max_pages = stack_pages;
+ aq->pool.shift = ilog2(numptrs) - 8;
+ aq->pool.ptr_start = 0;
+ aq->pool.ptr_end = ~0ULL;
+
+ /* Fill AQ info */
+ aq->ctype = NPA_AQ_CTYPE_POOL;
+ aq->op = NPA_AQ_INSTOP_INIT;
+
+ if (type != AURA_NIX_RQ) {
+ pool->page_pool = NULL;
+ return 0;
+ }
+
+ sz = ALIGN(ALIGN(SKB_DATA_ALIGN(buf_size), OTX2_ALIGN), PAGE_SIZE);
+ pp_params.order = (sz / PAGE_SIZE) - 1;
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dev = pfvf->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pool->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool->page_pool)) {
+ netdev_err(pfvf->netdev, "Creation of page pool failed\n");
+ return PTR_ERR(pool->page_pool);
+ }
+
+ return 0;
+}
+
+static struct dev_hw_ops cn20k_hw_ops = {
+ .pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler,
+ .vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler,
+ .pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler,
+ .sq_aq_init = cn10k_sq_aq_init,
+ .sqe_flush = cn10k_sqe_flush,
+ .aura_freeptr = cn10k_aura_freeptr,
+ .refill_pool_ptrs = cn10k_refill_pool_ptrs,
+ .aura_aq_init = cn20k_aura_aq_init,
+ .pool_aq_init = cn20k_pool_aq_init,
+};
+
+void cn20k_init(struct otx2_nic *pfvf)
+{
+ pfvf->hw_ops = &cn20k_hw_ops;
+}
+EXPORT_SYMBOL(cn20k_init);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index f674729124e6..ad57ded39bd1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1366,6 +1366,13 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
int pool_id, int numptrs)
+{
+ return pfvf->hw_ops->aura_aq_init(pfvf, aura_id, pool_id,
+ numptrs);
+}
+
+int otx2_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
+ int pool_id, int numptrs)
{
struct npa_aq_enq_req *aq;
struct otx2_pool *pool;
@@ -1443,6 +1450,13 @@ int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
int stack_pages, int numptrs, int buf_size, int type)
+{
+ return pfvf->hw_ops->pool_aq_init(pfvf, pool_id, stack_pages, numptrs,
+ buf_size, type);
+}
+
+int otx2_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size, int type)
{
struct page_pool_params pp_params = { 0 };
struct xsk_buff_pool *xsk_pool;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index e3765b73c434..b587ff931820 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -14,6 +14,7 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
+#include <linux/soc/marvell/silicons.h>
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/macsec.h>
#include <net/pkt_cls.h>
@@ -374,6 +375,11 @@ struct dev_hw_ops {
irqreturn_t (*pfaf_mbox_intr_handler)(int irq, void *pf_irq);
irqreturn_t (*vfaf_mbox_intr_handler)(int irq, void *pf_irq);
irqreturn_t (*pfvf_mbox_intr_handler)(int irq, void *pf_irq);
+ int (*aura_aq_init)(struct otx2_nic *pfvf, int aura_id,
+ int pool_id, int numptrs);
+ int (*pool_aq_init)(struct otx2_nic *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size,
+ int type);
};
#define CN10K_MCS_SA_PER_SC 4
@@ -1058,6 +1064,10 @@ irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura);
int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx);
int otx2_set_hw_capabilities(struct otx2_nic *pfvf);
+int otx2_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
+ int pool_id, int numptrs);
+int otx2_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size, int type);
/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
--
2.34.1