Message-ID: <20260106021447.2359108-6-rkannoth@marvell.com>
Date: Tue, 6 Jan 2026 07:44:42 +0530
From: Ratheesh Kannoth <rkannoth@...vell.com>
To: <netdev@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<andrew+netdev@...n.ch>
CC: <sgoutham@...vell.com>, <davem@...emloft.net>, <edumazet@...gle.com>,
<kuba@...nel.org>, <pabeni@...hat.com>,
Ratheesh Kannoth
<rkannoth@...vell.com>
Subject: [PATCH net-next 05/10] octeontx2-af: switch: Enable switch HW port for all channels

Switch HW should be able to forward packets to any link based on
flow rules, so enable the TX link for all channels.

Introduce a NIX_TXSCH_ALLOC_FLAG_PAN flag in nix_txsch_alloc_req so an
extra aggregate-level (TL1) scheduler queue can be allocated on the
link past the CGX and LBK links, and a TXSCHQ_FREE_PAN_TL1 flag so
that queue can be freed again. Also add a set_chanmask field to
npc_install_flow_req so the requested channel mask is honoured even
when the requester is not AF.

Signed-off-by: Ratheesh Kannoth <rkannoth@...vell.com>
---
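Note for reviewers (illustration only, not part of this patch): a
hypothetical PF-driver caller requesting the extra aggregate-level
(TL1) queue via the new flag might look roughly like the sketch
below; mbox locking and response handling are omitted:

	static int otx2_alloc_pan_tl1(struct otx2_nic *pfvf)
	{
		struct nix_txsch_alloc_req *req;

		req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
		if (!req)
			return -ENOMEM;

		/* Ask for one aggregate-level (TL1) queue; the PAN flag
		 * requests it on the link past the CGX and LBK links
		 * rather than on the PF's own TX link.
		 */
		req->schq[NIX_TXSCH_LVL_TL1] = 1;
		req->flags = NIX_TXSCH_ALLOC_FLAG_PAN;

		return otx2_sync_mbox_msg(&pfvf->mbox);
	}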
.../net/ethernet/marvell/octeontx2/af/mbox.h | 4 ++
.../ethernet/marvell/octeontx2/af/rvu_nix.c | 50 ++++++++++++++++---
.../marvell/octeontx2/af/rvu_npc_fs.c | 2 +-
.../marvell/octeontx2/nic/otx2_txrx.h | 2 +
4 files changed, 51 insertions(+), 7 deletions(-)
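Similarly (hypothetical, untested), a non-AF requester could use the
new set_chanmask field to install a flow whose channel mask is
honoured instead of being rewritten to 0xFFF, e.g. to match every
channel:

	static int otx2_install_any_chan_flow(struct otx2_nic *pfvf)
	{
		struct npc_install_flow_req *req;

		req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
		if (!req)
			return -ENOMEM;

		req->channel = pfvf->hw.rx_chan_base;
		/* Zero mask matches any channel; set_chanmask tells AF
		 * not to overwrite chan_mask with 0xFFF for this request.
		 */
		req->chan_mask = 0x0;
		req->set_chanmask = 1;
		/* entry, intf, match fields etc. set up as usual (omitted) */

		return otx2_sync_mbox_msg(&pfvf->mbox);
	}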
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index bb38d06c925c..9404c935669d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -1122,6 +1122,8 @@ struct nix_txsch_alloc_req {
/* Scheduler queue count request at each level */
u16 schq_contig[NIX_TXSCH_LVL_CNT]; /* No of contiguous queues */
u16 schq[NIX_TXSCH_LVL_CNT]; /* No of non-contiguous queues */
+#define NIX_TXSCH_ALLOC_FLAG_PAN BIT_ULL(0)
+ u64 flags;
};
struct nix_txsch_alloc_rsp {
@@ -1140,6 +1142,7 @@ struct nix_txsch_alloc_rsp {
struct nix_txsch_free_req {
struct mbox_msghdr hdr;
#define TXSCHQ_FREE_ALL BIT_ULL(0)
+#define TXSCHQ_FREE_PAN_TL1 BIT_ULL(1)
u16 flags;
/* Scheduler queue level to be freed */
u16 schq_lvl;
@@ -1958,6 +1961,7 @@ struct npc_install_flow_req {
u16 entry;
u16 channel;
u16 chan_mask;
+ u8 set_chanmask;
u8 intf;
u8 set_cntr; /* If counter is available set counter for this entry ? */
u8 default_rule;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index e2cc33ad2b2c..9d9d59affd68 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -1586,7 +1586,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
if (err)
goto free_mem;
- pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
+ pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long) * 16, GFP_KERNEL);
if (!pfvf->sq_bmap)
goto free_mem;
@@ -2106,11 +2106,14 @@ static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
if (!req_schq)
return 0;
- link = nix_get_tx_link(rvu, pcifunc);
+ if (req->flags & NIX_TXSCH_ALLOC_FLAG_PAN)
+ link = hw->cgx_links + hw->lbk_links + 1;
+ else
+ link = nix_get_tx_link(rvu, pcifunc);
/* For traffic aggregating scheduler level, one queue is enough */
if (lvl >= hw->cap.nix_tx_aggr_lvl) {
- if (req_schq != 1)
+ if (req_schq != 1 && !(req->flags & NIX_TXSCH_ALLOC_FLAG_PAN))
return NIX_AF_ERR_TLX_ALLOC_FAIL;
return 0;
}
@@ -2147,11 +2150,41 @@ static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = rsp->hdr.pcifunc;
int idx, schq;
+ bool alloc;
/* For traffic aggregating levels, queue alloc is based
* on transmit link to which PF_FUNC is mapped to.
*/
if (lvl >= hw->cap.nix_tx_aggr_lvl) {
+ if (start != end) {
+ idx = 0;
+ alloc = false;
+ for (schq = start; schq <= end; schq++, idx++) {
+ if (test_bit(schq, txsch->schq.bmap))
+ continue;
+
+ set_bit(schq, txsch->schq.bmap);
+
+ /* A single TL queue is allocated each time */
+ if (rsp->schq_contig[lvl]) {
+ alloc = true;
+ rsp->schq_contig_list[lvl][idx] = schq;
+ continue;
+ }
+
+ if (rsp->schq[lvl]) {
+ alloc = true;
+ rsp->schq_list[lvl][idx] = schq;
+ continue;
+ }
+ }
+
+ if (!alloc)
+ dev_err(rvu->dev,
+ "Could not allocate schq at lvl=%u start=%u end=%u\n",
+ lvl, start, end);
+ return;
+ }
/* A single TL queue is allocated */
if (rsp->schq_contig[lvl]) {
rsp->schq_contig[lvl] = 1;
@@ -2268,11 +2301,14 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
rsp->schq[lvl] = req->schq[lvl];
rsp->schq_contig[lvl] = req->schq_contig[lvl];
- link = nix_get_tx_link(rvu, pcifunc);
+ if (req->flags & NIX_TXSCH_ALLOC_FLAG_PAN)
+ link = hw->cgx_links + hw->lbk_links + 1;
+ else
+ link = nix_get_tx_link(rvu, pcifunc);
if (lvl >= hw->cap.nix_tx_aggr_lvl) {
start = link;
- end = link;
+ end = link + !!(req->flags & NIX_TXSCH_ALLOC_FLAG_PAN);
} else if (hw->cap.nix_fixed_txschq_mapping) {
nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
} else {
@@ -2637,7 +2673,9 @@ static int nix_txschq_free_one(struct rvu *rvu,
schq = req->schq;
txsch = &nix_hw->txsch[lvl];
- if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
+ if ((lvl >= hw->cap.nix_tx_aggr_lvl &&
+ !(req->flags & TXSCHQ_FREE_PAN_TL1)) ||
+ schq >= txsch->schq.max)
return 0;
pfvf_map = txsch->pfvf_map;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 3d6f780635a5..925b0b02279e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -1469,7 +1469,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
}
/* ignore chan_mask in case pf func is not AF, revisit later */
- if (!is_pffunc_af(req->hdr.pcifunc))
+ if (!req->set_chanmask && !is_pffunc_af(req->hdr.pcifunc))
req->chan_mask = 0xFFF;
err = npc_check_unsupported_flows(rvu, req->features, req->intf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index acf259d72008..73a98b94426b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -78,6 +78,8 @@ struct otx2_rcv_queue {
struct sg_list {
u16 num_segs;
u16 flags;
+ u16 cq_idx;
+ u16 len;
u64 skb;
u64 size[OTX2_MAX_FRAGS_IN_SQE];
u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE];
--
2.43.0