Message-ID: <20250502132005.611698-9-tanmay@marvell.com>
Date: Fri, 2 May 2025 18:49:49 +0530
From: Tanmay Jagdale <tanmay@...vell.com>
To: <bbrezillon@...nel.org>, <arno@...isbad.org>, <schalla@...vell.com>,
<herbert@...dor.apana.org.au>, <davem@...emloft.net>,
<sgoutham@...vell.com>, <lcherian@...vell.com>, <gakula@...vell.com>,
<jerinj@...vell.com>, <hkelam@...vell.com>, <sbhatta@...vell.com>,
<andrew+netdev@...n.ch>, <edumazet@...gle.com>, <kuba@...nel.org>,
<pabeni@...hat.com>, <bbhushan2@...vell.com>, <bhelgaas@...gle.com>,
<pstanner@...hat.com>, <gregkh@...uxfoundation.org>,
<peterz@...radead.org>, <linux@...blig.org>,
<krzysztof.kozlowski@...aro.org>, <giovanni.cabiddu@...el.com>
CC: <linux-crypto@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<netdev@...r.kernel.org>, <rkannoth@...vell.com>, <sumang@...vell.com>,
<gcherian@...vell.com>, Amit Singh Tomar <amitsinght@...vell.com>,
"Tanmay
Jagdale" <tanmay@...vell.com>
Subject: [net-next PATCH v1 08/15] octeontx2-af: Add mbox to alloc/free BPIDs
From: Geetha sowjanya <gakula@...vell.com>
Add mbox handlers to allocate/free BPIDs from the free BPID pool.
These can be used by a PF/VF to request up to 8 BPIDs.
Also add an mbox handler to configure NIX_AF_RX_CHANX_CFG with
multiple BPIDs.
Signed-off-by: Amit Singh Tomar <amitsinght@...vell.com>
Signed-off-by: Geetha sowjanya <gakula@...vell.com>
Signed-off-by: Tanmay Jagdale <tanmay@...vell.com>
---
.../ethernet/marvell/octeontx2/af/common.h | 1 +
.../net/ethernet/marvell/octeontx2/af/mbox.h | 26 +++++
.../ethernet/marvell/octeontx2/af/rvu_nix.c | 100 ++++++++++++++++++
3 files changed, 127 insertions(+)
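
[ Not part of the patch, for illustration only: a consumer-side sketch of how
  a PF/VF driver might use the new mbox messages. It assumes the nic driver's
  auto-generated otx2_mbox_alloc_msg_* helpers (from its MBOX_MESSAGES
  expansion) plus the existing otx2_sync_mbox_msg()/otx2_mbox_get_rsp() flow;
  the function names otx2_request_cpt_bpid()/otx2_cfg_cpt_rx_chan() are
  hypothetical. ]

/* Hypothetical: request one BPID bound to the CPT interface type */
static int otx2_request_cpt_bpid(struct otx2_nic *pfvf, u16 *bpid)
{
	struct nix_alloc_bpid_req *req;
	struct nix_bpids *rsp;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_alloc_bpids(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}
	req->bpid_cnt = 1;		/* up to 8 may be requested */
	req->type = NIX_INTF_TYPE_CPT;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto out;

	rsp = (struct nix_bpids *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0,
						    &req->hdr);
	if (IS_ERR(rsp)) {
		err = PTR_ERR(rsp);
		goto out;
	}
	if (!rsp->bpid_cnt) {
		err = -ENOSPC;		/* pool exhausted */
		goto out;
	}
	*bpid = rsp->bpids[0];
out:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

/* Hypothetical: write NIX_AF_RX_CHANX_CFG for a CPT-mapped RX channel.
 * 'chan' is relative to the PF/VF rx_chan_base; the AF ORs in BIT(11)
 * for NIX_INTF_TYPE_CPT before touching the register.
 */
static int otx2_cfg_cpt_rx_chan(struct otx2_nic *pfvf, u16 chan, u64 val)
{
	struct nix_rx_chan_cfg *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_rx_chan_cfg(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}
	req->type = NIX_INTF_TYPE_CPT;
	req->chan = chan;
	req->read = 0;			/* 0 = write, 1 = read back */
	req->val = val;			/* raw NIX_AF_RX_CHANX_CFG value */

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}
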
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index 406c59100a35..a7c1223dedc6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -191,6 +191,7 @@ enum nix_scheduler {
#define NIX_INTF_TYPE_CGX 0
#define NIX_INTF_TYPE_LBK 1
#define NIX_INTF_TYPE_SDP 2
+#define NIX_INTF_TYPE_CPT 3
#define MAX_LMAC_PKIND 12
#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 5cebf10a15a7..71cf507c2591 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -338,6 +338,9 @@ M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update, \
nix_mcast_grp_update_req, \
nix_mcast_grp_update_rsp) \
M(NIX_LF_STATS, 0x802e, nix_lf_stats, nix_stats_req, nix_stats_rsp) \
+M(NIX_ALLOC_BPIDS, 0x8028, nix_alloc_bpids, nix_alloc_bpid_req, nix_bpids) \
+M(NIX_FREE_BPIDS, 0x8029, nix_free_bpids, nix_bpids, msg_rsp) \
+M(NIX_RX_CHAN_CFG, 0x802a, nix_rx_chan_cfg, nix_rx_chan_cfg, nix_rx_chan_cfg) \
/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
mcs_alloc_rsrc_rsp) \
@@ -1347,6 +1350,29 @@ struct nix_mcast_grp_update_rsp {
u32 mce_start_index;
};
+struct nix_alloc_bpid_req {
+ struct mbox_msghdr hdr;
+ u8 bpid_cnt;
+ u8 type;
+ u64 rsvd;
+};
+
+struct nix_bpids {
+ struct mbox_msghdr hdr;
+ u8 bpid_cnt;
+ u16 bpids[8];
+ u64 rsvd;
+};
+
+struct nix_rx_chan_cfg {
+ struct mbox_msghdr hdr;
+	u8 type; /* Interface type (CGX/CPT/LBK) */
+	u8 read;
+	u16 chan; /* RX channel to be configured */
+	u64 val; /* NIX_AF_RX_CHANX_CFG value */
+ u64 rsvd;
+};
+
/* Global NIX inline IPSec configuration */
struct nix_inline_ipsec_cfg {
struct mbox_msghdr hdr;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 68525bfc8e6d..d5ec6ad0f30c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -569,6 +569,106 @@ void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
mutex_unlock(&rvu->rsrc_lock);
}
+int rvu_mbox_handler_nix_rx_chan_cfg(struct rvu *rvu,
+ struct nix_rx_chan_cfg *req,
+ struct nix_rx_chan_cfg *rsp)
+{
+ struct rvu_pfvf *pfvf;
+ int blkaddr;
+ u16 chan;
+
+	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	chan = pfvf->rx_chan_base + req->chan;
+
+	/* CPT-bound channels live in the upper half of the RX channel space */
+	if (req->type == NIX_INTF_TYPE_CPT)
+		chan |= BIT(11);
+
+ if (req->read) {
+ rsp->val = rvu_read64(rvu, blkaddr,
+ NIX_AF_RX_CHANX_CFG(chan));
+ rsp->chan = req->chan;
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), req->val);
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_nix_alloc_bpids(struct rvu *rvu,
+ struct nix_alloc_bpid_req *req,
+ struct nix_bpids *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_hw *nix_hw;
+ int blkaddr, cnt = 0;
+ struct nix_bp *bp;
+ int bpid, err;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ bp = &nix_hw->bp;
+
+	/* Interface types beyond CPT (e.g. SSO) share the same BPID across
+	 * multiple applications. If a BPID is already allocated for this
+	 * type, return it with an incremented reference count instead of
+	 * allocating a new one.
+	 */
+ if (req->type > NIX_INTF_TYPE_CPT) {
+ for (bpid = 0; bpid < bp->bpids.max; bpid++) {
+ if (bp->intf_map[bpid] == req->type) {
+ rsp->bpids[cnt] = bpid + bp->free_pool_base;
+ rsp->bpid_cnt++;
+ bp->ref_cnt[bpid]++;
+ cnt++;
+ }
+ }
+ if (rsp->bpid_cnt)
+ return 0;
+ }
+
+	for (cnt = 0; cnt < req->bpid_cnt; cnt++) {
+		bpid = rvu_alloc_rsrc(&bp->bpids);
+		/* Pool exhausted: return whatever has been allocated so far */
+		if (bpid < 0)
+			return 0;
+ rsp->bpids[cnt] = bpid + bp->free_pool_base;
+ bp->intf_map[bpid] = req->type;
+ bp->fn_map[bpid] = pcifunc;
+ bp->ref_cnt[bpid]++;
+ rsp->bpid_cnt++;
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_nix_free_bpids(struct rvu *rvu,
+ struct nix_bpids *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr, cnt, err;
+ struct nix_hw *nix_hw;
+ struct nix_bp *bp;
+ u16 bpid;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ bp = &nix_hw->bp;
+ for (cnt = 0; cnt < req->bpid_cnt; cnt++) {
+ bpid = req->bpids[cnt] - bp->free_pool_base;
+ bp->ref_cnt[bpid]--;
+ if (bp->ref_cnt[bpid])
+ continue;
+		rvu_free_rsrc(&bp->bpids, bpid);
+		bp->fn_map[bpid] = 0;
+		bp->intf_map[bpid] = 0;
+ }
+ return 0;
+}
+
static u16 nix_get_channel(u16 chan, bool cpt_link)
{
/* CPT channel for a given link channel is always
--
2.43.0