Message-Id: <20181201091403.30166-7-jerinjacobk@gmail.com>
Date: Sat, 1 Dec 2018 14:43:55 +0530
From: Jerin Jacob <jerinjacobk@...il.com>
To: netdev@...r.kernel.org, davem@...emloft.net
Cc: sgoutham@...vell.com, lcherian@...vell.com, gakula@...vell.com,
jerinj@...vell.com, Nithin Dabilpuram <ndabilpuram@...vell.com>,
Krzysztof Kanas <kkanas@...vell.com>
Subject: [PATCH v1 net-next 06/14] octeontx2-af: Allow freeing single TLx Tx schedule queue

From: Nithin Dabilpuram <ndabilpuram@...vell.com>

The default behavior was to free all the TLx Tx schedule
queues. This patch adds support for freeing a single Tx
schedule queue when the TXSCHQ_FREE_ALL flag is not set in
the request.
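
For illustration, here is a minimal sketch of how a NIX LF driver
could fill the mailbox request either way after this change. Only
struct nix_txsch_free_req, its schq_lvl/schq/flags fields,
TXSCHQ_FREE_ALL and NIX_TXSCH_LVL_SMQ come from this patch;
send_txsch_free() and the smq index are hypothetical stand-ins for
the driver's own mbox send path:

	struct nix_txsch_free_req req = { 0 };

	/* Free one SMQ: leave TXSCHQ_FREE_ALL clear and give the level
	 * and queue index to release (sketch only).
	 */
	req.schq_lvl = NIX_TXSCH_LVL_SMQ;
	req.schq = smq;
	send_txsch_free(&req);

	/* Setting req.flags = TXSCHQ_FREE_ALL instead keeps the old
	 * behavior of freeing every TLx queue owned by this PF/VF.
	 */
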
Signed-off-by: Krzysztof Kanas <kkanas@...vell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@...vell.com>
Signed-off-by: Jerin Jacob <jerinj@...vell.com>
---
.../ethernet/marvell/octeontx2/af/rvu_nix.c | 72 ++++++++++++++++++-
1 file changed, 71 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 90779cff1463..4c73a9c661e8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -1255,11 +1255,81 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
return 0;
}
+static int nix_txschq_free_one(struct rvu *rvu,
+ struct nix_txsch_free_req *req)
+{
+ int lvl, schq, nixlf, blkaddr, rc;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ u32 *pfvf_map;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ lvl = req->schq_lvl;
+ schq = req->schq;
+ txsch = &nix_hw->txsch[lvl];
+
+ /* Don't allow freeing TL1 */
+ if (lvl > NIX_TXSCH_LVL_TL2 ||
+ schq >= txsch->schq.max)
+ goto err;
+
+ pfvf_map = txsch->pfvf_map;
+ mutex_lock(&rvu->rsrc_lock);
+
+ if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
+ mutex_unlock(&rvu->rsrc_lock);
+ goto err;
+ }
+
+ /* Flush if it is a SMQ. Onus of disabling
+ * TL2/3 queue links before SMQ flush is on user
+ */
+ if (lvl == NIX_TXSCH_LVL_SMQ) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
+ /* Do SMQ flush and set enqueue xoff */
+ cfg |= BIT_ULL(50) | BIT_ULL(49);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
+
+ /* Wait for flush to complete */
+ rc = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
+ if (rc) {
+ dev_err(rvu->dev,
+ "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
+ }
+ }
+
+ /* Free the resource */
+ rvu_free_rsrc(&txsch->schq, schq);
+ txsch->pfvf_map[schq] = 0;
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+err:
+ return NIX_AF_ERR_TLX_INVALID;
+}
+
int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
struct nix_txsch_free_req *req,
struct msg_rsp *rsp)
{
- return nix_txschq_free(rvu, req->hdr.pcifunc);
+ if (req->flags & TXSCHQ_FREE_ALL)
+ return nix_txschq_free(rvu, req->hdr.pcifunc);
+ else
+ return nix_txschq_free_one(rvu, req);
}
static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
--
2.19.2