Message-ID: <20230118120354.1017961-6-schalla@marvell.com>
Date: Wed, 18 Jan 2023 17:33:52 +0530
From: Srujana Challa <schalla@...vell.com>
To: <netdev@...r.kernel.org>
CC: <davem@...emloft.net>, <kuba@...nel.org>, <edumazet@...gle.com>,
<pabeni@...hat.com>, <jerinj@...vell.com>, <sgoutham@...vell.com>,
<lcherian@...vell.com>, <gakula@...vell.com>,
<sbhatta@...vell.com>, <hkelam@...vell.com>, <schalla@...vell.com>,
Nithin Dabilpuram <ndabilpuram@...vell.com>
Subject: [PATCH net-next v3 5/7] octeontx2-af: restore rxc conf after teardown sequence
From: Nithin Dabilpuram <ndabilpuram@...vell.com>

CN10K CPT coprocessor includes a component named RXC which is
responsible for reassembly of inner IP packets. RXC can evict the
oldest reassembly entries based on an age/threshold. At teardown
time, the age/threshold is set to the minimum values so that all
entries are evicted.

This patch adds code to restore the timeout and threshold
configuration after the teardown sequence completes, since it is a
global configuration.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@...vell.com>
---
.../ethernet/marvell/octeontx2/af/rvu_cpt.c | 22 +++++++++++++++----
1 file changed, 18 insertions(+), 4 deletions(-)
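
A rough standalone sketch of the save/evict/restore pattern the hunks
below implement, for anyone not used to the FIELD_GET()/FIELD_PREP()
bitfield helpers: the program saves threshold fields out of a register
image, forces them to the minimum (as the teardown path does to flush
RXC), and then writes the saved values back. The GENMASK_ULL/FIELD_GET/
FIELD_PREP re-definitions and the bit positions are illustrative
stand-ins only; the driver uses the real helpers from linux/bits.h and
linux/bitfield.h and the field layout defined in rvu_cpt.c.

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-ins for the kernel's bitfield helpers */
#define GENMASK_ULL(h, l)      ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_GET(mask, reg)   (((reg) & (mask)) >> __builtin_ctzll(mask))
#define FIELD_PREP(mask, val)  (((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

/* Placeholder layout, not the actual CPT_AF_RXC_DFRG bit positions */
#define RXC_ZOMBIE_THRES  GENMASK_ULL(59, 48)
#define RXC_ACTIVE_THRES  GENMASK_ULL(27, 16)

int main(void)
{
        uint64_t dfrg_reg, zombie_thres, active_thres;

        /* Stand-in for rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG) */
        dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, 0x1fe) |
                   FIELD_PREP(RXC_ACTIVE_THRES, 0x1fe);

        /* Save the current thresholds before forcing eviction */
        zombie_thres = FIELD_GET(RXC_ZOMBIE_THRES, dfrg_reg);
        active_thres = FIELD_GET(RXC_ACTIVE_THRES, dfrg_reg);

        /* Drop them to the minimum so all entries age out */
        dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, 1) |
                   FIELD_PREP(RXC_ACTIVE_THRES, 1);

        /* Once teardown is done, put the saved values back */
        dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, zombie_thres) |
                   FIELD_PREP(RXC_ACTIVE_THRES, active_thres);

        printf("restored DFRG image: 0x%016llx\n",
               (unsigned long long)dfrg_reg);
        return 0;
}

Building this with any C compiler and running it just prints the
repacked register image; it has no dependency on the driver.
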
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index f970cb9b0bff..302ff549284e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -812,10 +812,21 @@ int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req,
 #define RXC_ZOMBIE_COUNT GENMASK_ULL(60, 48)
 
 static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req,
-                             int blkaddr)
+                             int blkaddr, struct cpt_rxc_time_cfg_req *save)
 {
         u64 dfrg_reg;
 
+        if (save) {
+                /* Save older config */
+                dfrg_reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
+                save->zombie_thres = FIELD_GET(RXC_ZOMBIE_THRES, dfrg_reg);
+                save->zombie_limit = FIELD_GET(RXC_ZOMBIE_LIMIT, dfrg_reg);
+                save->active_thres = FIELD_GET(RXC_ACTIVE_THRES, dfrg_reg);
+                save->active_limit = FIELD_GET(RXC_ACTIVE_LIMIT, dfrg_reg);
+
+                save->step = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
+        }
+
         dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres);
         dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit);
         dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres);
@@ -840,7 +851,7 @@ int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
             !is_cpt_vf(rvu, req->hdr.pcifunc))
                 return CPT_AF_ERR_ACCESS_DENIED;
 
-        cpt_rxc_time_cfg(rvu, req, blkaddr);
+        cpt_rxc_time_cfg(rvu, req, blkaddr, NULL);
 
         return 0;
 }
@@ -886,7 +897,7 @@ int rvu_mbox_handler_cpt_lf_reset(struct rvu *rvu, struct cpt_lf_rst_req *req,
 
 static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
 {
-        struct cpt_rxc_time_cfg_req req;
+        struct cpt_rxc_time_cfg_req req, prev;
         int timeout = 2000;
         u64 reg;
 
@@ -902,7 +913,7 @@ static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
         req.active_thres = 1;
         req.active_limit = 1;
 
-        cpt_rxc_time_cfg(rvu, &req, blkaddr);
+        cpt_rxc_time_cfg(rvu, &req, blkaddr, &prev);
 
         do {
                 reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
@@ -928,6 +939,9 @@ static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
 
         if (timeout == 0)
                 dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
+
+        /* Restore config */
+        cpt_rxc_time_cfg(rvu, &prev, blkaddr, NULL);
 }
 
 #define INFLIGHT GENMASK_ULL(8, 0)
--
2.25.1