Message-ID: <1629893926-18398-2-git-send-email-sgoutham@marvell.com>
Date: Wed, 25 Aug 2021 17:48:38 +0530
From: Sunil Goutham <sgoutham@...vell.com>
To: <netdev@...r.kernel.org>, <davem@...emloft.net>, <kuba@...nel.org>
CC: Nithin Dabilpuram <ndabilpuram@...vell.com>,
Geetha sowjanya <gakula@...vell.com>,
Subbaraya Sundeep <sbhatta@...vell.com>,
Sunil Goutham <sgoutham@...vell.com>
Subject: [net-next PATCH 1/9] octeontx2-af: enable tx shaping feature for 96xx C0
From: Nithin Dabilpuram <ndabilpuram@...vell.com>
All silicons from 96xx C0 onwards support traffic shaping. This patch
enables that feature, along with the following changes:
- When the PIR/CIR shaping config is modified, toggle SW_XOFF
  for the config to take effect
- Before an SMQ flush, clear SW_XOFF at all parent schedulers
- Add support for reading the current transmit scheduler
  configuration via mbox
Signed-off-by: Nithin Dabilpuram <ndabilpuram@...vell.com>
Signed-off-by: Geetha sowjanya <gakula@...vell.com>
Signed-off-by: Subbaraya Sundeep <sbhatta@...vell.com>
Signed-off-by: Sunil Goutham <sgoutham@...vell.com>
---
drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 22 +-
drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 7 +-
drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 23 +-
.../net/ethernet/marvell/octeontx2/af/rvu_nix.c | 262 +++++++++++++++++++--
.../net/ethernet/marvell/octeontx2/af/rvu_reg.c | 4 +-
5 files changed, 281 insertions(+), 37 deletions(-)
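Note (not part of the patch): a minimal sketch of how a PF/VF driver
could exercise the extended NIX_TXSCHQ_CFG mailbox, assuming the usual
otx2 mbox helpers (otx2_mbox_alloc_msg_nix_txschq_cfg(),
otx2_sync_mbox_msg(), otx2_mbox_get_rsp()); error handling trimmed
for brevity:

/* Hedged sketch: read back a TL2 scheduler register via the new
 * 'read' flag. For writes, regval_mask selects bits to preserve:
 * the AF applies regval = (current & mask) | (new & ~mask), so an
 * all-zero mask overwrites the whole register.
 */
static int otx2_read_tl2_cir(struct otx2_nic *pfvf, u16 schq, u64 *cir)
{
	struct nix_txschq_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->read = 1;			/* new in this patch: read, don't write */
	req->lvl = NIX_TXSCH_LVL_TL2;
	req->num_regs = 1;
	req->reg[0] = NIX_AF_TL2X_CIR(schq);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (!err) {
		rsp_hdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp_hdr))
			err = PTR_ERR(rsp_hdr);
		else
			*cir = ((struct nix_txschq_config *)rsp_hdr)->regval[0];
	}
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}
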
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index d8f5e61..5ffb6b6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -87,7 +87,7 @@ struct mbox_msghdr {
#define OTX2_MBOX_REQ_SIG (0xdead)
#define OTX2_MBOX_RSP_SIG (0xbeef)
u16 sig; /* Signature, for validating corrupted msgs */
-#define OTX2_MBOX_VERSION (0x0007)
+#define OTX2_MBOX_VERSION (0x0009)
u16 ver; /* Version of msg's structure for this ID */
u16 next_msgoff; /* Offset of next msg within mailbox region */
int rc; /* Msg process'ed response code */
@@ -243,7 +243,8 @@ M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \
M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \
nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \
-M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \
+M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, \
+ nix_txschq_config) \
M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \
M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, \
nix_vtag_config_rsp) \
@@ -370,16 +371,20 @@ struct msix_offset_rsp {
struct mbox_msghdr hdr;
u16 npa_msixoff;
u16 nix_msixoff;
- u8 sso;
- u8 ssow;
- u8 timlfs;
- u8 cptlfs;
+ u16 sso;
+ u16 ssow;
+ u16 timlfs;
+ u16 cptlfs;
u16 sso_msixoff[MAX_RVU_BLKLF_CNT];
u16 ssow_msixoff[MAX_RVU_BLKLF_CNT];
u16 timlf_msixoff[MAX_RVU_BLKLF_CNT];
u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
- u8 cpt1_lfs;
+ u16 cpt1_lfs;
+ u16 ree0_lfs;
+ u16 ree1_lfs;
u16 cpt1_lf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 ree0_lf_msixoff[MAX_RVU_BLKLF_CNT];
+ u16 ree1_lf_msixoff[MAX_RVU_BLKLF_CNT];
};
struct get_hw_cap_rsp {
@@ -836,6 +841,7 @@ struct nix_txsch_free_req {
struct nix_txschq_config {
struct mbox_msghdr hdr;
u8 lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */
+ u8 read;
#define TXSCHQ_IDX_SHIFT 16
#define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1)
#define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK)
@@ -843,6 +849,8 @@ struct nix_txschq_config {
#define MAX_REGS_PER_MBOX_MSG 20
u64 reg[MAX_REGS_PER_MBOX_MSG];
u64 regval[MAX_REGS_PER_MBOX_MSG];
+ /* Mask of bits to retain from current value; all 0's => overwrite */
+ u64 regval_mask[MAX_REGS_PER_MBOX_MSG];
};
struct nix_vtag_config {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 5bdeed2..8a7ecce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -70,18 +70,21 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
hw->cap.nix_shaping = true;
hw->cap.nix_tx_link_bp = true;
hw->cap.nix_rx_multicast = true;
+ hw->cap.nix_shaper_toggle_wait = false;
hw->rvu = rvu;
- if (is_rvu_96xx_B0(rvu)) {
+ if (is_rvu_pre_96xx_C0(rvu)) {
hw->cap.nix_fixed_txschq_mapping = true;
hw->cap.nix_txsch_per_cgx_lmac = 4;
hw->cap.nix_txsch_per_lbk_lmac = 132;
hw->cap.nix_txsch_per_sdp_lmac = 76;
hw->cap.nix_shaping = false;
hw->cap.nix_tx_link_bp = false;
- if (is_rvu_96xx_A0(rvu))
+ if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu))
hw->cap.nix_rx_multicast = false;
}
+ if (!is_rvu_pre_96xx_C0(rvu))
+ hw->cap.nix_shaper_toggle_wait = true;
if (!is_rvu_otx2(rvu))
hw->cap.per_pf_mbox_regs = true;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index d86c6b3..ab79232 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -328,6 +328,7 @@ struct hw_cap {
u16 nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */
bool nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
bool nix_shaping; /* Is shaping and coloring supported */
+ bool nix_shaper_toggle_wait; /* Shaping toggle needs poll/wait */
bool nix_tx_link_bp; /* Can link backpressure TL queues ? */
bool nix_rx_multicast; /* Rx packet replication support */
bool nix_common_dwrr_mtu; /* Common DWRR MTU for quantum config */
@@ -517,20 +518,34 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
}
/* Silicon revisions */
+static inline bool is_rvu_pre_96xx_C0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+ /* 96XX A0/B0, 95XX A0/A1/B0 chips */
+ return ((pdev->revision == 0x00) || (pdev->revision == 0x01) ||
+ (pdev->revision == 0x10) || (pdev->revision == 0x11) ||
+ (pdev->revision == 0x14));
+}
+
static inline bool is_rvu_96xx_A0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
- return (pdev->revision == 0x00) &&
- (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
+ return (pdev->revision == 0x00);
}
static inline bool is_rvu_96xx_B0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
- return ((pdev->revision == 0x00) || (pdev->revision == 0x01)) &&
- (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
+ return (pdev->revision == 0x00) || (pdev->revision == 0x01);
+}
+
+static inline bool is_rvu_95xx_A0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ return (pdev->revision == 0x10) || (pdev->revision == 0x11);
}
/* REVID for PCIe devices.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index a5c8067..54d2dfa 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -1423,12 +1423,104 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
return 0;
}
+/* Handle shaper update specially for a few silicon revisions */
+static bool
+handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
+ int lvl, u64 reg, u64 regval)
+{
+ u64 regbase, oldval, sw_xoff = 0;
+ u64 dbgval, md_debug0 = 0;
+ unsigned long poll_tmo;
+ bool rate_reg = 0;
+ u32 schq;
+
+ regbase = reg & 0xFFFF;
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+
+ /* Check for rate register */
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);
+
+ rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);
+
+ rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
+ regbase == NIX_AF_TL2X_PIR(0));
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);
+
+ rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
+ regbase == NIX_AF_TL3X_PIR(0));
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
+ sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);
+
+ rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
+ regbase == NIX_AF_TL4X_PIR(0));
+ break;
+ case NIX_TXSCH_LVL_MDQ:
+ sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
+ rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
+ regbase == NIX_AF_MDQX_PIR(0));
+ break;
+ }
+
+ if (!rate_reg)
+ return false;
+
+ /* Nothing special to do when state is not toggled */
+ oldval = rvu_read64(rvu, blkaddr, reg);
+ if ((oldval & 0x1) == (regval & 0x1)) {
+ rvu_write64(rvu, blkaddr, reg, regval);
+ return true;
+ }
+
+ /* PIR/CIR disable */
+ if (!(regval & 0x1)) {
+ rvu_write64(rvu, blkaddr, sw_xoff, 1);
+ rvu_write64(rvu, blkaddr, reg, 0);
+ udelay(4);
+ rvu_write64(rvu, blkaddr, sw_xoff, 0);
+ return true;
+ }
+
+ /* PIR/CIR enable */
+ rvu_write64(rvu, blkaddr, sw_xoff, 1);
+ if (md_debug0) {
+ poll_tmo = jiffies + usecs_to_jiffies(10000);
+ /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
+ do {
+ if (time_after(jiffies, poll_tmo)) {
+ dev_err(rvu->dev,
+ "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
+ nixlf, schq, lvl);
+ goto exit;
+ }
+ usleep_range(1, 5);
+ dbgval = rvu_read64(rvu, blkaddr, md_debug0);
+ } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
+ }
+ rvu_write64(rvu, blkaddr, reg, regval);
+exit:
+ rvu_write64(rvu, blkaddr, sw_xoff, 0);
+ return true;
+}
+
/* Disable shaping of pkts by a scheduler queue
* at a given scheduler level.
*/
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
- int lvl, int schq)
+ int nixlf, int lvl, int schq)
{
+ struct rvu_hwinfo *hw = rvu->hw;
u64 cir_reg = 0, pir_reg = 0;
u64 cfg;
@@ -1449,6 +1541,21 @@ static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
cir_reg = NIX_AF_TL4X_CIR(schq);
pir_reg = NIX_AF_TL4X_PIR(schq);
break;
+ case NIX_TXSCH_LVL_MDQ:
+ cir_reg = NIX_AF_MDQX_CIR(schq);
+ pir_reg = NIX_AF_MDQX_PIR(schq);
+ break;
+ }
+
+ /* Shaper state toggle needs wait/poll */
+ if (hw->cap.nix_shaper_toggle_wait) {
+ if (cir_reg)
+ handle_txschq_shaper_update(rvu, blkaddr, nixlf,
+ lvl, cir_reg, 0);
+ if (pir_reg)
+ handle_txschq_shaper_update(rvu, blkaddr, nixlf,
+ lvl, pir_reg, 0);
+ return;
}
if (!cir_reg)
@@ -1466,6 +1573,7 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
int lvl, int schq)
{
struct rvu_hwinfo *hw = rvu->hw;
+ int link_level;
int link;
if (lvl >= hw->cap.nix_tx_aggr_lvl)
@@ -1475,7 +1583,9 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
if (lvl == NIX_TXSCH_LVL_TL4)
rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
- if (lvl != NIX_TXSCH_LVL_TL2)
+ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
+ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+ if (lvl != link_level)
return;
/* Reset TL2's CGX or LBK link config */
@@ -1484,6 +1594,40 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}
+static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
+ int lvl, int schq)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 reg;
+
+ /* Skip this if shaping is not supported */
+ if (!hw->cap.nix_shaping)
+ return;
+
+ /* Clear level specific SW_XOFF */
+ switch (lvl) {
+ case NIX_TXSCH_LVL_TL1:
+ reg = NIX_AF_TL1X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ reg = NIX_AF_TL2X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ reg = NIX_AF_TL3X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ reg = NIX_AF_TL4X_SW_XOFF(schq);
+ break;
+ case NIX_TXSCH_LVL_MDQ:
+ reg = NIX_AF_MDQX_SW_XOFF(schq);
+ break;
+ default:
+ return;
+ }
+
+ rvu_write64(rvu, blkaddr, reg, 0x0);
+}
+
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1661,15 +1805,14 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
int link, blkaddr, rc = 0;
int lvl, idx, start, end;
struct nix_txsch *txsch;
- struct rvu_pfvf *pfvf;
struct nix_hw *nix_hw;
u32 *pfvf_map;
+ int nixlf;
u16 schq;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (!pfvf->nixlf || blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (rc)
+ return rc;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
@@ -1718,7 +1861,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
NIX_TXSCHQ_CFG_DONE))
pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
- nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
}
for (idx = 0; idx < req->schq[lvl]; idx++) {
@@ -1727,7 +1870,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
NIX_TXSCHQ_CFG_DONE))
pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
- nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
+ nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
}
}
@@ -1744,8 +1887,8 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
return rc;
}
-static void nix_smq_flush(struct rvu *rvu, int blkaddr,
- int smq, u16 pcifunc, int nixlf)
+static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ int smq, u16 pcifunc, int nixlf)
{
int pf = rvu_get_pf(pcifunc);
u8 cgx_id = 0, lmac_id = 0;
@@ -1780,6 +1923,7 @@ static void nix_smq_flush(struct rvu *rvu, int blkaddr,
/* restore cgx tx state */
if (restore_tx_en)
cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ return err;
}
static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
@@ -1788,6 +1932,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
struct rvu_hwinfo *hw = rvu->hw;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
+ u16 map_func;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -1801,19 +1946,36 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
- /* Disable TL2/3 queue links before SMQ flush*/
+ /* Disable TL2/3 queue links and all XOFF's before SMQ flush */
mutex_lock(&rvu->rsrc_lock);
- for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
- if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
+ for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+
+ if (lvl >= hw->cap.nix_tx_aggr_lvl)
continue;
- txsch = &nix_hw->txsch[lvl];
for (schq = 0; schq < txsch->schq.max; schq++) {
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
+ nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
}
}
+ nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
+ nix_get_tx_link(rvu, pcifunc));
+
+ /* On PF cleanup, clear the cfg done flag as
+ * the PF may have changed the default config.
+ */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
+ schq = nix_get_tx_link(rvu, pcifunc);
+ /* Do not clear pcifunc in txsch->pfvf_map[schq] because
+ * VF might be using this TL1 queue
+ */
+ map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
+ txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
+ }
/* Flush SMQs */
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
@@ -1859,6 +2021,7 @@ static int nix_txschq_free_one(struct rvu *rvu,
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u32 *pfvf_map;
+ int rc;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -1883,15 +2046,24 @@ static int nix_txschq_free_one(struct rvu *rvu,
mutex_lock(&rvu->rsrc_lock);
if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
- mutex_unlock(&rvu->rsrc_lock);
+ rc = NIX_AF_ERR_TLX_INVALID;
goto err;
}
+ /* Clear SW_XOFF of this resource only.
+ * For SMQ level, all XOFF's along the path
+ * must be cleared by the user.
+ */
+ nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
+
/* Flush if it is a SMQ. Onus of disabling
* TL2/3 queue links before SMQ flush is on user
*/
- if (lvl == NIX_TXSCH_LVL_SMQ)
- nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
+ if (lvl == NIX_TXSCH_LVL_SMQ &&
+ nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
+ rc = NIX_AF_SMQ_FLUSH_FAILED;
+ goto err;
+ }
/* Free the resource */
rvu_free_rsrc(&txsch->schq, schq);
@@ -1899,7 +2071,8 @@ static int nix_txschq_free_one(struct rvu *rvu,
mutex_unlock(&rvu->rsrc_lock);
return 0;
err:
- return NIX_AF_ERR_TLX_INVALID;
+ mutex_unlock(&rvu->rsrc_lock);
+ return rc;
}
int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
@@ -1982,6 +2155,11 @@ static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
regbase == NIX_AF_TL4X_PIR(0))
return false;
break;
+ case NIX_TXSCH_LVL_MDQ:
+ if (regbase == NIX_AF_MDQX_CIR(0) ||
+ regbase == NIX_AF_MDQX_PIR(0))
+ return false;
+ break;
}
return true;
}
@@ -2014,6 +2192,33 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}
+/* Register offset - [15:0]
+ * Scheduler Queue number - [25:16]
+ */
+#define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0)
+
+static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr, struct nix_txschq_config *req,
+ struct nix_txschq_config *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int idx, schq;
+ u64 reg;
+
+ for (idx = 0; idx < req->num_regs; idx++) {
+ reg = req->reg[idx];
+ reg &= NIX_TX_SCHQ_MASK;
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+ if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
+ !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
+ return NIX_AF_INVAL_TXSCHQ_CFG;
+ rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
+ }
+ rsp->lvl = req->lvl;
+ rsp->num_regs = req->num_regs;
+ return 0;
+}
+
static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
u16 pcifunc, struct nix_txsch *txsch)
{
@@ -2045,11 +2250,11 @@ static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
- struct msg_rsp *rsp)
+ struct nix_txschq_config *rsp)
{
+ u64 reg, val, regval, schq_regbase, val_mask;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
- u64 reg, regval, schq_regbase;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
int blkaddr, idx, err;
@@ -2068,6 +2273,9 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
+ if (req->read)
+ return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
+
txsch = &nix_hw->txsch[req->lvl];
pfvf_map = txsch->pfvf_map;
@@ -2082,8 +2290,10 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
for (idx = 0; idx < req->num_regs; idx++) {
reg = req->reg[idx];
+ reg &= NIX_TX_SCHQ_MASK;
regval = req->regval[idx];
schq_regbase = reg & 0xFFFF;
+ val_mask = req->regval_mask[idx];
if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
txsch->lvl, reg, regval))
@@ -2093,6 +2303,15 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
if (!is_txschq_shaping_valid(hw, req->lvl, reg))
continue;
+ val = rvu_read64(rvu, blkaddr, reg);
+ regval = (val & val_mask) | (regval & ~val_mask);
+
+ /* Handle shaping state toggle specially */
+ if (hw->cap.nix_shaper_toggle_wait &&
+ handle_txschq_shaper_update(rvu, blkaddr, nixlf,
+ req->lvl, reg, regval))
+ continue;
+
/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
@@ -2133,7 +2352,6 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
&nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
-
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
index e266f0c..c7a7fd3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
@@ -33,8 +33,8 @@ static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
{NIX_TXSCH_LVL_SMQ, 2, 0xFFFF, {{0x0700, 0x0708}, {0x1400, 0x14C8} } },
{NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18},
{0x1200, 0x12E0} } },
- {NIX_TXSCH_LVL_TL3, 3, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
- {0x1610, 0x1618} } },
+ {NIX_TXSCH_LVL_TL3, 4, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
+ {0x1610, 0x1618}, {0x1700, 0x17B0} } },
{NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } },
{NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
};
--
2.7.4