Message-ID: <20230110062019.892719-7-schalla@marvell.com>
Date: Tue, 10 Jan 2023 11:50:17 +0530
From: Srujana Challa <schalla@...vell.com>
To: <netdev@...r.kernel.org>
CC: <davem@...emloft.net>, <kuba@...nel.org>, <edumazet@...gle.com>,
	<pabeni@...hat.com>, <jerinj@...vell.com>, <sgoutham@...vell.com>,
	<lcherian@...vell.com>, <gakula@...vell.com>, <sbhatta@...vell.com>,
	<hkelam@...vell.com>, <schalla@...vell.com>
Subject: [PATCH v1 net-next,6/8] octeontx2-af: update CPT inbound inline IPsec config mailbox

Updates the CPT inbound inline IPsec configuration mailbox to take the
CPT credit, opcode, credit_th and bpid from the VF. Also adds a mailbox
to read back the current inbound inline IPsec configuration.

Signed-off-by: Srujana Challa <schalla@...vell.com>
---
 .../net/ethernet/marvell/octeontx2/af/mbox.h  |  6 ++-
 .../ethernet/marvell/octeontx2/af/rvu_nix.c   | 46 +++++++++++++++++--
 2 files changed, 47 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index b121e3d9f561..9eac73bfc9cb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -298,6 +298,8 @@ M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
 				msg_rsp)				\
 M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req,	\
 				nix_bandprof_get_hwinfo_rsp)		\
+M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg,		\
+				msg_req, nix_inline_ipsec_cfg)		\
 /* MCS mbox IDs (range 0xA000 - 0xBFFF) */				\
 M(MCS_ALLOC_RESOURCES,	0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
 				mcs_alloc_rsrc_rsp)			\
@@ -1197,7 +1199,7 @@ struct nix_inline_ipsec_cfg {
 	u32 cpt_credit;
 	struct {
 		u8 egrp;
-		u8 opcode;
+		u16 opcode;
 		u16 param1;
 		u16 param2;
 	} gen_cfg;
@@ -1206,6 +1208,8 @@ struct nix_inline_ipsec_cfg {
 		u8 cpt_slot;
 	} inst_qsel;
 	u8 enable;
+	u16 bpid;
+	u32 credit_th;
 };
 
 /* Per NIX LF inline IPSec configuration */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 6b8747ebc08c..89e94569e74c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -4731,6 +4731,10 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
 #define CPT_INST_QSEL_PF_FUNC	GENMASK_ULL(23, 8)
 #define CPT_INST_QSEL_SLOT	GENMASK_ULL(7, 0)
 
+#define CPT_INST_CREDIT_TH	GENMASK_ULL(53, 32)
+#define CPT_INST_CREDIT_BPID	GENMASK_ULL(30, 22)
+#define CPT_INST_CREDIT_CNT	GENMASK_ULL(21, 0)
+
 static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
 				 int blkaddr)
 {
@@ -4767,14 +4771,23 @@ static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *r
 			    val);
 
 		/* Set CPT credit */
-		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
-			    req->cpt_credit);
+		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
+		if ((val & 0x3FFFFF) != 0x3FFFFF)
+			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+				    0x3FFFFF - val);
+
+		val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit);
+		val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid);
+		val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val);
 	} else {
 		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
 			    0x0);
-		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
-			    0x3FFFFF);
+		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
+		if ((val & 0x3FFFFF) != 0x3FFFFF)
+			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+				    0x3FFFFF - val);
 	}
 }
 
@@ -4792,6 +4805,30 @@ int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
 	return 0;
 }
 
+int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu,
+					       struct msg_req *req,
+					       struct nix_inline_ipsec_cfg *rsp)
+
+{
+	u64 val;
+
+	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+		return 0;
+
+	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG);
+	rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val);
+	rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val);
+	rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val);
+	rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val);
+
+	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0));
+	rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val);
+	rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val);
+	rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val);
+
+	return 0;
+}
+
 int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
 					     struct nix_inline_ipsec_lf_cfg *req,
 					     struct msg_rsp *rsp)
@@ -4835,6 +4872,7 @@ int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
 
 	return 0;
 }
+
 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
 {
 	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
-- 
2.25.1
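
For readers unfamiliar with the FIELD_PREP()/FIELD_GET() pattern the patch
relies on, below is a minimal, self-contained sketch of how the three new
NIX_AF_RX_CPTX_CREDIT fields pack into a single 64-bit register value. The
helper macros are re-derived locally so the example builds as plain
user-space C (the kernel versions live in linux/bits.h and
linux/bitfield.h); the field positions come from the patch, while the
sample input values are made up.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's GENMASK_ULL()/FIELD_PREP()/FIELD_GET(). */
#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_PREP(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))
#define FIELD_GET(mask, reg) \
	(((uint64_t)(reg) & (mask)) >> __builtin_ctzll(mask))

/* NIX_AF_RX_CPTX_CREDIT layout, as defined in the patch. */
#define CPT_INST_CREDIT_TH	GENMASK_ULL(53, 32)	/* credit threshold */
#define CPT_INST_CREDIT_BPID	GENMASK_ULL(30, 22)	/* backpressure ID  */
#define CPT_INST_CREDIT_CNT	GENMASK_ULL(21, 0)	/* credit count     */

int main(void)
{
	/* Hypothetical values a VF might pass in the mailbox request. */
	uint32_t cpt_credit = 0x3FFFFF;	/* full 22-bit credit pool */
	uint16_t bpid = 5;
	uint32_t credit_th = 0x100;

	/* Pack, as nix_inline_ipsec_cfg() now does before rvu_write64(). */
	uint64_t reg = FIELD_PREP(CPT_INST_CREDIT_CNT, cpt_credit) |
		       FIELD_PREP(CPT_INST_CREDIT_BPID, bpid) |
		       FIELD_PREP(CPT_INST_CREDIT_TH, credit_th);
	printf("reg = 0x%016llx\n", (unsigned long long)reg);

	/* Unpack, as the new read mailbox handler does after rvu_read64(). */
	printf("cnt = 0x%llx, bpid = %llu, th = 0x%llx\n",
	       (unsigned long long)FIELD_GET(CPT_INST_CREDIT_CNT, reg),
	       (unsigned long long)FIELD_GET(CPT_INST_CREDIT_BPID, reg),
	       (unsigned long long)FIELD_GET(CPT_INST_CREDIT_TH, reg));
	return 0;
}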
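
A second detail that is easy to miss: both the enable and disable paths now
read the CREDIT register back and write the difference to the 22-bit
maximum, rather than the absolute count. That read-modify-write only makes
sense if the hardware treats a write to NIX_AF_RX_CPTX_CREDIT as an
adjustment added to the running counter (credits being consumed and
returned as packets flow through CPT) - an assumption inferred here from
the patch itself, not from the hardware manual. A small sketch of the
arithmetic, with a hypothetical read_credit_reg() standing in for
rvu_read64():

#include <stdint.h>
#include <stdio.h>

#define CPT_CREDIT_MAX 0x3FFFFFULL	/* 22-bit counter, bits 21:0 */

/* Hypothetical stand-in for the rvu_read64() call in the patch. */
static uint64_t read_credit_reg(void)
{
	return 0x3F0000;	/* pretend some credits are in flight */
}

int main(void)
{
	uint64_t val = read_credit_reg();

	/* Mirrors the patch: skip the write when the pool is already full,
	 * otherwise write the delta needed to top it back up (assuming the
	 * register write is additive, per the note above). */
	if ((val & CPT_CREDIT_MAX) != CPT_CREDIT_MAX)
		printf("write 0x%llx (delta back to a full pool)\n",
		       (unsigned long long)(CPT_CREDIT_MAX - val));
	return 0;
}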