Message-ID: <20231204141956.3956942-3-sumang@marvell.com>
Date: Mon, 4 Dec 2023 19:49:56 +0530
From: Suman Ghosh <sumang@...vell.com>
To: <sgoutham@...vell.com>, <gakula@...vell.com>, <sbhatta@...vell.com>,
<hkelam@...vell.com>, <davem@...emloft.net>, <edumazet@...gle.com>,
<kuba@...nel.org>, <pabeni@...hat.com>, <netdev@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <lcherian@...vell.com>,
<jerinj@...vell.com>, <horms@...nel.org>, <wojciech.drewek@...el.com>
CC: Suman Ghosh <sumang@...vell.com>
Subject: [net-next PATCH v6 2/2] octeontx2-pf: TC flower offload support for mirror

This patch extends TC flower offload support for mirroring ingress
traffic to a different PF/VF. Below is an example command:

'tc filter add dev eth1 ingress protocol ip flower src_ip <ip-addr>
 skip_sw action mirred ingress mirror dev eth2'
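
As a usage note (illustrative only, not introduced by this patch), the
installed mirror filter and its hit counters can be inspected with the
standard tc command, e.g.:

'tc -s filter show dev eth1 ingress'
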
Signed-off-by: Suman Ghosh <sumang@...vell.com>
Reviewed-by: Wojciech Drewek <wojciech.drewek@...el.com>
Reviewed-by: Simon Horman <horms@...nel.org>
---
.../ethernet/marvell/octeontx2/nic/otx2_tc.c | 113 +++++++++++++++++-
1 file changed, 110 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 757806be48a2..0750fdcbcfd3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -29,6 +29,8 @@
#define OTX2_UNSUPP_LSE_DEPTH GENMASK(6, 4)
+#define MCAST_INVALID_GRP (-1U)
+
struct otx2_tc_flow_stats {
u64 bytes;
u64 pkts;
@@ -50,6 +52,7 @@ struct otx2_tc_flow {
u64 rate;
u32 burst;
bool is_pps;
+ u32 mcast_grp_idx;
};
static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
@@ -355,22 +358,96 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic,
return rc;
}
+static int otx2_tc_update_mcast(struct otx2_nic *nic,
+ struct npc_install_flow_req *req,
+ struct netlink_ext_ack *extack,
+ struct otx2_tc_flow *node,
+ struct nix_mcast_grp_update_req *ureq,
+ u8 num_intf)
+{
+ struct nix_mcast_grp_update_req *grp_update_req;
+ struct nix_mcast_grp_create_req *creq;
+ struct nix_mcast_grp_create_rsp *crsp;
+ u32 grp_index;
+ int rc;
+
+ mutex_lock(&nic->mbox.lock);
+ creq = otx2_mbox_alloc_msg_nix_mcast_grp_create(&nic->mbox);
+ if (!creq) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ creq->dir = NIX_MCAST_INGRESS;
+ /* Send message to AF */
+ rc = otx2_sync_mbox_msg(&nic->mbox);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to create multicast group");
+ goto error;
+ }
+
+ crsp = (struct nix_mcast_grp_create_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
+ 0,
+ &creq->hdr);
+ if (IS_ERR(crsp)) {
+ rc = PTR_ERR(crsp);
+ goto error;
+ }
+
+ grp_index = crsp->mcast_grp_idx;
+ grp_update_req = otx2_mbox_alloc_msg_nix_mcast_grp_update(&nic->mbox);
+ if (!grp_update_req) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ ureq->op = NIX_MCAST_OP_ADD_ENTRY;
+ ureq->mcast_grp_idx = grp_index;
+ ureq->num_mce_entry = num_intf;
+ ureq->pcifunc[0] = nic->pcifunc;
+ ureq->channel[0] = nic->hw.tx_chan_base;
+
+ ureq->dest_type[0] = NIX_RX_RSS;
+ ureq->rq_rss_index[0] = 0;
+ memcpy(&ureq->hdr, &grp_update_req->hdr, sizeof(struct mbox_msghdr));
+ memcpy(grp_update_req, ureq, sizeof(struct nix_mcast_grp_update_req));
+
+ /* Send message to AF */
+ rc = otx2_sync_mbox_msg(&nic->mbox);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
+ goto error;
+ }
+
+ mutex_unlock(&nic->mbox.lock);
+ req->op = NIX_RX_ACTIONOP_MCAST;
+ req->index = grp_index;
+ node->mcast_grp_idx = grp_index;
+ return 0;
+
+error:
+ mutex_unlock(&nic->mbox.lock);
+ return rc;
+}
+
static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct flow_action *flow_action,
struct npc_install_flow_req *req,
struct flow_cls_offload *f,
struct otx2_tc_flow *node)
{
+ struct nix_mcast_grp_update_req dummy_grp_update_req = { 0 };
struct netlink_ext_ack *extack = f->common.extack;
+ bool pps = false, mcast = false;
struct flow_action_entry *act;
struct net_device *target;
struct otx2_nic *priv;
u32 burst, mark = 0;
u8 nr_police = 0;
- bool pps = false;
+ u8 num_intf = 1;
+ int err, i;
u64 rate;
- int err;
- int i;
if (!flow_action_has_entries(flow_action)) {
NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
@@ -442,11 +519,30 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
req->index = act->rx_queue;
break;
+ case FLOW_ACTION_MIRRED_INGRESS:
+ target = act->dev;
+ priv = netdev_priv(target);
+ dummy_grp_update_req.pcifunc[num_intf] = priv->pcifunc;
+ dummy_grp_update_req.channel[num_intf] = priv->hw.tx_chan_base;
+ dummy_grp_update_req.dest_type[num_intf] = NIX_RX_RSS;
+ dummy_grp_update_req.rq_rss_index[num_intf] = 0;
+ mcast = true;
+ num_intf++;
+ break;
+
default:
return -EOPNOTSUPP;
}
}
+ if (mcast) {
+ err = otx2_tc_update_mcast(nic, req, extack, node,
+ &dummy_grp_update_req,
+ num_intf);
+ if (err)
+ return err;
+ }
+
if (nr_police > 1) {
NL_SET_ERR_MSG_MOD(extack,
"rate limit police offload requires a single action");
@@ -1066,6 +1162,7 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+ struct nix_mcast_grp_destroy_req *grp_destroy_req;
struct otx2_tc_flow *flow_node;
int err;
@@ -1101,6 +1198,15 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
}
free_mcam_flow:
+ /* Remove the multicast/mirror related nodes */
+ if (flow_node->mcast_grp_idx != MCAST_INVALID_GRP) {
+ mutex_lock(&nic->mbox.lock);
+ grp_destroy_req = otx2_mbox_alloc_msg_nix_mcast_grp_destroy(&nic->mbox);
+ grp_destroy_req->mcast_grp_idx = flow_node->mcast_grp_idx;
+ otx2_sync_mbox_msg(&nic->mbox);
+ mutex_unlock(&nic->mbox.lock);
+ }
+
otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
kfree_rcu(flow_node, rcu);
@@ -1138,6 +1244,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
spin_lock_init(&new_node->lock);
new_node->cookie = tc_flow_cmd->cookie;
new_node->prio = tc_flow_cmd->common.prio;
+ new_node->mcast_grp_idx = MCAST_INVALID_GRP;
memset(&dummy, 0, sizeof(struct npc_install_flow_req));
--
2.25.1