[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20150120202909.1741.1884.stgit@nitbit.x32>
Date: Tue, 20 Jan 2015 12:29:10 -0800
From: John Fastabend <john.fastabend@...il.com>
To: tgraf@...g.ch, simon.horman@...ronome.com, sfeldma@...il.com
Cc: netdev@...r.kernel.org, jhs@...atatu.com, davem@...emloft.net,
gerlitz.or@...il.com, andy@...yhouse.net, ast@...mgrid.com
Subject: [net-next PATCH v3 07/12] net: rocker: add set rule ops
Implement set rule operations for existing rocker tables.
Signed-off-by: John Fastabend <john.r.fastabend@...el.com>
---
drivers/net/ethernet/rocker/rocker.c | 421 ++++++++++++++++++++++++++++++++++
1 file changed, 420 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index d2ea451..51290882 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -3830,6 +3830,422 @@ static struct net_flow_hdr_node **rocker_get_hgraph(struct net_device *d)
{
return rocker_header_nodes;
}
+
+/* Translate a Flow API table id into the matching OF-DPA table id.
+ * Returns 0 when the id does not name a known rocker table.
+ */
+static u32 rocker_goto_value(u32 id)
+{
+	static const u32 tbl_map[][2] = {
+		{ ROCKER_FLOW_TABLE_ID_INGRESS_PORT,
+		  ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT },
+		{ ROCKER_FLOW_TABLE_ID_VLAN,
+		  ROCKER_OF_DPA_TABLE_ID_VLAN },
+		{ ROCKER_FLOW_TABLE_ID_TERMINATION_MAC,
+		  ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC },
+		{ ROCKER_FLOW_TABLE_ID_UNICAST_ROUTING,
+		  ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING },
+		{ ROCKER_FLOW_TABLE_ID_MULTICAST_ROUTING,
+		  ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING },
+		{ ROCKER_FLOW_TABLE_ID_BRIDGING,
+		  ROCKER_OF_DPA_TABLE_ID_BRIDGING },
+		{ ROCKER_FLOW_TABLE_ID_ACL_POLICY,
+		  ROCKER_OF_DPA_TABLE_ID_ACL_POLICY },
+	};
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(tbl_map); i++) {
+		if (tbl_map[i][0] == id)
+			return tbl_map[i][1];
+	}
+
+	return 0;
+}
+
+/* rocker_flow_set_ig_port() - program an ingress port table entry
+ * @dev: rocker port netdev the rule arrived on
+ * @rule: generic net_flow rule (matches + actions) to translate
+ *
+ * The ingress port table supports exactly one field/mask and one goto
+ * action, which keeps key construction simple.  If the user passes the
+ * same field multiple times in one message only the first is used; the
+ * core validation does not currently catch duplicates.
+ *
+ * Return: 0 on success, -EINVAL on a malformed rule, otherwise the
+ * error from rocker_flow_tbl_ig_port().
+ */
+static int rocker_flow_set_ig_port(struct net_device *dev,
+				   struct net_flow_rule *rule)
+{
+	struct rocker_port *rocker_port = netdev_priv(dev);
+	enum rocker_of_dpa_table_id goto_tbl;
+	u32 in_lport_mask, in_lport;
+	int flags = 0;
+
+	/* The sibling handlers walk matches/actions defensively; apply
+	 * the same minimal guard here instead of trusting upper-layer
+	 * validation to have rejected empty rules.
+	 */
+	if (!rule->matches || !rule->matches[0].instance ||
+	    !rule->actions || !rule->actions[0].uid ||
+	    !rule->actions[0].args)
+		return -EINVAL;
+
+	in_lport = rule->matches[0].value_u32;
+	in_lport_mask = rule->matches[0].mask_u32;
+	goto_tbl = rocker_goto_value(rule->actions[0].args[0].value_u16);
+
+	return rocker_flow_tbl_ig_port(rocker_port, flags,
+				       in_lport, in_lport_mask,
+				       goto_tbl);
+}
+
+/* rocker_flow_set_vlan() - program a VLAN table entry
+ * @dev: rocker port netdev the rule arrived on
+ * @rule: generic net_flow rule (matches + actions) to translate
+ *
+ * An in_lport match is required; the VLAN id match defaults to "any".
+ * The only supported action is SET_VLAN_ID, which overrides the
+ * port's default internal VLAN mapping.
+ *
+ * Return: 0 on success, -EINVAL on unsupported match/action,
+ * otherwise the error from rocker_flow_tbl_vlan().
+ */
+static int rocker_flow_set_vlan(struct net_device *dev,
+				struct net_flow_rule *rule)
+{
+	struct rocker_port *rocker_port = netdev_priv(dev);
+	__be16 vlan_id, vlan_id_mask, new_vlan_id;
+	bool untagged, have_in_lport = false;
+	enum rocker_of_dpa_table_id goto_tbl;
+	int i, flags = 0;
+	u32 in_lport = 0;
+
+	goto_tbl = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
+
+	/* If user does not specify vid match default to any */
+	vlan_id = htons(1);
+	vlan_id_mask = 0;
+
+	for (i = 0; rule->matches && rule->matches[i].instance; i++) {
+		switch (rule->matches[i].instance) {
+		case ROCKER_HEADER_INSTANCE_IN_LPORT:
+			in_lport = rule->matches[i].value_u32;
+			have_in_lport = true;
+			break;
+		case ROCKER_HEADER_INSTANCE_VLAN_OUTER:
+			/* only the VID field is honoured; other VLAN
+			 * fields are silently ignored, matching the
+			 * other table handlers
+			 */
+			if (rule->matches[i].field != HEADER_VLAN_VID)
+				break;
+
+			vlan_id = htons(rule->matches[i].value_u16);
+			vlan_id_mask = htons(rule->matches[i].mask_u16);
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	/* in_lport has no sane default; it is a required match */
+	if (!have_in_lport)
+		return -EINVAL;
+
+	/* If user does not specify a new vlan id use default vlan id */
+	new_vlan_id = rocker_port_vid_to_vlan(rocker_port, vlan_id, &untagged);
+
+	for (i = 0; rule->actions && rule->actions[i].uid; i++) {
+		switch (rule->actions[i].uid) {
+		case ACTION_SET_VLAN_ID:
+			if (!rule->actions[i].args)
+				return -EINVAL;
+
+			new_vlan_id = htons(rule->actions[i].args[0].value_u16);
+			if (new_vlan_id)
+				untagged = false;
+			break;
+		default:
+			/* reject unknown actions instead of silently
+			 * ignoring them, for consistency with the
+			 * other table handlers
+			 */
+			return -EINVAL;
+		}
+	}
+
+	return rocker_flow_tbl_vlan(rocker_port, flags, in_lport,
+				    vlan_id, vlan_id_mask, goto_tbl,
+				    untagged, new_vlan_id);
+}
+
+/* rocker_flow_set_term_mac() - program a termination MAC table entry
+ * @dev: rocker port netdev the rule arrived on
+ * @rule: generic net_flow rule (matches + actions) to translate
+ *
+ * An ethertype match is mandatory; lport, VLAN id and destination MAC
+ * matches are optional and default to "match any" (mask of zero).
+ * The only supported action is copy-to-cpu.
+ *
+ * Return: 0 on success, -EINVAL on an unsupported match/action or a
+ * missing ethertype, otherwise the error from rocker_flow_tbl_term_mac().
+ */
+static int rocker_flow_set_term_mac(struct net_device *dev,
+ struct net_flow_rule *rule)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ __be16 vlan_id, vlan_id_mask, ethtype = 0;
+ const u8 *eth_dst, *eth_dst_mask;
+ u32 in_lport, in_lport_mask;
+ int i, flags = 0;
+ bool copy_to_cpu;
+
+ /* If user does not specify vid match default to any */
+ vlan_id = rocker_port->internal_vlan_id;
+ vlan_id_mask = 0;
+
+ /* If user does not specify in_lport match default to any */
+ in_lport = rocker_port->lport;
+ in_lport_mask = 0;
+
+ /* If user does not specify a mac address match any */
+ eth_dst = rocker_port->dev->dev_addr;
+ eth_dst_mask = zero_mac;
+
+ /* matches[] is terminated by a zero instance */
+ for (i = 0; rule->matches && rule->matches[i].instance; i++) {
+ switch (rule->matches[i].instance) {
+ case ROCKER_HEADER_INSTANCE_IN_LPORT:
+ in_lport = rule->matches[i].value_u32;
+ in_lport_mask = rule->matches[i].mask_u32;
+ break;
+ case ROCKER_HEADER_INSTANCE_VLAN_OUTER:
+ /* only the VID field is honoured; other VLAN fields
+ * are silently ignored
+ */
+ if (rule->matches[i].field != HEADER_VLAN_VID)
+ break;
+
+ vlan_id = htons(rule->matches[i].value_u16);
+ vlan_id_mask = htons(rule->matches[i].mask_u16);
+ break;
+ case ROCKER_HEADER_INSTANCE_ETHERNET:
+ switch (rule->matches[i].field) {
+ case HEADER_ETHERNET_DST_MAC:
+ /* value/mask are u64-encoded MAC addresses; reuse
+ * their storage as byte arrays
+ */
+ eth_dst = (u8 *)&rule->matches[i].value_u64;
+ eth_dst_mask = (u8 *)&rule->matches[i].mask_u64;
+ break;
+ case HEADER_ETHERNET_ETHERTYPE:
+ ethtype = htons(rule->matches[i].value_u16);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* ethertype is a required match for this table */
+ if (!ethtype)
+ return -EINVAL;
+
+ /* By default do not copy to cpu */
+ copy_to_cpu = false;
+
+ for (i = 0; rule->actions && rule->actions[i].uid; i++) {
+ switch (rule->actions[i].uid) {
+ case ACTION_COPY_TO_CPU:
+ copy_to_cpu = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return rocker_flow_tbl_term_mac(rocker_port, in_lport, in_lport_mask,
+ ethtype, eth_dst, eth_dst_mask,
+ vlan_id, vlan_id_mask,
+ copy_to_cpu, flags);
+}
+
+/* Unicast routing table rules are not implemented yet. */
+static int rocker_flow_set_ucast_routing(struct net_device *dev,
+ struct net_flow_rule *rule)
+{
+ return -EOPNOTSUPP;
+}
+
+/* Multicast routing table rules are not implemented yet. */
+static int rocker_flow_set_mcast_routing(struct net_device *dev,
+ struct net_flow_rule *rule)
+{
+ return -EOPNOTSUPP;
+}
+
+/* rocker_flow_set_bridge() - program a bridging table entry
+ * @dev: rocker port netdev the rule arrived on
+ * @rule: generic net_flow rule (matches + actions) to translate
+ *
+ * Optional matches: lport, outer VLAN id, destination MAC; all default
+ * to "match any".  Supported actions: copy-to-cpu and set-group-id.
+ *
+ * Return: 0 on success, -EINVAL on an unsupported match/action,
+ * otherwise the error from rocker_flow_tbl_bridge().
+ */
+static int rocker_flow_set_bridge(struct net_device *dev,
+ struct net_flow_rule *rule)
+{
+ enum rocker_of_dpa_table_id goto_tbl;
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ u32 in_lport, in_lport_mask, group_id, tunnel_id;
+ __be16 vlan_id, vlan_id_mask;
+ const u8 *eth_dst, *eth_dst_mask;
+ int i, flags = 0;
+ bool copy_to_cpu;
+
+ goto_tbl = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+
+ /* If user does not specify vid match default to any */
+ vlan_id = rocker_port->internal_vlan_id;
+ vlan_id_mask = 0;
+
+ /* If user does not specify in_lport match default to any */
+ in_lport = rocker_port->lport;
+ in_lport_mask = 0;
+
+ /* If user does not specify a mac address match any */
+ eth_dst = rocker_port->dev->dev_addr;
+ eth_dst_mask = NULL;
+
+ /* Do not support for tunnel_id yet. */
+ tunnel_id = 0;
+
+ for (i = 0; rule->matches && rule->matches[i].instance; i++) {
+ switch (rule->matches[i].instance) {
+ case ROCKER_HEADER_INSTANCE_IN_LPORT:
+ /* NOTE(review): in_lport/in_lport_mask are parsed here but
+ * never passed to rocker_flow_tbl_bridge() below, so an
+ * lport match is silently ignored -- confirm intended
+ */
+ in_lport = rule->matches[i].value_u32;
+ in_lport_mask = rule->matches[i].mask_u32;
+ break;
+ case ROCKER_HEADER_INSTANCE_VLAN_OUTER:
+ /* only the VID field is honoured; other VLAN fields
+ * are silently ignored
+ */
+ if (rule->matches[i].field != HEADER_VLAN_VID)
+ break;
+
+ /* NOTE(review): vlan_id_mask is also never passed to
+ * rocker_flow_tbl_bridge() -- verify against its signature
+ */
+ vlan_id = htons(rule->matches[i].value_u16);
+ vlan_id_mask = htons(rule->matches[i].mask_u16);
+ break;
+ case ROCKER_HEADER_INSTANCE_ETHERNET:
+ switch (rule->matches[i].field) {
+ case HEADER_ETHERNET_DST_MAC:
+ eth_dst = (u8 *)&rule->matches[i].value_u64;
+ eth_dst_mask = (u8 *)&rule->matches[i].mask_u64;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* By default do not copy to cpu and skip group assignment */
+ copy_to_cpu = false;
+ group_id = ROCKER_GROUP_NONE;
+
+ for (i = 0; rule->actions && rule->actions[i].uid; i++) {
+ struct net_flow_action_arg *arg = &rule->actions[i].args[0];
+
+ switch (rule->actions[i].uid) {
+ case ACTION_COPY_TO_CPU:
+ copy_to_cpu = true;
+ break;
+ case ROCKER_ACTION_SET_GROUP_ID:
+ group_id = arg->value_u32;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* NOTE(review): an earlier comment claimed eth_dst_mask had to be
+ * ignored because it caused an -EINVAL return, yet it is passed
+ * below -- confirm which behavior is intended
+ */
+ return rocker_flow_tbl_bridge(rocker_port, flags,
+ eth_dst, eth_dst_mask,
+ vlan_id, tunnel_id,
+ goto_tbl, group_id, copy_to_cpu);
+}
+
+/* rocker_flow_set_acl() - program an ACL policy table entry
+ * @dev: rocker port netdev the rule arrived on
+ * @rule: generic net_flow rule (matches + actions) to translate
+ *
+ * Optional matches: lport, outer VLAN id, src/dst MAC, ethertype,
+ * IPv4 protocol and DSCP; unspecified fields default to "match any".
+ * The only supported action is setting the output group id.
+ *
+ * Return: 0 on success, -EINVAL on an unsupported match/action,
+ * otherwise the error from rocker_flow_tbl_acl().
+ */
+static int rocker_flow_set_acl(struct net_device *dev,
+			       struct net_flow_rule *rule)
+{
+	struct rocker_port *rocker_port = netdev_priv(dev);
+	u32 in_lport, in_lport_mask, group_id;
+	__be16 vlan_id, vlan_id_mask, ethtype = 0;
+	const u8 *eth_dst, *eth_src, *eth_dst_mask, *eth_src_mask;
+	u8 protocol, protocol_mask, dscp, dscp_mask;
+	int i, flags = 0;
+
+	/* If user does not specify vid match default to any */
+	vlan_id = rocker_port->internal_vlan_id;
+	vlan_id_mask = 0;
+
+	/* If user does not specify in_lport match default to any */
+	in_lport = rocker_port->lport;
+	in_lport_mask = 0;
+
+	/* If user does not specify a mac address match any */
+	eth_dst = rocker_port->dev->dev_addr;
+	eth_src = zero_mac;
+	eth_dst_mask = NULL;
+	eth_src_mask = NULL;
+
+	/* If user does not set protocol/dscp mask them out */
+	protocol = 0;
+	dscp = 0;
+	protocol_mask = 0;
+	dscp_mask = 0;
+
+	for (i = 0; rule->matches && rule->matches[i].instance; i++) {
+		switch (rule->matches[i].instance) {
+		case ROCKER_HEADER_INSTANCE_IN_LPORT:
+			in_lport = rule->matches[i].value_u32;
+			in_lport_mask = rule->matches[i].mask_u32;
+			break;
+		case ROCKER_HEADER_INSTANCE_VLAN_OUTER:
+			/* only the VID field is honoured here */
+			if (rule->matches[i].field != HEADER_VLAN_VID)
+				break;
+
+			vlan_id = htons(rule->matches[i].value_u16);
+			vlan_id_mask = htons(rule->matches[i].mask_u16);
+			break;
+		case ROCKER_HEADER_INSTANCE_ETHERNET:
+			switch (rule->matches[i].field) {
+			case HEADER_ETHERNET_SRC_MAC:
+				eth_src = (u8 *)&rule->matches[i].value_u64;
+				eth_src_mask = (u8 *)&rule->matches[i].mask_u64;
+				break;
+			case HEADER_ETHERNET_DST_MAC:
+				eth_dst = (u8 *)&rule->matches[i].value_u64;
+				eth_dst_mask = (u8 *)&rule->matches[i].mask_u64;
+				break;
+			case HEADER_ETHERNET_ETHERTYPE:
+				ethtype = htons(rule->matches[i].value_u16);
+				break;
+			default:
+				return -EINVAL;
+			}
+			break;
+		case ROCKER_HEADER_INSTANCE_IPV4:
+			switch (rule->matches[i].field) {
+			case HEADER_IPV4_PROTOCOL:
+				protocol = rule->matches[i].value_u8;
+				protocol_mask = rule->matches[i].mask_u8;
+				break;
+			case HEADER_IPV4_DSCP:
+				dscp = rule->matches[i].value_u8;
+				dscp_mask = rule->matches[i].mask_u8;
+				break;
+			default:
+				return -EINVAL;
+			}
+			/* bug fix: this break was missing, so every IPv4
+			 * protocol/dscp match fell through to the
+			 * -EINVAL default below
+			 */
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	/* By default skip group assignment */
+	group_id = ROCKER_GROUP_NONE;
+
+	for (i = 0; rule->actions && rule->actions[i].uid; i++) {
+		switch (rule->actions[i].uid) {
+		case ROCKER_ACTION_SET_GROUP_ID:
+			group_id = rule->actions[i].args[0].value_u32;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return rocker_flow_tbl_acl(rocker_port, flags,
+				   in_lport, in_lport_mask,
+				   eth_src, eth_src_mask,
+				   eth_dst, eth_dst_mask, ethtype,
+				   vlan_id, vlan_id_mask,
+				   protocol, protocol_mask,
+				   dscp, dscp_mask,
+				   group_id);
+}
+
+/* Dispatch a net_flow rule to the handler for its destination table.
+ * Unknown table ids are rejected with -EINVAL.
+ */
+static int rocker_set_rules(struct net_device *dev,
+			    struct net_flow_rule *rule)
+{
+	switch (rule->table_id) {
+	case ROCKER_FLOW_TABLE_ID_INGRESS_PORT:
+		return rocker_flow_set_ig_port(dev, rule);
+	case ROCKER_FLOW_TABLE_ID_VLAN:
+		return rocker_flow_set_vlan(dev, rule);
+	case ROCKER_FLOW_TABLE_ID_TERMINATION_MAC:
+		return rocker_flow_set_term_mac(dev, rule);
+	case ROCKER_FLOW_TABLE_ID_UNICAST_ROUTING:
+		return rocker_flow_set_ucast_routing(dev, rule);
+	case ROCKER_FLOW_TABLE_ID_MULTICAST_ROUTING:
+		return rocker_flow_set_mcast_routing(dev, rule);
+	case ROCKER_FLOW_TABLE_ID_BRIDGING:
+		return rocker_flow_set_bridge(dev, rule);
+	case ROCKER_FLOW_TABLE_ID_ACL_POLICY:
+		return rocker_flow_set_acl(dev, rule);
+	default:
+		return -EINVAL;
+	}
+}
+
+/* Rule deletion is not implemented yet; reported as unsupported. */
+static int rocker_del_rules(struct net_device *dev,
+ struct net_flow_rule *rule)
+{
+ return -EOPNOTSUPP;
+}
#endif
static const struct net_device_ops rocker_port_netdev_ops = {
@@ -3852,6 +4268,9 @@ static const struct net_device_ops rocker_port_netdev_ops = {
.ndo_flow_get_actions = rocker_get_actions,
.ndo_flow_get_tbl_graph = rocker_get_tgraph,
.ndo_flow_get_hdr_graph = rocker_get_hgraph,
+
+ .ndo_flow_set_rule = rocker_set_rules,
+ .ndo_flow_del_rule = rocker_del_rules,
#endif
};
@@ -4084,7 +4503,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
err = rocker_init_flow_tables(rocker_port);
if (err) {
- dev_err(&pdev->dev, "install flow table failed\n");
+ dev_err(&pdev->dev, "install rule table failed\n");
goto err_port_ig_tbl;
}
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Powered by blists - more mailing lists