Message-ID: <20181119160013.GA2364@nanopsycho>
Date: Mon, 19 Nov 2018 17:00:13 +0100
From: Jiri Pirko <jiri@...nulli.us>
To: Pablo Neira Ayuso <pablo@...filter.org>
Cc: netdev@...r.kernel.org, davem@...emloft.net,
thomas.lendacky@....com, f.fainelli@...il.com,
ariel.elior@...ium.com, michael.chan@...adcom.com,
santosh@...lsio.com, madalin.bucur@....com,
yisen.zhuang@...wei.com, salil.mehta@...wei.com,
jeffrey.t.kirsher@...el.com, tariqt@...lanox.com,
saeedm@...lanox.com, jiri@...lanox.com, idosch@...lanox.com,
jakub.kicinski@...ronome.com, peppe.cavallaro@...com,
grygorii.strashko@...com, andrew@...n.ch,
vivien.didelot@...oirfairelinux.com, alexandre.torgue@...com,
joabreu@...opsys.com, linux-net-drivers@...arflare.com,
ganeshgr@...lsio.com, ogerlitz@...lanox.com
Subject: Re: [PATCH net-next 12/12] qede: use ethtool_rx_flow_rule() to
remove duplicated parser code
Mon, Nov 19, 2018 at 01:15:19AM CET, pablo@...filter.org wrote:
>The qede driver supports both ethtool_rx_flow_spec and flower, and the
>two codebases look very similar.
>
>This patch uses the ethtool_rx_flow_rule() infrastructure to remove the
>duplicated ethtool_rx_flow_spec parser and consolidate ACL offload
>support around the flow_rule infrastructure.
>
>Furthermore, more code can be consolidated by merging
>qede_add_cls_rule() and qede_add_tc_flower_fltr(), since these two
>functions also look very similar.
>
>This driver currently provides simple ACL support, such as 5-tuple
>matching, drop policy and queue to CPU.
>
>Drivers that support more features can benefit from this infrastructure
>to remove even more redundant code.
>
>Signed-off-by: Pablo Neira Ayuso <pablo@...filter.org>
>---
>Note that, after this patch, qede_add_cls_rule() and
>qede_add_tc_flower_fltr() can also be consolidated since their code is
>redundant.
>
> drivers/net/ethernet/qlogic/qede/qede_filter.c | 246 ++++++-------------------
> 1 file changed, 53 insertions(+), 193 deletions(-)
>
>diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
>index aca302c3261b..f82b26ba8f80 100644
>--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
>+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
>@@ -1578,30 +1578,6 @@ static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t,
> ports[1] = t->dst_port;
> }
>
>-/* Validate fields which are set and not accepted by the driver */
>-static int qede_flow_spec_validate_unused(struct qede_dev *edev,
>- struct ethtool_rx_flow_spec *fs)
>-{
>- if (fs->flow_type & FLOW_MAC_EXT) {
>- DP_INFO(edev, "Don't support MAC extensions\n");
>- return -EOPNOTSUPP;
>- }
>-
>- if ((fs->flow_type & FLOW_EXT) &&
>- (fs->h_ext.vlan_etype || fs->h_ext.vlan_tci)) {
>- DP_INFO(edev, "Don't support vlan-based classification\n");
>- return -EOPNOTSUPP;
>- }
>-
>- if ((fs->flow_type & FLOW_EXT) &&
>- (fs->h_ext.data[0] || fs->h_ext.data[1])) {
>- DP_INFO(edev, "Don't support user defined data\n");
>- return -EOPNOTSUPP;
>- }
>-
>- return 0;
>-}
>-
> static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
> struct qede_arfs_tuple *t)
> {
>@@ -1665,132 +1641,6 @@ static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
> return 0;
> }
>
>-static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev,
>- struct qede_arfs_tuple *t,
>- struct ethtool_rx_flow_spec *fs)
>-{
>- if ((fs->h_u.tcp_ip4_spec.ip4src &
>- fs->m_u.tcp_ip4_spec.ip4src) != fs->h_u.tcp_ip4_spec.ip4src) {
>- DP_INFO(edev, "Don't support IP-masks\n");
>- return -EOPNOTSUPP;
>- }
>-
>- if ((fs->h_u.tcp_ip4_spec.ip4dst &
>- fs->m_u.tcp_ip4_spec.ip4dst) != fs->h_u.tcp_ip4_spec.ip4dst) {
>- DP_INFO(edev, "Don't support IP-masks\n");
>- return -EOPNOTSUPP;
>- }
>-
>- if ((fs->h_u.tcp_ip4_spec.psrc &
>- fs->m_u.tcp_ip4_spec.psrc) != fs->h_u.tcp_ip4_spec.psrc) {
>- DP_INFO(edev, "Don't support port-masks\n");
>- return -EOPNOTSUPP;
>- }
>-
>- if ((fs->h_u.tcp_ip4_spec.pdst &
>- fs->m_u.tcp_ip4_spec.pdst) != fs->h_u.tcp_ip4_spec.pdst) {
>- DP_INFO(edev, "Don't support port-masks\n");
>- return -EOPNOTSUPP;
>- }
>-
>- if (fs->h_u.tcp_ip4_spec.tos) {
>- DP_INFO(edev, "Don't support tos\n");
>- return -EOPNOTSUPP;
>- }
>-
>- t->eth_proto = htons(ETH_P_IP);
>- t->src_ipv4 = fs->h_u.tcp_ip4_spec.ip4src;
>- t->dst_ipv4 = fs->h_u.tcp_ip4_spec.ip4dst;
>- t->src_port = fs->h_u.tcp_ip4_spec.psrc;
>- t->dst_port = fs->h_u.tcp_ip4_spec.pdst;
>-
>- return qede_set_v4_tuple_to_profile(edev, t);
>-}
>-
>-static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev,
>- struct qede_arfs_tuple *t,
>- struct ethtool_rx_flow_spec *fs)
>-{
>- t->ip_proto = IPPROTO_TCP;
>-
>- if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs))
>- return -EINVAL;
>-
>- return 0;
>-}
>-
>-static int qede_flow_spec_to_tuple_udpv4(struct qede_dev *edev,
>- struct qede_arfs_tuple *t,
>- struct ethtool_rx_flow_spec *fs)
>-{
>- t->ip_proto = IPPROTO_UDP;
>-
>- if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs))
>- return -EINVAL;
>-
>- return 0;
>-}
>-
>-static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev,
>- struct qede_arfs_tuple *t,
>- struct ethtool_rx_flow_spec *fs)
>-{
>- struct in6_addr zero_addr;
>-
>- memset(&zero_addr, 0, sizeof(zero_addr));
>-
>- if ((fs->h_u.tcp_ip6_spec.psrc &
>- fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) {
>- DP_INFO(edev, "Don't support port-masks\n");
>- return -EOPNOTSUPP;
>- }
>-
>- if ((fs->h_u.tcp_ip6_spec.pdst &
>- fs->m_u.tcp_ip6_spec.pdst) != fs->h_u.tcp_ip6_spec.pdst) {
>- DP_INFO(edev, "Don't support port-masks\n");
>- return -EOPNOTSUPP;
>- }
>-
>- if (fs->h_u.tcp_ip6_spec.tclass) {
>- DP_INFO(edev, "Don't support tclass\n");
>- return -EOPNOTSUPP;
>- }
>-
>- t->eth_proto = htons(ETH_P_IPV6);
>- memcpy(&t->src_ipv6, &fs->h_u.tcp_ip6_spec.ip6src,
>- sizeof(struct in6_addr));
>- memcpy(&t->dst_ipv6, &fs->h_u.tcp_ip6_spec.ip6dst,
>- sizeof(struct in6_addr));
>- t->src_port = fs->h_u.tcp_ip6_spec.psrc;
>- t->dst_port = fs->h_u.tcp_ip6_spec.pdst;
>-
>- return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
>-}
>-
>-static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev,
>- struct qede_arfs_tuple *t,
>- struct ethtool_rx_flow_spec *fs)
>-{
>- t->ip_proto = IPPROTO_TCP;
>-
>- if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs))
>- return -EINVAL;
>-
>- return 0;
>-}
>-
>-static int qede_flow_spec_to_tuple_udpv6(struct qede_dev *edev,
>- struct qede_arfs_tuple *t,
>- struct ethtool_rx_flow_spec *fs)
>-{
>- t->ip_proto = IPPROTO_UDP;
>-
>- if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs))
>- return -EINVAL;
>-
>- return 0;
>-}
>-
> /* Must be called while qede lock is held */
> static struct qede_arfs_fltr_node *
> qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t)
>@@ -1875,25 +1725,32 @@ static int qede_parse_actions(struct qede_dev *edev,
> struct flow_action *flow_action)
> {
> const struct flow_action_key *act;
>- int rc = -EINVAL, num_act = 0, i;
>- bool is_drop = false;
>+ int i;
>
> if (!flow_action_has_keys(flow_action)) {
>- DP_NOTICE(edev, "No tc actions received\n");
>- return rc;
>+ DP_NOTICE(edev, "No actions received\n");
>+ return -EINVAL;
> }
>
> flow_action_for_each(i, act, flow_action) {
>- num_act++;
>+ switch (act->id) {
>+ case FLOW_ACTION_KEY_DROP:
>+ break;
>+ case FLOW_ACTION_KEY_QUEUE:
>+ if (ethtool_get_flow_spec_ring_vf(act->queue_index))
>+ break;
>
>- if (act->id == FLOW_ACTION_KEY_DROP)
>- is_drop = true;
>+ if (act->queue_index >= QEDE_RSS_COUNT(edev)) {
>+ DP_INFO(edev, "Queue out-of-bounds\n");
>+ return -EINVAL;
>+ }
>+ break;
>+ default:
>+ return -EINVAL;
>+ }
> }
>
>- if (num_act == 1 && is_drop)
>- return 0;
>-
>- return rc;
>+ return 0;
> }
>
> static int
>@@ -2147,16 +2004,17 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
> }
>
> static int qede_flow_spec_validate(struct qede_dev *edev,
>- struct ethtool_rx_flow_spec *fs,
>- struct qede_arfs_tuple *t)
>+ struct flow_action *flow_action,
>+ struct qede_arfs_tuple *t,
>+ __u32 location)
> {
>- if (fs->location >= QEDE_RFS_MAX_FLTR) {
>+ if (location >= QEDE_RFS_MAX_FLTR) {
> DP_INFO(edev, "Location out-of-bounds\n");
> return -EINVAL;
> }
>
> /* Check location isn't already in use */
>- if (test_bit(fs->location, edev->arfs->arfs_fltr_bmap)) {
>+ if (test_bit(location, edev->arfs->arfs_fltr_bmap)) {
> DP_INFO(edev, "Location already in use\n");
> return -EINVAL;
> }
>@@ -2170,46 +2028,53 @@ static int qede_flow_spec_validate(struct qede_dev *edev,
> return -EINVAL;
> }
>
>- /* If drop requested then no need to validate other data */
>- if (fs->ring_cookie == RX_CLS_FLOW_DISC)
>- return 0;
>-
>- if (ethtool_get_flow_spec_ring_vf(fs->ring_cookie))
>- return 0;
>-
>- if (fs->ring_cookie >= QEDE_RSS_COUNT(edev)) {
>- DP_INFO(edev, "Queue out-of-bounds\n");
>+ if (qede_parse_actions(edev, flow_action))
> return -EINVAL;
>- }
>
> return 0;
> }
>
>-static int qede_flow_spec_to_tuple(struct qede_dev *edev,
>- struct qede_arfs_tuple *t,
>- struct ethtool_rx_flow_spec *fs)
>+static int qede_flow_spec_to_rule(struct qede_dev *edev,
>+ struct qede_arfs_tuple *t,
>+ struct ethtool_rx_flow_spec *fs)
> {
>- memset(t, 0, sizeof(*t));
>-
>- if (qede_flow_spec_validate_unused(edev, fs))
>- return -EOPNOTSUPP;
>+ struct tc_cls_flower_offload f = {};
>+ struct flow_rule *flow_rule;
>+ __be16 proto;
>+ int err = 0;
>
> switch ((fs->flow_type & ~FLOW_EXT)) {
> case TCP_V4_FLOW:
>- return qede_flow_spec_to_tuple_tcpv4(edev, t, fs);
> case UDP_V4_FLOW:
>- return qede_flow_spec_to_tuple_udpv4(edev, t, fs);
>+ proto = htons(ETH_P_IP);
>+ break;
> case TCP_V6_FLOW:
>- return qede_flow_spec_to_tuple_tcpv6(edev, t, fs);
> case UDP_V6_FLOW:
>- return qede_flow_spec_to_tuple_udpv6(edev, t, fs);
>+ proto = htons(ETH_P_IPV6);
>+ break;
> default:
> DP_VERBOSE(edev, NETIF_MSG_IFUP,
> "Can't support flow of type %08x\n", fs->flow_type);
> return -EOPNOTSUPP;
> }
>
>- return 0;
>+ flow_rule = ethtool_rx_flow_rule(fs);
>+ if (!flow_rule)
>+ return -ENOMEM;
>+
>+ f.rule = *flow_rule;
This does not look right. I understand that you want to use the same
driver code to parse the same struct coming either from tc-flower or
ethtool. That is why you introduced flow_rule as an intermediate layer.
However, here, you use a struct that is very tc-flower specific. The
common parser should work on struct flow_rule.
qede_parse_flower_attr() should accept a struct flow_rule * and should
be renamed to something like qede_parse_flow_rule().
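Roughly something like this (just a rough sketch, assuming the helper is
reworked into a hypothetical qede_parse_flow_rule() that takes a struct
flow_rule * directly; "proto" is resolved from fs->flow_type as in the
hunk above):

	flow_rule = ethtool_rx_flow_rule(fs);
	if (!flow_rule)
		return -ENOMEM;

	/* no struct tc_cls_flower_offload on the stack, the rule is
	 * handed to the common parser as-is
	 */
	if (qede_parse_flow_rule(edev, proto, flow_rule, t)) {
		err = -EINVAL;
		goto err_out;
	}

	/* Make sure location is valid and filter isn't already set */
	err = qede_flow_spec_validate(edev, &flow_rule->action, t,
				      fs->location);
err_out:
	ethtool_rx_flow_rule_free(flow_rule);
	return err;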
>+
>+ if (qede_parse_flower_attr(edev, proto, &f, t)) {
>+ err = -EINVAL;
>+ goto err_out;
>+ }
>+
>+ /* Make sure location is valid and filter isn't already set */
>+ err = qede_flow_spec_validate(edev, &f.rule.action, t, fs->location);
>+err_out:
>+ ethtool_rx_flow_rule_free(flow_rule);
>+ return err;
>+
> }
>
> int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
>@@ -2227,12 +2092,7 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
> }
>
> /* Translate the flow specification into something fittign our DB */
>- rc = qede_flow_spec_to_tuple(edev, &t, fsp);
>- if (rc)
>- goto unlock;
>-
>- /* Make sure location is valid and filter isn't already set */
>- rc = qede_flow_spec_validate(edev, fsp, &t);
>+ rc = qede_flow_spec_to_rule(edev, &t, fsp);
> if (rc)
> goto unlock;
>
>--
>2.11.0
>