Message-Id: <20180625043431.13413-8-jakub.kicinski@netronome.com>
Date: Sun, 24 Jun 2018 21:34:31 -0700
From: Jakub Kicinski <jakub.kicinski@...ronome.com>
To: davem@...emloft.net, jiri@...nulli.us
Cc: xiyou.wangcong@...il.com, jhs@...atatu.com, gerlitz.or@...il.com,
netdev@...r.kernel.org, oss-drivers@...ronome.com,
John Hurley <john.hurley@...ronome.com>,
Jakub Kicinski <jakub.kicinski@...ronome.com>
Subject: [PATCH net-next 7/7] net: sched: call reoffload op on block callback reg

From: John Hurley <john.hurley@...ronome.com>

Call the reoffload tcf_proto_op on all tcf_proto nodes in all chains of
a block when a callback tries to register to a block that already has
offloaded rules. If any of the existing rules cannot be offloaded, the
registration is rejected. This replaces the previous policy of rejecting
such callback registrations outright.

On unregistration of a callback, the rules offloaded via that callback
are flushed.

The implementation of block sharing in the NFP driver, for example,
duplicates shared rules to all devices bound to a block. Without such a
flush, rules could remain in hardware even after a device is unbound
from a block (assuming the block remains active).

Signed-off-by: John Hurley <john.hurley@...ronome.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@...ronome.com>
---
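A minimal sketch of what the new semantics mean for a driver binding a
callback to a block (the foo_* names and the foo_flower_cmd() stub are
hypothetical, for illustration only; the tc calls and the tc_setup_cb_t
signature are the real API):

#include <linux/netdevice.h>
#include <net/pkt_cls.h>

struct foo_priv {
	struct net_device *netdev;	/* hypothetical driver state */
};

/* Hypothetical helper that would program one flower rule into HW. */
static int foo_flower_cmd(struct foo_priv *priv,
			  struct tc_cls_flower_offload *f)
{
	return 0;
}

static int foo_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct foo_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		/* Rules that were offloaded before this callback was
		 * registered are replayed through here as well.
		 */
		return foo_flower_cmd(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int foo_block_bind(struct foo_priv *priv, struct tcf_block *block,
			  struct netlink_ext_ack *extack)
{
	/* Previously this failed with -EOPNOTSUPP whenever the block
	 * already had offloaded filters; now it fails only if one of
	 * the replayed rules cannot be offloaded.
	 */
	return tcf_block_cb_register(block, foo_setup_tc_block_cb,
				     priv, priv, extack);
}

static void foo_block_unbind(struct foo_priv *priv, struct tcf_block *block)
{
	/* Unregistering replays the rules with add == false, flushing
	 * this callback's rules from HW before the cb goes away.
	 */
	tcf_block_cb_unregister(block, foo_setup_tc_block_cb, priv);
}

mlxsw (updated below) keeps calling __tcf_block_cb_unregister() directly
because it refcounts its block callbacks, which is why that function now
takes the block pointer as well.
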
 .../net/ethernet/mellanox/mlxsw/spectrum.c |  4 +-
 include/net/pkt_cls.h                      |  6 ++-
 net/sched/cls_api.c                        | 54 ++++++++++++++++---
 3 files changed, 52 insertions(+), 12 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index d2bc335dda11..52437363766a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1542,7 +1542,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
 
 err_block_bind:
 	if (!tcf_block_cb_decref(block_cb)) {
-		__tcf_block_cb_unregister(block_cb);
+		__tcf_block_cb_unregister(block, block_cb);
 err_cb_register:
 		mlxsw_sp_acl_block_destroy(acl_block);
 	}
@@ -1572,7 +1572,7 @@ mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
 	err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
					mlxsw_sp_port, ingress);
 	if (!err && !tcf_block_cb_decref(block_cb)) {
-		__tcf_block_cb_unregister(block_cb);
+		__tcf_block_cb_unregister(block, block_cb);
 		mlxsw_sp_acl_block_destroy(acl_block);
 	}
 }
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index a2c6d35ba057..4070b8eb6d14 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -78,7 +78,8 @@ struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
 int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack);
-void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb);
+void __tcf_block_cb_unregister(struct tcf_block *block,
+			       struct tcf_block_cb *block_cb);
 void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);
 
@@ -177,7 +178,8 @@ int tcf_block_cb_register(struct tcf_block *block,
 }
 
 static inline
-void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
+void __tcf_block_cb_unregister(struct tcf_block *block,
+			       struct tcf_block_cb *block_cb)
 {
 }
 
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 8c9fb4b827a1..3e5132e41b3a 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -751,19 +751,53 @@ unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
 }
 EXPORT_SYMBOL(tcf_block_cb_decref);
 
+static int
+tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
+			    void *cb_priv, bool add, bool offload_in_use,
+			    struct netlink_ext_ack *extack)
+{
+	struct tcf_chain *chain;
+	struct tcf_proto *tp;
+	int err;
+
+	list_for_each_entry(chain, &block->chain_list, list) {
+		for (tp = rtnl_dereference(chain->filter_chain); tp;
+		     tp = rtnl_dereference(tp->next)) {
+			if (tp->ops->reoffload) {
+				err = tp->ops->reoffload(tp, add, cb, cb_priv,
+							 extack);
+				if (err && add)
+					goto err_playback_remove;
+			} else if (add && offload_in_use) {
+				err = -EOPNOTSUPP;
+				NL_SET_ERR_MSG(extack, "Filter replay failed - a filter doesn't support re-offloading");
+				goto err_playback_remove;
+			}
+		}
+	}
+
+	return 0;
+
+err_playback_remove:
+	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
+				    extack);
+	return err;
+}
+
 struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack)
 {
 	struct tcf_block_cb *block_cb;
+	int err;
 
-	/* At this point, playback of previous block cb calls is not supported,
-	 * so forbid to register to block which already has some offloaded
-	 * filters present.
-	 */
-	if (tcf_block_offload_in_use(block))
-		return ERR_PTR(-EOPNOTSUPP);
+	/* Replay any already present rules */
+	err = tcf_block_playback_offloads(block, cb, cb_priv, true,
+					  tcf_block_offload_in_use(block),
+					  extack);
+	if (err)
+		return ERR_PTR(err);
 
 	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
 	if (!block_cb)
@@ -788,8 +822,12 @@ int tcf_block_cb_register(struct tcf_block *block,
 }
 EXPORT_SYMBOL(tcf_block_cb_register);
 
-void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
+void __tcf_block_cb_unregister(struct tcf_block *block,
+			       struct tcf_block_cb *block_cb)
 {
+	tcf_block_playback_offloads(block, block_cb->cb, block_cb->cb_priv,
+				    false, tcf_block_offload_in_use(block),
+				    NULL);
 	list_del(&block_cb->list);
 	kfree(block_cb);
 }
@@ -803,7 +841,7 @@ void tcf_block_cb_unregister(struct tcf_block *block,
 	block_cb = tcf_block_cb_lookup(block, cb, cb_ident);
 	if (!block_cb)
 		return;
-	__tcf_block_cb_unregister(block_cb);
+	__tcf_block_cb_unregister(block, block_cb);
 }
 EXPORT_SYMBOL(tcf_block_cb_unregister);
 
--
2.17.1