[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20180525022539.6799-5-jakub.kicinski@netronome.com>
Date: Thu, 24 May 2018 19:25:39 -0700
From: Jakub Kicinski <jakub.kicinski@...ronome.com>
To: netdev@...r.kernel.org
Cc: jiri@...nulli.us, gerlitz.or@...il.com,
sridhar.samudrala@...el.com, oss-drivers@...ronome.com,
john.hurley@...ronome.com
Subject: [RFC net-next 4/4] net: sched: cls_matchall: implement offload tcf_proto_op
From: John Hurley <john.hurley@...ronome.com>
Add the offload tcf_proto_op in matchall to generate an offload message
for each filter in the given tcf_proto. Call the specified callback with
this new offload message. The function only returns an error if the
callback rejects adding a 'hardware only' rule.
Ensure matchall flags correctly report whether the rule is in hw by keeping a
reference count of the number of offloaded instances of the rule. Only
update the in-hw flag when this counter transitions to or from 0.
Signed-off-by: John Hurley <john.hurley@...ronome.com>
---
net/sched/cls_matchall.c | 40 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 40 insertions(+)
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 2ba721a590a7..a3497bc4ee24 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -21,6 +21,7 @@ struct cls_mall_head {
struct tcf_result res;
u32 handle;
u32 flags;
+ u32 in_hw_count;
union {
struct work_struct work;
struct rcu_head rcu;
@@ -106,6 +107,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
mall_destroy_hw_filter(tp, head, cookie, NULL);
return err;
} else if (err > 0) {
+ head->in_hw_count = err;
tcf_block_offload_inc(block, &head->flags);
}
@@ -246,6 +248,43 @@ static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
arg->count++;
}
+/* Replay (add) or remove (del) the offload of the single matchall filter
+ * attached to @tp through callback @cb. Used when an offload callback is
+ * registered/unregistered after filters already exist.
+ *
+ * Returns an error only when an "add" is rejected for a skip_sw
+ * (hardware-only) rule; all other callback failures are tolerated because
+ * the software datapath still handles the traffic.
+ */
+static int mall_offload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+			void *cb_priv, struct netlink_ext_ack *extack)
+{
+	struct cls_mall_head *head = rtnl_dereference(tp->root);
+	struct tc_cls_matchall_offload cls_mall = {};
+	struct tcf_block *block = tp->chain->block;
+	int err;
+
+	/* The filter may already have been removed (mall_destroy clears
+	 * tp->root), so head must be checked before dereferencing its flags.
+	 */
+	if (!head || tc_skip_hw(head->flags))
+		return 0;
+
+	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
+	cls_mall.command = add ?
+		TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
+	cls_mall.exts = &head->exts;
+	cls_mall.cookie = (unsigned long)head;
+
+	err = cb(TC_SETUP_CLSMATCHALL, &cls_mall, cb_priv);
+	if (err) {
+		/* Hard failure only for hardware-only rules being added. */
+		if (add && tc_skip_sw(head->flags))
+			return err;
+		return 0;
+	}
+
+	/* in_hw_count tracks how many callbacks hold this rule; the in-hw
+	 * flag is flipped only on the 0 <-> 1 transitions of the count.
+	 */
+	if (add) {
+		if (!head->in_hw_count)
+			tcf_block_offload_inc(block, &head->flags);
+		head->in_hw_count++;
+	} else {
+		head->in_hw_count--;
+		if (!head->in_hw_count)
+			tcf_block_offload_dec(block, &head->flags);
+	}
+
+	return 0;
+}
+
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
struct sk_buff *skb, struct tcmsg *t)
{
@@ -300,6 +339,7 @@ static struct tcf_proto_ops cls_mall_ops __read_mostly = {
.change = mall_change,
.delete = mall_delete,
.walk = mall_walk,
+ .offload = mall_offload,
.dump = mall_dump,
.bind_class = mall_bind_class,
.owner = THIS_MODULE,
--
2.17.0
Powered by blists - more mailing lists