Message-Id: <20180525022539.6799-4-jakub.kicinski@netronome.com>
Date: Thu, 24 May 2018 19:25:38 -0700
From: Jakub Kicinski <jakub.kicinski@...ronome.com>
To: netdev@...r.kernel.org
Cc: jiri@...nulli.us, gerlitz.or@...il.com,
sridhar.samudrala@...el.com, oss-drivers@...ronome.com,
john.hurley@...ronome.com
Subject: [RFC net-next 3/4] net: sched: cls_flower: implement offload tcf_proto_op
From: John Hurley <john.hurley@...ronome.com>
Add the offload tcf_proto_op in flower to generate an offload message for
each filter in the given tcf_proto and call the specified callback with it.
The function returns an error only if the callback rejects adding a
'hardware only' (skip_sw) rule.

A filter contains a flag indicating whether or not it is in hardware. To
ensure the offload function maintains this flag correctly, keep a reference
count (in_hw_count) of the number of instances of the filter that are in
hardware, and only update the flag when this count changes to or from 0.
Signed-off-by: John Hurley <john.hurley@...ronome.com>
---
net/sched/cls_flower.c | 51 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 51 insertions(+)
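
For context, the new op is meant to be invoked by the block layer when a
callback is registered or unregistered. A minimal sketch of such a caller
is below; it is NOT part of this patch, the op signature is simply taken
from fl_offload() further down, and tcf_block_playback_offloads() is a
made-up name for illustration only:

/*
 * Hypothetical caller (not in this patch): replay existing filters to a
 * callback that is being registered (add = true) or unregistered
 * (add = false) on a block.
 */
static int tcf_block_playback_offloads(struct tcf_block *block, bool add,
                                       tc_setup_cb_t *cb, void *cb_priv,
                                       struct netlink_ext_ack *extack)
{
        struct tcf_chain *chain;
        struct tcf_proto *tp;
        int err;

        list_for_each_entry(chain, &block->chain_list, list) {
                for (tp = rtnl_dereference(chain->filter_chain); tp;
                     tp = rtnl_dereference(tp->next)) {
                        if (!tp->ops->offload)
                                continue;
                        err = tp->ops->offload(tp, add, cb, cb_priv, extack);
                        /* fl_offload() only fails for skip_sw rules on add */
                        if (err && add)
                                return err;
                }
        }

        return 0;
}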
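
On the driver side the callback passed in is a plain tc_setup_cb_t, so it
receives one TC_SETUP_CLSFLOWER message per filter, exactly as if the rule
had just been added or deleted. A rough sketch follows; the foo_* names
are placeholders, not from any real driver:

/* Illustrative driver callback; foo_* identifiers are placeholders only. */
static int foo_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
                                 void *cb_priv)
{
        struct tc_cls_flower_offload *cls_flower = type_data;
        struct foo_priv *priv = cb_priv;

        if (type != TC_SETUP_CLSFLOWER)
                return -EOPNOTSUPP;

        switch (cls_flower->command) {
        case TC_CLSFLOWER_REPLACE:
                /* program cls_flower->key/mask/exts into hardware */
                return foo_flower_install(priv, cls_flower);
        case TC_CLSFLOWER_DESTROY:
                /* remove the rule identified by cls_flower->cookie */
                return foo_flower_remove(priv, cls_flower);
        default:
                return -EOPNOTSUPP;
        }
}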
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index eacaaf803914..c23d338df6a1 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -90,6 +90,7 @@ struct cls_fl_filter {
struct list_head list;
u32 handle;
u32 flags;
+ u32 in_hw_count;
union {
struct work_struct work;
struct rcu_head rcu;
@@ -289,6 +290,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
fl_hw_destroy_filter(tp, f, NULL);
return err;
} else if (err > 0) {
+ f->in_hw_count = err;
tcf_block_offload_inc(block, &f->flags);
}
@@ -1092,6 +1094,54 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
}
}
+static int fl_offload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+ void *cb_priv, struct netlink_ext_ack *extack)
+{
+ struct cls_fl_head *head = rtnl_dereference(tp->root);
+ struct tc_cls_flower_offload cls_flower = {};
+ struct tcf_block *block = tp->chain->block;
+ struct fl_flow_mask *mask;
+ struct cls_fl_filter *f;
+ int err;
+
+ list_for_each_entry(mask, &head->masks, list) {
+ list_for_each_entry(f, &mask->filters, list) {
+ if (tc_skip_hw(f->flags))
+ continue;
+
+ tc_cls_common_offload_init(&cls_flower.common, tp,
+ f->flags, extack);
+ cls_flower.command = add ?
+ TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
+ cls_flower.cookie = (unsigned long)f;
+ cls_flower.dissector = &mask->dissector;
+ cls_flower.mask = &f->mkey;
+ cls_flower.key = &f->key;
+ cls_flower.exts = &f->exts;
+ cls_flower.classid = f->res.classid;
+
+ err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
+ if (err) {
+ if (add && tc_skip_sw(f->flags))
+ return err;
+ continue;
+ }
+
+ if (add) {
+ if (!f->in_hw_count)
+ tcf_block_offload_inc(block, &f->flags);
+ f->in_hw_count++;
+ } else {
+ f->in_hw_count--;
+ if (!f->in_hw_count)
+ tcf_block_offload_dec(block, &f->flags);
+ }
+ }
+ }
+
+ return 0;
+}
+
static int fl_dump_key_val(struct sk_buff *skb,
void *val, int val_type,
void *mask, int mask_type, int len)
@@ -1443,6 +1493,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
.change = fl_change,
.delete = fl_delete,
.walk = fl_walk,
+ .offload = fl_offload,
.dump = fl_dump,
.bind_class = fl_bind_class,
.owner = THIS_MODULE,
--
2.17.0