Message-Id: <20190708160614.2226-8-pablo@netfilter.org>
Date: Mon, 8 Jul 2019 18:06:09 +0200
From: Pablo Neira Ayuso <pablo@...filter.org>
To: netdev@...r.kernel.org
Cc: davem@...emloft.net, thomas.lendacky@....com, f.fainelli@...il.com,
ariel.elior@...ium.com, michael.chan@...adcom.com,
madalin.bucur@....com, yisen.zhuang@...wei.com,
salil.mehta@...wei.com, jeffrey.t.kirsher@...el.com,
tariqt@...lanox.com, saeedm@...lanox.com, jiri@...lanox.com,
idosch@...lanox.com, jakub.kicinski@...ronome.com,
peppe.cavallaro@...com, grygorii.strashko@...com, andrew@...n.ch,
vivien.didelot@...il.com, alexandre.torgue@...com,
joabreu@...opsys.com, linux-net-drivers@...arflare.com,
ogerlitz@...lanox.com, Manish.Chopra@...ium.com,
marcelo.leitner@...il.com, mkubecek@...e.cz,
venkatkumar.duvvuru@...adcom.com, maxime.chevallier@...tlin.com,
cphealy@...il.com, netfilter-devel@...r.kernel.org
Subject: [PATCH net-next,v3 07/11] net: sched: use flow block API
This patch adds tcf_block_setup(), which uses the flow block API.
This infrastructure takes the flow block callbacks coming from the
driver and registers/unregisters them with the cls_api core.
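To illustrate how a driver consumes this (not part of this patch - the
real driver conversions come later in this series), here is a rough
sketch of a TC_SETUP_BLOCK handler built on the flow_block_cb_alloc()
and flow_block_cb_add() helpers added earlier in the series. All foo_*
names are made up for illustration only:

	struct foo_priv {
		struct flow_block_cb *block_cb;
		/* ... driver state ... */
	};

	static int foo_setup_tc_block_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv);

	static LIST_HEAD(foo_block_cb_list);

	static int foo_setup_tc_block(struct foo_priv *priv,
				      struct flow_block_offload *f)
	{
		struct flow_block_cb *block_cb;

		if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
			return -EOPNOTSUPP;

		f->driver_block_list = &foo_block_cb_list;

		switch (f->command) {
		case FLOW_BLOCK_BIND:
			block_cb = flow_block_cb_alloc(f->net,
						       foo_setup_tc_block_cb,
						       priv, priv, NULL);
			if (IS_ERR(block_cb))
				return PTR_ERR(block_cb);
			/* Queue on bo->cb_list; tcf_block_setup() replays
			 * the existing filters and registers the callback
			 * with the cls_api core.
			 */
			flow_block_cb_add(block_cb, f);
			priv->block_cb = block_cb;
			return 0;
		case FLOW_BLOCK_UNBIND:
			if (!priv->block_cb)
				return -ENOENT;
			/* Queue for tcf_block_unbind(), which flushes the
			 * offloads and frees the flow_block_cb.
			 */
			flow_block_cb_remove(priv->block_cb, f);
			priv->block_cb = NULL;
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}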
Signed-off-by: Pablo Neira Ayuso <pablo@...filter.org>
---
v3: call flow_block_cb_free() - suggested by Jakub Kicinski.
tcf_block_setup() already deals with the unroll (see err_unroll) in
case of error, undoing the effects of ndo_setup_tc().
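To make the unroll concrete: tcf_block_bind() replays the existing
filters to each new callback via tcf_block_playback_offloads(); if the
replay fails for one of them, the callbacks already bound are replayed
once more with add = false and every flow_block_cb still on the list is
freed. The callback being replayed is the driver's tc_setup_cb_t -
again hypothetical, matching the foo sketch above:

	static int foo_setup_tc_block_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
	{
		struct foo_priv *priv = cb_priv;

		switch (type) {
		case TC_SETUP_CLSFLOWER:
			/* Program or remove the flower rule in hardware;
			 * foo_flower() stands in for the driver's logic.
			 */
			return foo_flower(priv, type_data);
		default:
			return -EOPNOTSUPP;
		}
	}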
net/sched/cls_api.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 89 insertions(+), 1 deletion(-)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 72761b43ae41..728714d8f601 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -672,6 +672,9 @@ static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
kfree(indr_block_cb);
}
+static int tcf_block_setup(struct tcf_block *block,
+ struct flow_block_offload *bo);
+
static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
struct tc_indr_block_cb *indr_block_cb,
enum flow_block_command command)
@@ -682,12 +685,14 @@ static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
.net = dev_net(indr_dev->dev),
.block = indr_dev->block,
};
+ INIT_LIST_HEAD(&bo.cb_list);
if (!indr_dev->block)
return;
indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
&bo);
+ tcf_block_setup(indr_dev->block, &bo);
}
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
@@ -772,6 +777,7 @@ static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
.block = block,
.extack = extack,
};
+ INIT_LIST_HEAD(&bo.cb_list);
indr_dev = tc_indr_block_dev_lookup(dev);
if (!indr_dev)
@@ -782,6 +788,8 @@ static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
&bo);
+
+ tcf_block_setup(block, &bo);
}
static bool tcf_block_offload_in_use(struct tcf_block *block)
@@ -796,13 +804,20 @@ static int tcf_block_offload_cmd(struct tcf_block *block,
struct netlink_ext_ack *extack)
{
struct tc_block_offload bo = {};
+ int err;
bo.net = dev_net(dev);
bo.command = command;
bo.binder_type = ei->binder_type;
bo.block = block;
bo.extack = extack;
- return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
+ INIT_LIST_HEAD(&bo.cb_list);
+
+ err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
+ if (err < 0)
+ return err;
+
+ return tcf_block_setup(block, &bo);
}
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
@@ -1636,6 +1651,79 @@ void tcf_block_cb_unregister(struct tcf_block *block,
}
EXPORT_SYMBOL(tcf_block_cb_unregister);
+static int tcf_block_bind(struct tcf_block *block,
+ struct flow_block_offload *bo)
+{
+ struct flow_block_cb *block_cb, *next;
+ int err, i = 0;
+
+ list_for_each_entry(block_cb, &bo->cb_list, driver_list) {
+ err = tcf_block_playback_offloads(block, block_cb->cb,
+ block_cb->cb_priv, true,
+ tcf_block_offload_in_use(block),
+ bo->extack);
+ if (err)
+ goto err_unroll;
+
+ list_add(&block_cb->list, &block->cb_list);
+ i++;
+ }
+ list_splice(&bo->cb_list, bo->driver_block_list);
+
+ return 0;
+
+err_unroll:
+ list_for_each_entry_safe(block_cb, next, &bo->cb_list, driver_list) {
+ if (i-- > 0) {
+ list_del(&block_cb->list);
+ tcf_block_playback_offloads(block, block_cb->cb,
+ block_cb->cb_priv, false,
+ tcf_block_offload_in_use(block),
+ NULL);
+ }
+ flow_block_cb_free(block_cb);
+ }
+
+ return err;
+}
+
+static void tcf_block_unbind(struct tcf_block *block,
+ struct flow_block_offload *bo)
+{
+ struct flow_block_cb *block_cb, *next;
+
+ list_for_each_entry_safe(block_cb, next, &bo->cb_list, driver_list) {
+ list_del(&block_cb->driver_list);
+ tcf_block_playback_offloads(block, block_cb->cb,
+ block_cb->cb_priv, false,
+ tcf_block_offload_in_use(block),
+ NULL);
+ list_del(&block_cb->list);
+ flow_block_cb_free(block_cb);
+ }
+}
+
+static int tcf_block_setup(struct tcf_block *block,
+ struct flow_block_offload *bo)
+{
+ int err;
+
+ switch (bo->command) {
+ case FLOW_BLOCK_BIND:
+ err = tcf_block_bind(block, bo);
+ break;
+ case FLOW_BLOCK_UNBIND:
+ err = 0;
+ tcf_block_unbind(block, bo);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
/* Main classifier routine: scans classifier chain attached
* to this qdisc, (optionally) tests for protocol and asks
* specific classifiers.
--
2.11.0