Message-Id: <1568882232-12847-1-git-send-email-wenxu@ucloud.cn>
Date: Thu, 19 Sep 2019 16:37:12 +0800
From: wenxu@...oud.cn
To: davem@...emloft.net
Cc: netdev@...r.kernel.org
Subject: [PATCH net v3] net/sched: cls_api: Fix nooffloaddevcnt counter when indr block call succeeds
From: wenxu <wenxu@...oud.cn>
A vxlan or gretap device offloads through the indr block method. Even if the
device successfully binds to real hardware through the indr block call, the
nooffloaddevcnt counter is still incremented. This counter then causes rule
insertion to fail in fl_hw_replace_filter-->tc_setup_cb_call when the skip_sw
flag is set.
tc_setup_cb_call checks the nooffloaddevcnt counter together with the skip_sw
flag as follows:
if (block->nooffloaddevcnt && err_stop)
        return -EOPNOTSUPP;
So with this patch, if the indr block call succeeds, the nooffloaddevcnt
counter is no longer incremented.
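For illustration, below is a simplified sketch of the bind logic this patch
arrives at. It is not the exact kernel code: locking (cb_lock), the shared
block check and the error unwinding are elided, and offload_bind_sketch is a
made-up name. The point is that nooffloaddevcnt is only bumped when neither
ndo_setup_tc nor an indirect block callback accepts the bind.

static int offload_bind_sketch(struct tcf_block *block, struct net_device *dev,
                               struct tcf_block_ext_info *ei,
                               struct netlink_ext_ack *extack)
{
        int err;

        if (dev->netdev_ops->ndo_setup_tc) {
                err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND,
                                            extack);
                if (err != -EOPNOTSUPP)
                        return err; /* hardware took the bind, or hard error */
        }

        /* Fall back to indirect block callbacks (vxlan, gretap, ...).
         * tc_indr_block_call() now returns -EOPNOTSUPP when no indirect
         * callback registered for this device, i.e. bo.cb_list stayed empty.
         */
        err = tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
        if (err)
                block->nooffloaddevcnt++; /* nothing offloaded this device */

        return 0;
}

With this shape, a skip_sw rule on a vxlan or gretap device that is bound
through an indirect callback no longer trips the nooffloaddevcnt check in
tc_setup_cb_call.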
Fixes: 7f76fa36754b ("net: sched: register callbacks for indirect tc block binds")
Signed-off-by: wenxu <wenxu@...oud.cn>
---
v3: rebase onto the net tree
net/sched/cls_api.c | 30 +++++++++++++++++-------------
1 file changed, 17 insertions(+), 13 deletions(-)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 32577c2..c980127 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -607,11 +607,11 @@ static void tc_indr_block_get_and_ing_cmd(struct net_device *dev,
tc_indr_block_ing_cmd(dev, block, cb, cb_priv, command);
}
-static void tc_indr_block_call(struct tcf_block *block,
- struct net_device *dev,
- struct tcf_block_ext_info *ei,
- enum flow_block_command command,
- struct netlink_ext_ack *extack)
+static int tc_indr_block_call(struct tcf_block *block,
+ struct net_device *dev,
+ struct tcf_block_ext_info *ei,
+ enum flow_block_command command,
+ struct netlink_ext_ack *extack)
{
struct flow_block_offload bo = {
.command = command,
@@ -621,10 +621,15 @@ static void tc_indr_block_call(struct tcf_block *block,
.block_shared = tcf_block_shared(block),
.extack = extack,
};
+
INIT_LIST_HEAD(&bo.cb_list);
flow_indr_block_call(dev, &bo, command);
- tcf_block_setup(block, &bo);
+
+ if (list_empty(&bo.cb_list))
+ return -EOPNOTSUPP;
+
+ return tcf_block_setup(block, &bo);
}
static bool tcf_block_offload_in_use(struct tcf_block *block)
@@ -681,8 +686,6 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
goto no_offload_dev_inc;
if (err)
goto err_unlock;
-
- tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
up_write(&block->cb_lock);
return 0;
@@ -691,9 +694,10 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
err = -EOPNOTSUPP;
goto err_unlock;
}
+ err = tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
+ if (err)
+ block->nooffloaddevcnt++;
err = 0;
- block->nooffloaddevcnt++;
- tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
err_unlock:
up_write(&block->cb_lock);
return err;
@@ -706,8 +710,6 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
int err;
down_write(&block->cb_lock);
- tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
-
if (!dev->netdev_ops->ndo_setup_tc)
goto no_offload_dev_dec;
err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
@@ -717,7 +719,9 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
return;
no_offload_dev_dec:
- WARN_ON(block->nooffloaddevcnt-- == 0);
+ err = tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
+ if (err)
+ WARN_ON(block->nooffloaddevcnt-- == 0);
up_write(&block->cb_lock);
}
--
1.8.3.1