Message-Id: <1570058072-12004-3-git-send-email-john.hurley@netronome.com>
Date: Thu, 3 Oct 2019 00:14:32 +0100
From: John Hurley <john.hurley@...ronome.com>
To: vladbu@...lanox.com
Cc: jiri@...lanox.com, netdev@...r.kernel.org,
simon.horman@...ronome.com, jakub.kicinski@...ronome.com,
oss-drivers@...ronome.com, John Hurley <john.hurley@...ronome.com>
Subject: [RFC net-next 2/2] net: sched: fix tp destroy race conditions in flower
Flower provides rule HW offload functions that drivers can choose to
register for. In the deletion case, these are triggered only after filters
have been removed from the lookup tables, both at the flower level and at
the higher cls_api level. With flower running without RTNL locking, this
can lead to races where HW offload messages reach the driver out of order.
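To make the ordering issue concrete, here is a minimal userspace sketch
(plain pthreads, not kernel code and not the actual flower data structures)
of how sending the HW delete only after the software table has been updated
lets a concurrent re-add slip its HW replace message in between, leaving HW
and kernel state out of sync:

/* Userspace analogy only: two threads racing on one "filter". The deleter
 * unlinks the filter from the software table first and sends the HW delete
 * afterwards; the adder re-adds and sends its HW replace in that window,
 * so the driver sees REPLACE then DELETE and ends up with no offloaded
 * rule even though the software table still has one.
 * Build: gcc -pthread race.c -o race
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int in_sw_table = 1;		/* filter present in software table */

static void hw_msg(const char *msg)	/* stand-in for the driver offload call */
{
	printf("HW: %s\n", msg);
}

static void *deleter(void *arg)
{
	pthread_mutex_lock(&table_lock);
	in_sw_table = 0;		/* software table updated first ...  */
	pthread_mutex_unlock(&table_lock);

	usleep(1000);			/* widen the race window             */
	hw_msg("delete filter");	/* ... HW only hears about it later  */
	return NULL;
}

static void *adder(void *arg)
{
	usleep(100);			/* let the deleter unlink first      */

	pthread_mutex_lock(&table_lock);
	in_sw_table = 1;		/* concurrent re-add of the filter   */
	pthread_mutex_unlock(&table_lock);

	hw_msg("replace filter");	/* can reach HW before the delete    */
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, deleter, NULL);
	pthread_create(&t2, NULL, adder, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	printf("software table has filter: %d\n", in_sw_table);
	return 0;
}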
Ensure HW offloads stay in line with the kernel tables by sending the
offload messages before the kernel processing is completed. For destroyed
tcf_protos, do this via the new pre_destroy hook. Similarly, when a filter
is being added, check that it is not concurrently being deleted before
offloading to HW, rather than the current approach of offloading first,
then checking and reversing the offload if required.
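For comparison, a sketch of the ordering this patch moves towards, again
only a userspace analogy rather than the kernel implementation: the deleter
notifies HW before touching the software table, and the adder checks the
"being deleted" state under the lock before it offloads at all, instead of
offloading and then reversing:

/* Userspace analogy only of the fixed ordering: HW delete is sent before
 * the software table changes, and the adder bails out (retry/-EAGAIN)
 * under the lock if a delete is in flight, so it never sends a HW replace
 * for a filter that is going away.
 * Build: gcc -pthread fixed.c -o fixed
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int deleting;			/* analogue of tp->deleting / f->deleted */
static int in_sw_table = 1;

static void hw_msg(const char *msg)
{
	printf("HW: %s\n", msg);
}

static void *deleter(void *arg)
{
	hw_msg("delete filter");	/* HW hears about the removal first ... */

	pthread_mutex_lock(&table_lock);
	deleting = 1;
	in_sw_table = 0;		/* ... then the software table changes   */
	pthread_mutex_unlock(&table_lock);
	return NULL;
}

static void *adder(void *arg)
{
	usleep(1000);

	pthread_mutex_lock(&table_lock);
	if (deleting) {			/* bail out before any HW message        */
		pthread_mutex_unlock(&table_lock);
		fprintf(stderr, "add: racing delete, caller retries (-EAGAIN)\n");
		return NULL;
	}
	in_sw_table = 1;
	pthread_mutex_unlock(&table_lock);

	hw_msg("replace filter");
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, deleter, NULL);
	pthread_create(&t2, NULL, adder, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	printf("software table has filter: %d\n", in_sw_table);
	return 0;
}

The diff below applies this idea to flower itself: __fl_delete and the new
fl_pre_destroy hook issue fl_hw_destroy_filter before the kernel-side
teardown, and fl_change checks tp->deleting / fold->deleted under tp->lock
before calling fl_hw_replace_filter.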
Fixes: 1d965c4def07 ("Refactor flower classifier to remove dependency on rtnl lock")
Fixes: 272ffaadeb3e ("net: sched: flower: handle concurrent tcf proto deletion")
Signed-off-by: John Hurley <john.hurley@...ronome.com>
Reported-by: Louis Peens <louis.peens@...ronome.com>
---
net/sched/cls_flower.c | 55 +++++++++++++++++++++++++++-----------------------
1 file changed, 30 insertions(+), 25 deletions(-)
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 74221e3..3ac47b5 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -513,13 +513,16 @@ static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
}
static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
- bool *last, bool rtnl_held,
+ bool *last, bool rtnl_held, bool do_hw,
struct netlink_ext_ack *extack)
{
struct cls_fl_head *head = fl_head_dereference(tp);
*last = false;
+ if (do_hw && !tc_skip_hw(f->flags))
+ fl_hw_destroy_filter(tp, f, rtnl_held, extack);
+
spin_lock(&tp->lock);
if (f->deleted) {
spin_unlock(&tp->lock);
@@ -534,8 +537,6 @@ static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
spin_unlock(&tp->lock);
*last = fl_mask_put(head, f->mask);
- if (!tc_skip_hw(f->flags))
- fl_hw_destroy_filter(tp, f, rtnl_held, extack);
tcf_unbind_filter(tp, &f->res);
__fl_put(f);
@@ -563,7 +564,7 @@ static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
list_for_each_entry_safe(f, next, &mask->filters, list) {
- __fl_delete(tp, f, &last, rtnl_held, extack);
+ __fl_delete(tp, f, &last, rtnl_held, false, extack);
if (last)
break;
}
@@ -574,6 +575,19 @@ static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}
+static void fl_pre_destroy(struct tcf_proto *tp, bool rtnl_held,
+ struct netlink_ext_ack *extack)
+{
+ struct cls_fl_head *head = fl_head_dereference(tp);
+ struct fl_flow_mask *mask, *next_mask;
+ struct cls_fl_filter *f, *next;
+
+ list_for_each_entry_safe(mask, next_mask, &head->masks, list)
+ list_for_each_entry_safe(f, next, &mask->filters, list)
+ if (!tc_skip_hw(f->flags))
+ fl_hw_destroy_filter(tp, f, rtnl_held, extack);
+}
+
static void fl_put(struct tcf_proto *tp, void *arg)
{
struct cls_fl_filter *f = arg;
@@ -1588,6 +1602,13 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
if (err)
goto errout_mask;
+ spin_lock(&tp->lock);
+ if (tp->deleting || (fold && fold->deleted)) {
+ err = -EAGAIN;
+ goto errout_lock;
+ }
+ spin_unlock(&tp->lock);
+
if (!tc_skip_hw(fnew->flags)) {
err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
if (err)
@@ -1598,22 +1619,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
spin_lock(&tp->lock);
-
- /* tp was deleted concurrently. -EAGAIN will cause caller to lookup
- * proto again or create new one, if necessary.
- */
- if (tp->deleting) {
- err = -EAGAIN;
- goto errout_hw;
- }
-
if (fold) {
- /* Fold filter was deleted concurrently. Retry lookup. */
- if (fold->deleted) {
- err = -EAGAIN;
- goto errout_hw;
- }
-
fnew->handle = handle;
if (!in_ht) {
@@ -1624,7 +1630,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
&fnew->ht_node,
params);
if (err)
- goto errout_hw;
+ goto errout_lock;
in_ht = true;
}
@@ -1667,7 +1673,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
INT_MAX, GFP_ATOMIC);
}
if (err)
- goto errout_hw;
+ goto errout_lock;
refcount_inc(&fnew->refcnt);
fnew->handle = handle;
@@ -1683,11 +1689,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
errout_ht:
spin_lock(&tp->lock);
-errout_hw:
+errout_lock:
fnew->deleted = true;
spin_unlock(&tp->lock);
- if (!tc_skip_hw(fnew->flags))
- fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
if (in_ht)
rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
fnew->mask->filter_ht_params);
@@ -1713,7 +1717,7 @@ static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
bool last_on_mask;
int err = 0;
- err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
+ err = __fl_delete(tp, f, &last_on_mask, rtnl_held, true, extack);
*last = list_empty(&head->masks);
__fl_put(f);
@@ -2509,6 +2513,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
.kind = "flower",
.classify = fl_classify,
.init = fl_init,
+ .pre_destroy = fl_pre_destroy,
.destroy = fl_destroy,
.get = fl_get,
.put = fl_put,
--
2.7.4