Message-ID: <20230217223620.28508-4-paulb@nvidia.com>
Date: Sat, 18 Feb 2023 00:36:15 +0200
From: Paul Blakey <paulb@...dia.com>
To: Paul Blakey <paulb@...dia.com>, <netdev@...r.kernel.org>,
Saeed Mahameed <saeedm@...dia.com>,
Paolo Abeni <pabeni@...hat.com>,
Jakub Kicinski <kuba@...nel.org>,
Eric Dumazet <edumazet@...gle.com>,
Jamal Hadi Salim <jhs@...atatu.com>,
Cong Wang <xiyou.wangcong@...il.com>,
"David S. Miller" <davem@...emloft.net>,
Marcelo Ricardo Leitner <marcelo.leitner@...il.com>
CC: Oz Shlomo <ozsh@...dia.com>, Jiri Pirko <jiri@...dia.com>,
Roi Dayan <roid@...dia.com>, Vlad Buslov <vladbu@...dia.com>,
Ido Schimmel <idosch@...dia.com>,
Simon Horman <simon.horman@...igine.com>
Subject: [PATCH net-next v13 3/8] net/sched: flower: Move filter handle initialization earlier

To support miss to action during hardware offload, the filter's
handle is needed when setting up the actions (tcf_exts_init()) and
before offloading.

Move the filter handle initialization earlier.
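
The dependency can be pictured with a small userspace sketch: allocate the
handle first, then let action setup consume it. Everything below
(toy_filter, alloc_handle(), init_actions(), action_cookie) is invented for
the illustration and is not the kernel API; in the real code the handle
comes from idr_alloc_u32() and the actions from tcf_exts_init(), and how
the offload path later uses the handle is only approximated here.

    /* Toy model of the new ordering in fl_change(): pick the handle
     * before action setup so the actions can already refer to it. */
    #include <stdio.h>

    struct toy_filter {
        unsigned int handle;        /* filter handle, known up front */
        unsigned int action_cookie; /* stand-in for per-action miss metadata */
    };

    static unsigned int next_handle = 1;

    /* Models the early handle allocation (idr_alloc_u32() in the kernel). */
    static int alloc_handle(struct toy_filter *f)
    {
        f->handle = next_handle++;
        return 0;
    }

    /* Models action setup: it can only tag the actions with the handle if
     * the handle already exists, which is the point of the reordering. */
    static int init_actions(struct toy_filter *f)
    {
        if (!f->handle)
            return -1;
        f->action_cookie = f->handle; /* illustrative derivation only */
        return 0;
    }

    int main(void)
    {
        struct toy_filter f = { 0 };

        if (alloc_handle(&f) || init_actions(&f))
            return 1;
        printf("filter handle %u, action cookie %u\n",
               f.handle, f.action_cookie);
        return 0;
    }

Running the sketch prints a cookie derived from the handle, i.e. the piece
of information that was not yet available at action-setup time before this
reordering.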
Signed-off-by: Paul Blakey <paulb@...dia.com>
Reviewed-by: Jiri Pirko <jiri@...dia.com>
Reviewed-by: Simon Horman <simon.horman@...igine.com>
Reviewed-by: Marcelo Ricardo Leitner <marcelo.leitner@...il.com>
---
net/sched/cls_flower.c | 62 ++++++++++++++++++++++++------------------
1 file changed, 35 insertions(+), 27 deletions(-)
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 885c95191ccf..be01d39dd7b9 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -2187,10 +2187,6 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
         INIT_LIST_HEAD(&fnew->hw_list);
         refcount_set(&fnew->refcnt, 1);
 
-        err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
-        if (err < 0)
-                goto errout;
-
         if (tb[TCA_FLOWER_FLAGS]) {
                 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
 
@@ -2200,15 +2196,45 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
                 }
         }
 
+        if (!fold) {
+                spin_lock(&tp->lock);
+                if (!handle) {
+                        handle = 1;
+                        err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+                                            INT_MAX, GFP_ATOMIC);
+                } else {
+                        err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+                                            handle, GFP_ATOMIC);
+
+                        /* Filter with specified handle was concurrently
+                         * inserted after initial check in cls_api. This is not
+                         * necessarily an error if NLM_F_EXCL is not set in
+                         * message flags. Returning EAGAIN will cause cls_api to
+                         * try to update concurrently inserted rule.
+                         */
+                        if (err == -ENOSPC)
+                                err = -EAGAIN;
+                }
+                spin_unlock(&tp->lock);
+
+                if (err)
+                        goto errout;
+        }
+        fnew->handle = handle;
+
+        err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
+        if (err < 0)
+                goto errout_idr;
+
         err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
                            tp->chain->tmplt_priv, flags, fnew->flags,
                            extack);
         if (err)
-                goto errout;
+                goto errout_idr;
 
         err = fl_check_assign_mask(head, fnew, fold, mask);
         if (err)
-                goto errout;
+                goto errout_idr;
 
         err = fl_ht_insert_unique(fnew, fold, &in_ht);
         if (err)
@@ -2274,29 +2300,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
                 refcount_dec(&fold->refcnt);
                 __fl_put(fold);
         } else {
-                if (handle) {
-                        /* user specifies a handle and it doesn't exist */
-                        err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-                                            handle, GFP_ATOMIC);
-
-                        /* Filter with specified handle was concurrently
-                         * inserted after initial check in cls_api. This is not
-                         * necessarily an error if NLM_F_EXCL is not set in
-                         * message flags. Returning EAGAIN will cause cls_api to
-                         * try to update concurrently inserted rule.
-                         */
-                        if (err == -ENOSPC)
-                                err = -EAGAIN;
-                } else {
-                        handle = 1;
-                        err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-                                            INT_MAX, GFP_ATOMIC);
-                }
-                if (err)
-                        goto errout_hw;
+                idr_replace(&head->handle_idr, fnew, fnew->handle);
 
                 refcount_inc(&fnew->refcnt);
-                fnew->handle = handle;
                 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
                 spin_unlock(&tp->lock);
         }
@@ -2319,6 +2325,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
                                        fnew->mask->filter_ht_params);
 errout_mask:
         fl_mask_put(head, fnew->mask);
+errout_idr:
+        idr_remove(&head->handle_idr, fnew->handle);
 errout:
         __fl_put(fnew);
 errout_tb:
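
A note on the handle-claim logic this patch moves rather than changes: the
behaviour described in the "concurrently inserted" comment can be modelled
with a minimal userspace sketch. The helpers below (claim_handle(),
claim_any_handle()) and the fixed-size array are invented stand-ins for
idr_alloc_u32() on head->handle_idr; in the kernel the IDR reports -ENOSPC
for a taken handle and fl_change() translates that to -EAGAIN so that
cls_api retries the request as an update of the existing rule.

    /* Minimal userspace model of claiming a filter handle. */
    #include <errno.h>
    #include <stdio.h>

    #define MAX_HANDLES 16

    static const void *handle_map[MAX_HANDLES]; /* stand-in for the IDR */

    /* User asked for a specific handle (the "else" branch above). */
    static int claim_handle(unsigned int handle, const void *filter)
    {
        if (!handle || handle >= MAX_HANDLES)
            return -EINVAL;
        if (handle_map[handle])
            return -EAGAIN; /* taken: models the -ENOSPC -> -EAGAIN case */
        handle_map[handle] = filter;
        return 0;
    }

    /* No handle given: take the lowest free one (the "!handle" branch). */
    static int claim_any_handle(unsigned int *handle, const void *filter)
    {
        unsigned int h;

        for (h = 1; h < MAX_HANDLES; h++) {
            if (!handle_map[h]) {
                handle_map[h] = filter;
                *handle = h;
                return 0;
            }
        }
        return -ENOSPC;
    }

    int main(void)
    {
        int a, b;
        unsigned int h = 0;
        int err;

        err = claim_any_handle(&h, &a);
        printf("auto claim: %d, got handle %u\n", err, h);

        err = claim_handle(h, &b);
        printf("claiming handle %u again: %d (-EAGAIN is %d)\n",
               h, err, -EAGAIN);
        return 0;
    }
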
--
2.30.1