Message-ID: <20230119082357.21744-3-paulb@nvidia.com>
Date: Thu, 19 Jan 2023 10:23:53 +0200
From: Paul Blakey <paulb@...dia.com>
To: Paul Blakey <paulb@...dia.com>, <netdev@...r.kernel.org>,
Saeed Mahameed <saeedm@...dia.com>,
Paolo Abeni <pabeni@...hat.com>,
Jakub Kicinski <kuba@...nel.org>,
Eric Dumazet <edumazet@...gle.com>,
Jamal Hadi Salim <jhs@...atatu.com>,
Cong Wang <xiyou.wangcong@...il.com>,
"David S. Miller" <davem@...emloft.net>
CC: Oz Shlomo <ozsh@...dia.com>, Jiri Pirko <jiri@...dia.com>,
Roi Dayan <roid@...dia.com>, Vlad Buslov <vladbu@...dia.com>
Subject: [PATCH net-next v3 2/6] net/sched: flower: Move filter handle initialization earlier
To support miss to action during hardware offload, the filter's
handle is needed when setting up the actions (tcf_exts_init()) and
before offloading.

Move the filter handle initialization earlier.
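With this change, the resulting order in fl_change() is roughly as
follows (a simplified sketch of the code in the diff below, with the
error unwinding and the concurrent-insert comment elided):

	if (!fold) {
		spin_lock(&tp->lock);
		if (!handle) {
			/* pick the first free handle, starting at 1 */
			handle = 1;
			err = idr_alloc_u32(&head->handle_idr, fnew,
					    &handle, INT_MAX, GFP_ATOMIC);
		} else {
			/* claim exactly the user-specified handle */
			err = idr_alloc_u32(&head->handle_idr, fnew,
					    &handle, handle, GFP_ATOMIC);
		}
		spin_unlock(&tp->lock);
		if (err)
			goto errout;
	}
	fnew->handle = handle;

	/* the handle is now known when the actions are set up ... */
	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
	/* ... and later when fl_set_parms() offloads the filter */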
Signed-off-by: Paul Blakey <paulb@...dia.com>
Reviewed-by: Jiri Pirko <jiri@...dia.com>
---
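Note: the ENOSPC -> EAGAIN translation kept in the hunk below relies
on idr_alloc_u32() failing with -ENOSPC when the requested range is
already occupied, i.e. when a filter with the user-specified handle
was inserted concurrently. A minimal illustration of that pattern
(not part of the patch; the helper name is made up):

	#include <linux/idr.h>

	/* Try to claim exactly @handle for @filter; if another filter
	 * already owns it, return -EAGAIN so cls_api retries the
	 * request as an update of the existing filter.
	 */
	static int claim_exact_handle(struct idr *idr, void *filter,
				      u32 handle)
	{
		int err = idr_alloc_u32(idr, filter, &handle, handle,
					GFP_ATOMIC);

		return err == -ENOSPC ? -EAGAIN : err;
	}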
net/sched/cls_flower.c | 62 ++++++++++++++++++++++++------------------
1 file changed, 35 insertions(+), 27 deletions(-)
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 0b15698b3531d..564b862870c71 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -2192,10 +2192,6 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
INIT_LIST_HEAD(&fnew->hw_list);
refcount_set(&fnew->refcnt, 1);
- err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
- if (err < 0)
- goto errout;
-
if (tb[TCA_FLOWER_FLAGS]) {
fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
@@ -2205,15 +2201,45 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
}
}
+ if (!fold) {
+ spin_lock(&tp->lock);
+ if (!handle) {
+ handle = 1;
+ err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+ INT_MAX, GFP_ATOMIC);
+ } else {
+ err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+ handle, GFP_ATOMIC);
+
+ /* Filter with specified handle was concurrently
+ * inserted after initial check in cls_api. This is not
+ * necessarily an error if NLM_F_EXCL is not set in
+ * message flags. Returning EAGAIN will cause cls_api to
+ * try to update concurrently inserted rule.
+ */
+ if (err == -ENOSPC)
+ err = -EAGAIN;
+ }
+ spin_unlock(&tp->lock);
+
+ if (err)
+ goto errout;
+ }
+ fnew->handle = handle;
+
+ err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
+ if (err < 0)
+ goto errout_idr;
+
err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
tp->chain->tmplt_priv, flags, fnew->flags,
extack);
if (err)
- goto errout;
+ goto errout_idr;
err = fl_check_assign_mask(head, fnew, fold, mask);
if (err)
- goto errout;
+ goto errout_idr;
err = fl_ht_insert_unique(fnew, fold, &in_ht);
if (err)
@@ -2279,29 +2305,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
refcount_dec(&fold->refcnt);
__fl_put(fold);
} else {
- if (handle) {
- /* user specifies a handle and it doesn't exist */
- err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
- handle, GFP_ATOMIC);
-
- /* Filter with specified handle was concurrently
- * inserted after initial check in cls_api. This is not
- * necessarily an error if NLM_F_EXCL is not set in
- * message flags. Returning EAGAIN will cause cls_api to
- * try to update concurrently inserted rule.
- */
- if (err == -ENOSPC)
- err = -EAGAIN;
- } else {
- handle = 1;
- err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
- INT_MAX, GFP_ATOMIC);
- }
- if (err)
- goto errout_hw;
+ idr_replace(&head->handle_idr, fnew, fnew->handle);
refcount_inc(&fnew->refcnt);
- fnew->handle = handle;
list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
spin_unlock(&tp->lock);
}
@@ -2324,6 +2330,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
fnew->mask->filter_ht_params);
errout_mask:
fl_mask_put(head, fnew->mask);
+errout_idr:
+ idr_remove(&head->handle_idr, fnew->handle);
errout:
__fl_put(fnew);
errout_tb:
--
2.30.1