Message-Id: <20171129201922.24370-10-willy@infradead.org>
Date: Wed, 29 Nov 2017 12:19:14 -0800
From: Matthew Wilcox <willy@...radead.org>
To: willy@...radead.org, netdev@...r.kernel.org
Cc: Matthew Wilcox <mawilcox@...rosoft.com>
Subject: [PATCH v2 09/17] net sched actions: Convert to use idr_alloc_u32
From: Matthew Wilcox <mawilcox@...rosoft.com>
Use the new idr_alloc_u32 helper. Also untangle the error path, and in
doing so noticed that a gen_new_estimator() failure would leak the
newly allocated ID. Fix that bug.
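For reference, idr_alloc_u32() takes the starting ID by reference and an
inclusive upper bound; on success it stores the allocated ID back through
the same pointer and returns 0, otherwise a negative errno. A sketch of
the two call sites in this patch (same names as in the code below):

	/* dynamic index: grab the lowest free ID in [1, UINT_MAX] */
	index = 1;
	err = idr_alloc_u32(idr, NULL, &index, UINT_MAX, GFP_ATOMIC);

	/* caller-specified index: min == max pins the allocation to 'index' */
	err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC);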
Signed-off-by: Matthew Wilcox <mawilcox@...rosoft.com>
---
net/sched/act_api.c | 60 ++++++++++++++++++++++-------------------------------
1 file changed, 25 insertions(+), 35 deletions(-)
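Two notes on the shape of the result, for review (a sketch; the diff
below is authoritative). First, the idr_preload()/spin_lock_bh() pair is
hoisted out of the if/else, since both branches took it identically.
Second, the error labels now unwind strictly in reverse order of setup,
with the new err4 label releasing the ID that the old code leaked when
gen_new_estimator() failed:

	err4:
		idr_remove(idr, index);	/* the leak fix */
	err3:
		free_percpu(p->cpu_qstats);
	err2:
		free_percpu(p->cpu_bstats);
	err1:
		kfree(p);
		return err;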
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index efb90b8a3bf0..156302c110af 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -274,7 +274,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
 	struct idr *idr = &idrinfo->action_idr;
 	int err = -ENOMEM;
-	unsigned long idr_index;
 
 	if (unlikely(!p))
 		return -ENOMEM;
@@ -284,45 +283,28 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 
 	if (cpustats) {
 		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
-		if (!p->cpu_bstats) {
-err1:
-			kfree(p);
-			return err;
-		}
-		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
-		if (!p->cpu_qstats) {
-err2:
-			free_percpu(p->cpu_bstats);
+		if (!p->cpu_bstats)
 			goto err1;
-		}
+		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+		if (!p->cpu_qstats)
+			goto err2;
 	}
 	spin_lock_init(&p->tcfa_lock);
+	idr_preload(GFP_KERNEL);
+	spin_lock_bh(&idrinfo->lock);
 	/* user doesn't specify an index */
 	if (!index) {
-		idr_preload(GFP_KERNEL);
-		spin_lock_bh(&idrinfo->lock);
-		err = idr_alloc_ext(idr, NULL, &idr_index, 1, 0,
-				    GFP_ATOMIC);
-		spin_unlock_bh(&idrinfo->lock);
-		idr_preload_end();
-		if (err) {
-err3:
-			free_percpu(p->cpu_qstats);
-			goto err2;
-		}
-		p->tcfa_index = idr_index;
+		index = 1;
+		err = idr_alloc_u32(idr, NULL, &index, UINT_MAX, GFP_ATOMIC);
 	} else {
-		idr_preload(GFP_KERNEL);
-		spin_lock_bh(&idrinfo->lock);
-		err = idr_alloc_ext(idr, NULL, NULL, index, index + 1,
-				    GFP_ATOMIC);
-		spin_unlock_bh(&idrinfo->lock);
-		idr_preload_end();
-		if (err)
-			goto err3;
-		p->tcfa_index = index;
+		err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC);
 	}
+	spin_unlock_bh(&idrinfo->lock);
+	idr_preload_end();
+	if (err)
+		goto err3;
 
+	p->tcfa_index = index;
 	p->tcfa_tm.install = jiffies;
 	p->tcfa_tm.lastuse = jiffies;
 	p->tcfa_tm.firstuse = 0;
@@ -330,9 +312,8 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
 					&p->tcfa_rate_est,
 					&p->tcfa_lock, NULL, est);
-		if (err) {
-			goto err3;
-		}
+		if (err)
+			goto err4;
 	}
 
 	p->idrinfo = idrinfo;
@@ -340,6 +321,15 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 	INIT_LIST_HEAD(&p->list);
 	*a = p;
 	return 0;
+err4:
+	idr_remove(idr, index);
+err3:
+	free_percpu(p->cpu_qstats);
+err2:
+	free_percpu(p->cpu_bstats);
+err1:
+	kfree(p);
+	return err;
 }
 EXPORT_SYMBOL(tcf_idr_create);
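(For context: the allocation now follows the usual IDR preload pattern,
as in the second hunk above; preallocate outside the lock, where sleeping
is allowed, then allocate atomically under it:

	idr_preload(GFP_KERNEL);	/* may sleep; preallocates idr nodes */
	spin_lock_bh(&idrinfo->lock);
	err = idr_alloc_u32(idr, NULL, &index, max, GFP_ATOMIC);
	spin_unlock_bh(&idrinfo->lock);
	idr_preload_end();

where 'max' stands in for UINT_MAX or the requested index, per the two
call sites.)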
--
2.15.0