Message-ID: <1276076350.2442.90.camel@edumazet-laptop>
Date: Wed, 09 Jun 2010 11:39:10 +0200
From: Eric Dumazet <eric.dumazet@...il.com>
To: Changli Gao <xiaosuo@...il.com>, David Miller <davem@...emloft.net>
Cc: netdev <netdev@...r.kernel.org>,
Stephen Hemminger <shemminger@...tta.com>,
Jarek Poplawski <jarkao2@...il.com>,
Patrick McHardy <kaber@...sh.net>
Subject: [PATCH net-2.6 v2] pkt_sched: gen_estimator: add a new lock
On Monday, 07 June 2010 at 19:18 +0200, Eric Dumazet wrote:
> [PATCH net-2.6] pkt_sched: gen_estimator: add a new lock
>
> gen_kill_estimator() / gen_new_estimator() is not always called with
> RTNL held.
>
> net/netfilter/xt_RATEEST.c is one user of these APIs that does not hold
> RTNL, so random corruption can occur between "tc" and "iptables".
>
> Add a new fine-grained lock instead of trying to use RTNL in xt_RATEEST.
Here is v2 of the patch, adding protection in gen_estimator_active() as
well.
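For readers skimming the diff: the whole fix boils down to taking the new
est_tree_lock around every access to the est_root rb-tree, so the "tc"
path (which runs under RTNL) and the xt_RATEEST path (which does not)
serialize against each other. Condensed, illustrative fragments of that
pattern (lifted from the diff below, not a standalone build):

	static DEFINE_SPINLOCK(est_tree_lock);

	/* writers: gen_new_estimator() / gen_kill_estimator() */
	spin_lock(&est_tree_lock);
	gen_add_node(est);		/* or rb_erase(&e->node, &est_root) */
	spin_unlock(&est_tree_lock);

	/* reader: gen_estimator_active() */
	spin_lock(&est_tree_lock);
	res = gen_find_node(bstats, rate_est) != NULL;
	spin_unlock(&est_tree_lock);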
[PATCH net-2.6 v2] pkt_sched: gen_estimator: add a new lock
gen_kill_estimator() / gen_new_estimator() are not always called with
RTNL held.

net/netfilter/xt_RATEEST.c is one user of these APIs that does not hold
RTNL, so random corruption can occur between "tc" and "iptables".

Add a new fine-grained lock instead of trying to use RTNL in netfilter.
Signed-off-by: Eric Dumazet <eric.dumazet@...il.com>
---
net/core/gen_estimator.c | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cf8e703..785e527 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock);
/* Protects against soft lockup during large deletion */
static struct rb_root est_root = RB_ROOT;
+static DEFINE_SPINLOCK(est_tree_lock);
static void est_timer(unsigned long arg)
{
@@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
*
* Returns 0 on success or a negative error code.
*
- * NOTE: Called under rtnl_mutex
*/
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est *rate_est,
@@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
est->last_packets = bstats->packets;
est->avpps = rate_est->pps<<10;
+ spin_lock(&est_tree_lock);
if (!elist[idx].timer.function) {
INIT_LIST_HEAD(&elist[idx].list);
setup_timer(&elist[idx].timer, est_timer, idx);
@@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
list_add_rcu(&est->list, &elist[idx].list);
gen_add_node(est);
+ spin_unlock(&est_tree_lock);
return 0;
}
@@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head)
*
* Removes the rate estimator specified by &bstats and &rate_est.
*
- * NOTE: Called under rtnl_mutex
*/
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est *rate_est)
{
struct gen_estimator *e;
+ spin_lock(&est_tree_lock);
while ((e = gen_find_node(bstats, rate_est))) {
rb_erase(&e->node, &est_root);
@@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
list_del_rcu(&e->list);
call_rcu(&e->e_rcu, __gen_kill_estimator);
}
+ spin_unlock(&est_tree_lock);
}
EXPORT_SYMBOL(gen_kill_estimator);
@@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator);
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
const struct gnet_stats_rate_est *rate_est)
{
+ bool res;
+
ASSERT_RTNL();
- return gen_find_node(bstats, rate_est) != NULL;
+ spin_lock(&est_tree_lock);
+ res = gen_find_node(bstats, rate_est) != NULL;
+ spin_unlock(&est_tree_lock);
+
+ return res;
}
EXPORT_SYMBOL(gen_estimator_active);
--
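P.S. For anyone who wants to see the failure mode outside the kernel, here
is a small userspace analogue (purely illustrative, all names invented,
nothing below is kernel code): two writers share a structure, one of them
holds an unrelated "big" lock, the other does not, so only the dedicated
tree_lock actually serializes them -- exactly the situation between "tc"
(RTNL) and xt_RATEEST. Build with: gcc -pthread race.c

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;      /* big lock, not taken by everyone */
static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER; /* the fine-grained lock */

struct node { struct node *next; int key; };
static struct node *head;                                      /* stands in for est_root */

static void insert(int key)
{
	struct node *n = malloc(sizeof(*n));

	n->key = key;
	pthread_mutex_lock(&tree_lock);   /* without this, concurrent inserts corrupt the list */
	n->next = head;
	head = n;
	pthread_mutex_unlock(&tree_lock);
}

static void *tc_like(void *arg)       /* caller that does hold the big lock */
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&rtnl);
		insert(i);
		pthread_mutex_unlock(&rtnl);
	}
	return NULL;
}

static void *rateest_like(void *arg)  /* caller that does NOT hold the big lock */
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		insert(-i);
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	unsigned long count = 0;

	pthread_create(&a, NULL, tc_like, NULL);
	pthread_create(&b, NULL, rateest_like, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	for (struct node *n = head; n; n = n->next)
		count++;
	/* With tree_lock taken in insert(), all 200000 nodes are present. */
	printf("%lu nodes\n", count);
	return 0;
}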